Example: scraping a Tianya forum post with multiple threads in Python

This example uses the re, urllib, and threading modules to scrape the content of a Tianya forum post with one download thread per page. Set url to the first page of the post you want to scrape and file_name to the name of the output file. Each thread stores the posts it extracts in a shared dict keyed by page number; once every thread has finished, the pages are written to the file in order.

The full script, down_tianya.py, is as follows:

#coding:utf-8
import urllib
import re
import threading
import os

class Down_Tianya(threading.Thread):
    """Downloader thread: fetches one page of the post."""
    def __init__(self, url, num, dt):
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print 'downloading from %s' % self.url
        self.down_text()

    def down_text(self):
        """Extract the posts from self.url and store them in the shared
        dict, keyed by page number."""
        html_content = urllib.urlopen(self.url).read()
        # Captures each reply's timestamp and body. The pattern targets the
        # markup Tianya used when this was written; adjust it if the site's
        # HTML changes.
        text_pattern = re.compile('<span>时间:(.*?)</span>.*?<!-- <div class="host-ico">楼主</div> -->.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
        text = text_pattern.findall(html_content)
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join

def page(url):
    """Get the total number of pages from the post's first page."""
    html_page = urllib.urlopen(url).read()
    # The last page number sits just before the "下页" (next page) link.
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        page_num = int(page_result.group(1))
        return page_num

def write_text(dict, fn):
    """Write the dict to a text file in page order; each value is the
    list of posts on that page."""
    tx_file = open(fn, 'w+')
    pn = len(dict)
    for i in range(1, pn+1):
        tx_list = dict[i]
        for tx in tx_list:
            tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace('&nbsp;', '')
            tx_file.write(tx.strip()+'\r\n'*4)
    tx_file.close()

def main():
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'
    my_page = page(url)
    my_dict = {}

    print 'page num is : %s' % my_page

    # Build the URL for each page and download every page in its own thread.
    threads = []
    for num in range(1, my_page+1):
        myurl = '%s%s.shtml' % (url[:-7], num)
        downlist = Down_Tianya(myurl, num, my_dict)
        downlist.start()
        threads.append(downlist)

    # Wait until every download has finished before writing the file.
    for t in threads:
        t.join()

    write_text(my_dict, file_name)

    print 'All download finished. Save file at directory: %s' % os.getcwd()

if __name__ == '__main__':
    main()
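
Two details of main() deserve a closer look. First, the page URLs follow Tianya's scheme: page N of this post lives at post-16-996521-N.shtml, so slicing the seven characters of '1.shtml' off the first-page URL leaves a prefix that any page number can be appended to:

url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
print url[:-7]                      # http://bbs.tianya.cn/post-16-996521-
print '%s%s.shtml' % (url[:-7], 3)  # http://bbs.tianya.cn/post-16-996521-3.shtml

Second, the threads share my_dict without any lock. That is safe here because each thread writes only to its own key (its page number), a single dict assignment is atomic in CPython, and write_text reads the dict only after every thread has been join()ed.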
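
The script above is Python 2 code (print statements, urllib.urlopen). As a rough porting guide, here is a minimal Python 3 sketch of the same download logic; fetch_page and fetch_all are illustrative names, and the regex is a simplified form of the body pattern above, still assuming Tianya's old markup:

import re
import threading
import urllib.request

def fetch_page(url, num, result):
    """Fetch one page and store its (timestamp, body) pairs in the
    shared dict under the page number."""
    html = urllib.request.urlopen(url).read().decode('utf-8', errors='ignore')
    pattern = re.compile(r'<span>时间:(.*?)</span>.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
    result[num] = pattern.findall(html)

def fetch_all(first_page_url, page_count):
    """Download every page in its own thread and return the shared dict."""
    result = {}
    threads = []
    for num in range(1, page_count + 1):
        page_url = '%s%s.shtml' % (first_page_url[:-7], num)
        t = threading.Thread(target=fetch_page, args=(page_url, num, result))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()  # do not read result until every download has finished
    return result

Passing a target function to threading.Thread is the more common idiom in Python 3 than subclassing Thread, but the two are equivalent here.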
