def url_manager_proc(self, url_q: Queue, conn_q: Queue, root_url):
    print('url manager process start...')
    url_manager = UrlManager()
    url_manager.add_new_url(root_url)
    print('url manager process started...')
    while True:
        while url_manager.has_new_url():
            new_url = url_manager.get_new_url()
            print('new_url', new_url)
            # Send the new URL to the worker nodes
            url_q.put(new_url)
            # Stop once 2000 links have been crawled and save the progress
            if url_manager.old_url_size() > 2000:
                # Notify the crawler nodes that the job is finished
                url_q.put('end')
                print('Control node sent the stop notification')
                # Shut down the manager node and persist both URL sets
                url_manager.save_process(path.join('dist', 'new_urls.txt'), url_manager.new_urls)
                url_manager.save_process(path.join('dist', 'old_urls.txt'), url_manager.old_urls)
                return
        # Add URLs received from result_solve_proc to the URL manager
        try:
            if not conn_q.empty():
                urls = conn_q.get()
                url_manager.add_new_urls(urls)
        except BaseException:
            time.sleep(0.1)
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, url):
        count = 1
        self.urls.add_new_url(url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                html_cont = self.downloader.download(new_url)
                new_urls, html_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(html_data)
                print("%d craw success : %s" % (count, new_url))
                if count >= 10:
                    break
                count = count + 1
            except Exception as e:
                print(str(e))
                print("%d craw failed : %s" % (count, new_url))
        self.outputer.output()
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.output = HtmlOutputer()

    def craw(self, root_url, page_amount=5, time_sleep=None):
        count = 1
        # Add the first URL to be crawled
        self.urls.add_new_url(root_url)
        # While the set still holds URLs, take one out and request it; exit when none are left
        while self.urls.has_new_url():
            try:
                # Start crawling
                new_url = self.urls.get_new_url()
                print(f'craw{count}:{new_url}')
                # Request the URL and get the HTML back
                html_content = self.downloader.download(new_url)
                # Parse the HTML with XPath to extract the data we need
                new_urls, new_data = self.parser.parse(html_content)
                # Add the links found on this entry page to the URL manager for later crawling
                self.urls.add_new_urls(new_urls)
                self.output.collect_data(new_url, new_data)
                count += 1
                if count > page_amount:
                    break
                time.sleep(2)
            except Exception as e:
                print(e)
                print(f'craw failed: {new_url}')
        self.output.output_html()
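# A minimal sketch of the HtmlParser interface the SpiderMain above assumes,
# i.e. parse(html_content) returning (new_urls, new_data). The XPath expressions
# and the use of the page title as "the data" are illustrative assumptions, not
# the original project's parser.
from lxml import etree


class HtmlParser(object):
    def parse(self, html_content):
        """Extract follow-up links and a piece of page data from raw HTML."""
        if not html_content:
            return set(), None
        tree = etree.HTML(html_content)
        # Collect candidate links from all <a href="..."> elements
        new_urls = set(tree.xpath('//a/@href'))
        # As a stand-in for the page data, grab the title text
        title_list = tree.xpath('//title/text()')
        new_data = title_list[0] if title_list else None
        return new_urls, new_data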
class Scheduler(object):
    def __init__(self):
        self.url_manager = UrlManager()
        self.downloader = Downloader()
        self.parser = Parser()
        self.data_output = DadaOutput()

    def crawl(self, start_url, max_page):
        self.url_manager.add_new_url(start_url)
        while self.url_manager.has_url() and self.url_manager.old_url_size() < max_page:
            page_url = self.url_manager.get_new_url()
            page_html = self.downloader.down(page_url)
            new_urls, new_data = self.parser.parse(start_url, page_html)
            self.url_manager.add_new_urls(new_urls)
            self.data_output.store_data(new_data)
            self.data_output.output_html()
            print('Record %s written' % self.url_manager.old_url_size())
class SpiderMain():
    def __init__(self):
        # URL manager
        # self.urls = UrlManager.UrlManager()
        self.urls = UrlManager()
        # URL downloader
        # self.downloader = HtmlDownloader.HtmlDownloader()
        self.downloader = HtmlDownloader()
        # URL parser
        # self.parser = html_parser.HtmlParser()
        self.parser = HtmlParser()
        # self.outputer = html_outputer.HtmlOutputer()
        self.outputer = HtmlOutputer()

    def craw(self, root_url):
        count = 1
        originSet = set()
        originSet.add(root_url)
        self.urls.add_new_urls(originSet)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print("craw %d : %s" % (count, new_url))
                html_cont = self.downloader.downloader(new_url)
                # Report the download status
                downStat = "ERROR"
                if html_cont is not None:
                    downStat = "SUCCESS"
                print("[Page ID : %d downloader %s!]" % (count, downStat))
                new_urls, new_data = self.parser.parser(new_url, html_cont)
                # print("\nnew_urls[%s], new_data[%s]" % (new_urls, new_data))
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 15:
                    break
                count = count + 1
            except Exception as err:
                print("craw failed! ERROR information : %s" % err)
        self.outputer.output_html()
class Spider():
    def __init__(self):
        self.manager = UrlManager()
        self.downloader = HTMLDownloader()
        self.parser = HTMLParser()
        self.output = DataOutput()

    def crawl(self, root_url):
        self.manager.add_new_url(root_url)
        while (self.manager.has_new_url()
               and self.manager.old_urls_size() < 50):
            try:
                new_url = self.manager.get_new_url()
                html = self.downloader.download(new_url)
                new_urls, data = self.parser.parser(new_url, html)
                self.manager.add_new_urls(new_urls)
                self.output.store_data(data)
                print("%s links crawled so far" % self.manager.old_urls_size())
            except Exception as e:
                print(e)
        self.output.output_html()
class SpiderMain:
    def __init__(self):
        self.url_manager = UrlManager()
        self.html_downloader = HtmlDownloader()
        self.html_parser = HtmlParser()
        self.data_storage = DataStorage()

    def start(self):
        """
        Main entry point of the spider.
        :return:
        """
        self.url_manager.add_new_url(
            "http://127.0.0.1:8848/xiaomi-master/index.html")
        # Fetch a URL from the URL manager
        url = self.url_manager.get_new_url()
        # Download the fetched URL with the downloader
        html = self.html_downloader.download(url)
        # Parse the HTML
        res = self.html_parser.parser(html)
        # Store the data
        self.data_storage.storage(res)
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, root_url):
        count = 1
        # Hand the root URL to the URL manager
        self.urls.add_url(root_url)
        # Main page-crawling loop
        while self.urls.has_new_url():
            try:
                # Take one URL waiting to be crawled
                new_url = self.urls.get_new_url()
                print('craw %d: %s' % (count, new_url))
                # Download the content of that URL
                html_cont = self.downloader.download(new_url)
                # Parse the downloaded content to get new_urls and new data
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                # Put the newly found URLs into the URL manager
                self.urls.add_new_urls(new_urls)
                # Hand the new data to the output collector
                self.outputer.collect_data(new_data)
                if count == 100:
                    break
                count += 1
            except Exception as e:
                print('craw failed')
        self.outputer.output_html()
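# A minimal sketch of the HtmlOutputer that craw() hands its data to: collect_data()
# buffers items in memory and output_html() dumps them as a simple HTML table.
# The default file name and the assumption that each item is a dict are illustrative,
# not taken from the original projects.
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is not None:
            self.datas.append(data)

    def output_html(self, file_name='output.html'):
        with open(file_name, 'w', encoding='utf-8') as fout:
            fout.write('<html><body><table>\n')
            for data in self.datas:
                fout.write('<tr>')
                for value in data.values():
                    fout.write('<td>%s</td>' % value)
                fout.write('</tr>\n')
            fout.write('</table></body></html>\n')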
def url_manager_proc(self, url_q, conn_q, root_url):
    url_manager = UrlManager()
    url_manager.add_new_url(root_url)
    print(url_q)
    while True:
        while url_manager.has_new_url():
            new_url = url_manager.get_new_url()
            # Send the new URL to the worker nodes
            url_q.put(new_url)
            print('old_url=%s' % url_manager.old_url_size())
            if url_manager.old_url_size() > 2000:
                # Notify the crawler nodes that the job is finished
                url_q.put('end')
                print('Control node sent the stop notification!')
                # Persist both URL sets before shutting down
                url_manager.save_progress('new_urls.txt', url_manager.new_urls)
                url_manager.save_progress('old_urls.txt', url_manager.old_urls)
                return
        # Add URLs received from result_solve_proc to the URL manager
        try:
            if not conn_q.empty():
                urls = conn_q.get()
                url_manager.add_new_urls(urls)
        except BaseException:
            time.sleep(0.1)
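# A minimal sketch of the UrlManager the two url_manager_proc variants above rely on:
# two sets (new_urls / old_urls) plus pickle-based persistence for save_progress().
# The attribute and method names are inferred from how the snippets call them, so
# treat this as an assumption rather than the original implementation.
import pickle


class UrlManager(object):
    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs already handed out

    def has_new_url(self):
        return len(self.new_urls) != 0

    def add_new_url(self, url):
        if url and url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        for url in urls or []:
            self.add_new_url(url)

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

    def old_url_size(self):
        return len(self.old_urls)

    def save_progress(self, file_name, data):
        # Persist a URL set so a later run can resume from it
        with open(file_name, 'wb') as f:
            pickle.dump(data, f)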
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()  # fetch a new URL
                html_cont = self.downloader.download(new_url)  # download its content
                new_urls, new_data = self.parser.parse(new_url, html_cont)  # parse the content
                self.urls.add_new_urls(new_urls)  # store newly found URLs in the URL manager
                self.outputer.collect_data(new_data)  # collect the parsed data
                if count == 200:
                    break
                count = count + 1
            except Exception:
                print("craw failed")
        self.outputer.output_html()
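# A minimal sketch of the HtmlDownloader used by the SpiderMain classes above,
# based on requests. The User-Agent header, timeout and the class/method names
# are assumptions for illustration, not the original projects' downloader.
import requests


class HtmlDownloader(object):
    def download(self, url):
        """Fetch a page and return its text, or None on failure."""
        if url is None:
            return None
        headers = {'User-Agent': 'Mozilla/5.0'}
        try:
            response = requests.get(url, headers=headers, timeout=10)
        except requests.RequestException:
            return None
        if response.status_code != 200:
            return None
        # Let requests guess the encoding from the body when the header is missing
        response.encoding = response.apparent_encoding
        return response.text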
class SpiderMain(): """爬虫程序主模块""" def __init__(self): """构造函数,初始化属性""" self.urls = UrlManager() self.log = MyLog("spider_main", "logs") self.downloader = HtmlDownloader() self.parser = HtmlParser() self.outputer = HtmlOutputer() #self.util=utill.DBConn() def craw(self, root_url): """爬虫入口函数""" areas = { "gulou": 100, "jianye": 72, "qinhuai": 100, "xuanwu": 67, "yuhuatai": 32, "qixia": 62, "baijiahu": 33, "chalukou1": 26, "jiangningqita11": 3, "dongshanzhen": 29, "jiangningdaxuecheng": 15, "jiulonghu": 12, "jiangjundadao11": 22, "kexueyuan": 9, "qilinzhen": 42, "tiexinqiao": 9, "pukou": 100, "liuhe": 1, } #areas = {"gulou":1} #1、抓取所有二手房详情界面链接,并将所有连接放入URL管理模块 for area, pg_sum in areas.items(): for num in range(1, pg_sum + 1): #1.1 拼接页面地址: https://nj.lianjia.com/ershoufang/gulou/pg2/ pg_url = root_url + area + "/pg" + str(num) + "/" self.log.logger.info("1.1 拼接页面地址:" + pg_url) print("1.1 拼接页面地址:" + pg_url) #1.2 启动下载器,下载页面. try: html_cont = self.downloader.download(pg_url) except Exception as e: self.log.logger.error("1.2 下载页面出现异常:" + repr(e)) time.sleep(60 * 30) else: #1.3 解析PG页面,获得二手房详情页面的链接,并将所有链接放入URL管理模块 try: ershoufang_urls = self.parser.get_erhoufang_urls( html_cont) except Exception as e: self.log.logger.error("1.3 页面解析出现异常:" + repr(e)) else: self.urls.add_new_urls(ershoufang_urls) #暂停0~3秒的整数秒,时间区间:[0,3] time.sleep(random.randint(0, 3)) time.sleep(60 * 20) #2、解析二手房具体细心页面 id = 1 stop = 1 while self.urls.has_new_url(): #2.1 获取url try: detail_url = self.urls.get_new_url() self.log.logger.info("2.1 二手房页面地址:" + detail_url) print("2.1 二手房页面地址:" + detail_url) except Exception as e: print("2.1 拼接地址出现异常") self.log.logger.error("2.1 拼接地址出现异常:" + detail_url) #2.2 下载页面 try: detail_html = self.downloader.download(detail_url) except Exception as e: self.log.logger.error("2.2 下载页面出现异常:" + repr(e)) self.urls.add_new_url(detail_url) time.sleep(60 * 30) else: #2.3 解析页面 try: ershoufang_data = self.parser.get_ershoufang_data( detail_html, id) except Exception as e: self.log.logger.error("2.3 解析页面出现异常:" + repr(e)) else: #2.4 输出数据 try: self.outputer.collect_data(ershoufang_data) except Exception as e: self.log.logger.error("2.4 输出数据出现异常:" + repr(e)) else: print(id) id = id + 1 stop = stop + 1 #暂停0~3秒的整数秒,时间区间:[0,3] time.sleep(random.randint(0, 3)) if stop == 2500: stop = 1 time.sleep(60 * 20)
class spider_main():
    def __init__(self):
        self.urls = UrlManager()
        self.parser = HtmlParser()
        self.downloader = UrlDownloader()
        self.log = MyLog("spider", "logs")
        self.output = HtmlOutPut()

    # Start the spider from the main module
    def Crawling(self, root_url):
        # A dict of area names and their page counts
        areas = {
            "gulou": 100, "jianye": 100, "qinhuai": 100, "xuanwu": 100,
            "yuhuatai": 100, "qixia": 100, "baijiahu": 64, "jiangningqita11": 5,
            "chalukou1": 63, "dongshanzhen": 42, "jiangningdaxuecheng": 28,
            "jiulonghu": 28, "jiangjundadao11": 50, "kexueyuan": 16,
            "pukou": 100, "liuhe": 13, "lishui": 9, "jiangning": 100,
            "qilinzhen": 83, "tangshanzhen": 2, "fenghuangxijie1": 82,
            "xianlin2": 33, "yaohuamen": 4, "maigaoqiao1": 33, "maqun1": 31,
            "qixiaqita1": 5, "xiaozhuang": 9, "yanziji": 2, "yueyuan": 15,
            "wanshou1": 5, "hongshan1": 16, "caochangmendajie": 27,
            "dinghuaimendajie": 37, "fujianlu": 9, "hanzhongmendajie": 19,
            "huxijie": 15, "jiangdong2": 8, "nanhu4": 38, "nanyuan2": 38,
            "shuiximen1": 13, "wandaguangchang1": 25, "xiaoxing": 13,
            "yuhuaxincun": 15, "lukou": 14, "dingshanjiedao": 8,
            "gaoxinqu2": 12, "jiangpujiedao": 29, "pukouqita11": 8,
            "qiaobei": 100, "taishanjiedao": 12
        }
        # Build every listing URL by concatenation and save them all
        for area, num in areas.items():
            for n in range(1, num + 1):
                # Build the URL, base: https://nj.lianjia.com/ershoufang/
                splice_url = root_url + area + "/pg" + str(n) + "/"
                # Write the built URL to the log
                self.log.logger.info("built URL " + splice_url)
                # Print to the console
                print("built URL " + splice_url)
                # Once built, download the page
                try:
                    html_down = self.downloader.download(splice_url)
                except Exception as e:
                    # Write the error to the log
                    self.log.logger.error("error while downloading the page " + repr(e))
                    # Suspend for a while
                    time.sleep(60)
                else:
                    # If the download succeeded, parse the page
                    try:
                        secondhome_urls = self.parser.get_secondhandhome_urls(html_down)
                    except Exception as e:
                        # Write the error to the log
                        self.log.logger.error("error while parsing the page " + repr(e))
                    else:
                        # Parsing succeeded
                        self.urls.add_new_urls(secondhome_urls)
                        # time.sleep(random.randint(0, 3))
        time.sleep(60)

        # Parse each detail page to get the data we need
        id = 1  # starting index
        stop = 1
        while self.urls.isEmpty_new_urls():
            # Take a URL out
            try:
                temp_url = self.urls.get_new_url()
                # Print to the console
                print("detail page URL " + temp_url)
                # Write to the log
                self.log.logger.info("detail page URL " + temp_url)
            except Exception as e:
                # Write the error to the log and print to the console
                print("failed to get the detail page URL " + temp_url)
                self.log.logger.error("error while getting the URL " + repr(e))
            # URL obtained, download it
            try:
                temp_data = self.downloader.download(temp_url)
            except Exception as e:
                # Print to the console
                print("failed to download the page " + temp_url)
                # Write the error to the log
                self.log.logger.error("failed to download the page " + repr(e))
                self.urls.add_new_url(temp_url)
                time.sleep(10)
            else:
                # Downloaded successfully, parse the page
                try:
                    temp_parser = self.parser.get_secondhandhome_data(temp_data, id)
                except Exception as e:
                    self.log.logger.error("error while parsing the page " + repr(e))
                    print("error while parsing the page " + repr(e))
                else:
                    # Parsed successfully, write the data out
                    try:
                        self.output.write_data(temp_parser)
                    except Exception as e:
                        self.log.logger.error("error while writing the data " + repr(e))
                        print("error while writing the data " + repr(e))
                    else:
                        print(id)
                        id = id + 1
                        stop = stop + 1
            time.sleep(0.2)
            if stop == 2500:
                stop = 1
                time.sleep(60)
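# A hedged sketch of the MyLog helper that the two Lianjia spiders above construct
# with MyLog("spider_main", "logs") / MyLog("spider", "logs"): a thin wrapper that
# exposes a .logger writing both to a file under the given directory and to the
# console. The class and file naming here are assumptions built on Python's
# standard logging module, not the original utility.
import logging
import os


class MyLog(object):
    def __init__(self, name, log_dir):
        os.makedirs(log_dir, exist_ok=True)
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging.INFO)
        # Avoid stacking duplicate handlers if the logger is created twice
        if not self.logger.handlers:
            formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
            file_handler = logging.FileHandler(
                os.path.join(log_dir, name + '.log'), encoding='utf-8')
            file_handler.setFormatter(formatter)
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)
            self.logger.addHandler(console_handler)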