def url_manager_proc(self, url_q: Queue, conn_q: Queue, root_url):
    print('url manager process start...')
    url_manager = UrlManager()
    url_manager.add_new_url(root_url)
    print('url manager process started...')
    while True:
        while url_manager.has_new_url():
            new_url = url_manager.get_new_url()
            print('new_url', new_url)
            # Send the new URL to the worker nodes
            url_q.put(new_url)
            # Stop condition: shut down after more than 2000 links have been crawled and save the progress
            if url_manager.old_url_size() > 2000:
                # Notify the crawler nodes that the job is finished
                url_q.put('end')
                print('Control node sent the end notification')
                # Shut down the manager node and persist the state of both URL sets
                url_manager.save_process(path.join('dist', 'new_urls.txt'), url_manager.new_urls)
                url_manager.save_process(path.join('dist', 'old_urls.txt'), url_manager.old_urls)
                return
        # Add the URLs received from result_solve_proc to the URL manager
        try:
            if not conn_q.empty():
                urls = conn_q.get()
                url_manager.add_new_urls(urls)
        except BaseException as e:
            time.sleep(0.1)
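# --------------------------------------------------------------------------
# url_manager_proc above is one process of a distributed control node; the
# queues and the UrlManager come from elsewhere in the project. The wiring
# below is only a minimal sketch of how such a process is typically started:
# NodeManager is a hypothetical class assumed to define url_manager_proc as
# shown above, and the queue names mirror the method's parameters.
from multiprocessing import Process, Queue

def start_control_node(root_url):
    node = NodeManager()   # hypothetical owner of url_manager_proc
    url_q = Queue()        # control node -> crawler nodes (new URLs, then the 'end' sentinel)
    conn_q = Queue()       # result process -> control node (URLs discovered by the workers)
    proc = Process(target=node.url_manager_proc, args=(url_q, conn_q, root_url))
    proc.start()
    return url_q, conn_q, proc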
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, url):
        count = 1
        self.urls.add_new_url(url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                html_cont = self.downloader.download(new_url)
                new_urls, html_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(html_data)
                print("%d craw success : %s" % (count, new_url))
                if count >= 10:
                    break
                count = count + 1
            except Exception as e:
                print(str(e))
                print("%d craw failed : %s" % (count, new_url))
        self.outputer.output()
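# --------------------------------------------------------------------------
# Every SpiderMain/Scheduler variant in this collection assumes a UrlManager
# with the same small interface: two URL sets plus add/get/size helpers.
# The class below is a minimal sketch of that assumed interface, not the
# actual implementation shipped with any of these projects.
class UrlManager(object):
    def __init__(self):
        self.new_urls = set()   # URLs still waiting to be crawled
        self.old_urls = set()   # URLs that have already been crawled

    def add_new_url(self, url):
        # Ignore empty URLs and URLs we have already seen.
        if url and url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        for url in urls or []:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        # Move one URL from the "new" set to the "old" set and hand it out.
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url

    def old_url_size(self):
        return len(self.old_urls)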
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.output = HtmlOutputer()

    def craw(self, root_url, page_amount=5, time_sleep=None):
        count = 1
        # Add the first URL to be crawled
        self.urls.add_new_url(root_url)
        # Keep taking URLs while the set is non-empty; exit when no links remain
        while self.urls.has_new_url():
            try:
                # Start crawling
                new_url = self.urls.get_new_url()
                print(f'craw {count}: {new_url}')
                # Request the URL and get the HTML back
                html_content = self.downloader.download(new_url)
                # Parse the HTML with XPath to extract the data we need
                new_urls, new_data = self.parser.parse(html_content)
                # Add the <a> links found on this entry page to the URL manager
                self.urls.add_new_urls(new_urls)
                self.output.collect_data(new_url, new_data)
                count += 1
                if count > page_amount:
                    break
                time.sleep(time_sleep or 2)
            except Exception as e:
                print(e)
                print(f'crawl failed: {new_url}')
        self.output.output_html()
class Scheduler(object):
    def __init__(self):
        self.url_manager = UrlManager()
        self.downloader = Downloader()
        self.parser = Parser()
        self.data_output = DadaOutput()

    def crawl(self, start_url, max_page):
        self.url_manager.add_new_url(start_url)
        while self.url_manager.has_url() and self.url_manager.old_url_size() < max_page:
            page_url = self.url_manager.get_new_url()
            page_html = self.downloader.down(page_url)
            new_urls, new_data = self.parser.parse(start_url, page_html)
            self.url_manager.add_new_urls(new_urls)
            self.data_output.store_data(new_data)
            self.data_output.output_html()
            print('%s records written' % self.url_manager.old_url_size())
class Spider():
    def __init__(self):
        self.manager = UrlManager()
        self.downloader = HTMLDownloader()
        self.parser = HTMLParser()
        self.output = DataOutput()

    def crawl(self, root_url):
        self.manager.add_new_url(root_url)
        while self.manager.has_new_url() and self.manager.old_urls_size() < 50:
            try:
                new_url = self.manager.get_new_url()
                html = self.downloader.download(new_url)
                new_urls, data = self.parser.parser(new_url, html)
                self.manager.add_new_urls(new_urls)
                self.output.store_data(data)
                print("%s links crawled so far" % self.manager.old_urls_size())
            except Exception as e:
                print(e)
        self.output.output_html()
class SpiderMain:
    def __init__(self):
        self.url_manager = UrlManager()
        self.html_downloader = HtmlDownloader()
        self.html_parser = HtmlParser()
        self.data_storage = DataStorage()

    def start(self):
        """
        Main entry point that starts the crawler.
        :return:
        """
        self.url_manager.add_new_url(
            "http://127.0.0.1:8848/xiaomi-master/index.html")
        # Take a URL from the URL manager
        url = self.url_manager.get_new_url()
        # Download the fetched URL with the downloader
        html = self.html_downloader.download(url)
        # Parse the HTML
        res = self.html_parser.parser(html)
        # Store the extracted data
        self.data_storage.storage(res)
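# --------------------------------------------------------------------------
# start() performs a single download/parse/store pass over the page
# hard-coded above, so a run of this crawler is just the sketch below
# (assuming the collaborating classes are importable from the same project):
if __name__ == '__main__':
    SpiderMain().start()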
def url_manager_proc(self, url_q, conn_q, root_url):
    url_manager = UrlManager()
    url_manager.add_new_url(root_url)
    print(url_q)
    while True:
        while url_manager.has_new_url():
            new_url = url_manager.get_new_url()
            url_q.put(new_url)
            print('old_url=%s' % url_manager.old_url_size())
            if url_manager.old_url_size() > 2000:
                # Notify the crawler nodes that the job is finished
                url_q.put('end')
                print('Control node sent the end notification!')
                url_manager.save_progress('new_urls.txt', url_manager.new_urls)
                url_manager.save_progress('old_urls.txt', url_manager.old_urls)
                return
        # Add the URLs received from the result process to the URL manager
        try:
            if not conn_q.empty():
                urls = conn_q.get()
                url_manager.add_new_urls(urls)
        except BaseException as e:
            time.sleep(0.1)
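# --------------------------------------------------------------------------
# This variant persists both URL sets through save_progress before shutting
# down. UrlManager implementations used with this pattern usually pickle each
# set to disk and reload it on startup; the helpers below are only a sketch
# of that assumption, not the project's actual save_progress/load_progress.
import pickle

def save_progress(path, data):
    # Persist a set of URLs so a later run can resume where this one stopped.
    with open(path, 'wb') as f:
        pickle.dump(data, f)

def load_progress(path):
    # Reload a previously saved set; fall back to an empty set on the first run.
    try:
        with open(path, 'rb') as f:
            return pickle.load(f)
    except FileNotFoundError:
        return set()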
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()                            # fetch a new URL
                html_cont = self.downloader.download(new_url)                # download its content
                new_urls, new_data = self.parser.parse(new_url, html_cont)   # parse the page
                self.urls.add_new_urls(new_urls)                             # feed the discovered URLs back into the manager
                self.outputer.collect_data(new_data)                         # collect the parsed data
                if count == 200:
                    break
                count = count + 1
            except:
                print("craw failed")
        self.outputer.output_html()
class SpiderMain(): """爬虫程序主模块""" def __init__(self): """构造函数,初始化属性""" self.urls = UrlManager() self.log = MyLog("spider_main", "logs") self.downloader = HtmlDownloader() self.parser = HtmlParser() self.outputer = HtmlOutputer() #self.util=utill.DBConn() def craw(self, root_url): """爬虫入口函数""" areas = { "gulou": 100, "jianye": 72, "qinhuai": 100, "xuanwu": 67, "yuhuatai": 32, "qixia": 62, "baijiahu": 33, "chalukou1": 26, "jiangningqita11": 3, "dongshanzhen": 29, "jiangningdaxuecheng": 15, "jiulonghu": 12, "jiangjundadao11": 22, "kexueyuan": 9, "qilinzhen": 42, "tiexinqiao": 9, "pukou": 100, "liuhe": 1, } #areas = {"gulou":1} #1、抓取所有二手房详情界面链接,并将所有连接放入URL管理模块 for area, pg_sum in areas.items(): for num in range(1, pg_sum + 1): #1.1 拼接页面地址: https://nj.lianjia.com/ershoufang/gulou/pg2/ pg_url = root_url + area + "/pg" + str(num) + "/" self.log.logger.info("1.1 拼接页面地址:" + pg_url) print("1.1 拼接页面地址:" + pg_url) #1.2 启动下载器,下载页面. try: html_cont = self.downloader.download(pg_url) except Exception as e: self.log.logger.error("1.2 下载页面出现异常:" + repr(e)) time.sleep(60 * 30) else: #1.3 解析PG页面,获得二手房详情页面的链接,并将所有链接放入URL管理模块 try: ershoufang_urls = self.parser.get_erhoufang_urls( html_cont) except Exception as e: self.log.logger.error("1.3 页面解析出现异常:" + repr(e)) else: self.urls.add_new_urls(ershoufang_urls) #暂停0~3秒的整数秒,时间区间:[0,3] time.sleep(random.randint(0, 3)) time.sleep(60 * 20) #2、解析二手房具体细心页面 id = 1 stop = 1 while self.urls.has_new_url(): #2.1 获取url try: detail_url = self.urls.get_new_url() self.log.logger.info("2.1 二手房页面地址:" + detail_url) print("2.1 二手房页面地址:" + detail_url) except Exception as e: print("2.1 拼接地址出现异常") self.log.logger.error("2.1 拼接地址出现异常:" + detail_url) #2.2 下载页面 try: detail_html = self.downloader.download(detail_url) except Exception as e: self.log.logger.error("2.2 下载页面出现异常:" + repr(e)) self.urls.add_new_url(detail_url) time.sleep(60 * 30) else: #2.3 解析页面 try: ershoufang_data = self.parser.get_ershoufang_data( detail_html, id) except Exception as e: self.log.logger.error("2.3 解析页面出现异常:" + repr(e)) else: #2.4 输出数据 try: self.outputer.collect_data(ershoufang_data) except Exception as e: self.log.logger.error("2.4 输出数据出现异常:" + repr(e)) else: print(id) id = id + 1 stop = stop + 1 #暂停0~3秒的整数秒,时间区间:[0,3] time.sleep(random.randint(0, 3)) if stop == 2500: stop = 1 time.sleep(60 * 20)
class spider_main():
    def __init__(self):
        self.urls = UrlManager()
        self.parser = HtmlParser()
        self.downloader = UrlDownloader()
        self.log = MyLog("spider", "logs")
        self.output = HtmlOutPut()

    # Entry point of the crawler in the main module
    def Crawling(self, root_url):
        # Map each area name to its number of listing pages
        areas = {
            "gulou": 100, "jianye": 100, "qinhuai": 100, "xuanwu": 100,
            "yuhuatai": 100, "qixia": 100, "baijiahu": 64, "jiangningqita11": 5,
            "chalukou1": 63, "dongshanzhen": 42, "jiangningdaxuecheng": 28,
            "jiulonghu": 28, "jiangjundadao11": 50, "kexueyuan": 16,
            "pukou": 100, "liuhe": 13, "lishui": 9, "jiangning": 100,
            "qilinzhen": 83, "tangshanzhen": 2, "fenghuangxijie1": 82,
            "xianlin2": 33, "yaohuamen": 4, "maigaoqiao1": 33, "maqun1": 31,
            "qixiaqita1": 5, "xiaozhuang": 9, "yanziji": 2, "yueyuan": 15,
            "wanshou1": 5, "hongshan1": 16, "caochangmendajie": 27,
            "dinghuaimendajie": 37, "fujianlu": 9, "hanzhongmendajie": 19,
            "huxijie": 15, "jiangdong2": 8, "nanhu4": 38, "nanyuan2": 38,
            "shuiximen1": 13, "wandaguangchang1": 25, "xiaoxing": 13,
            "yuhuaxincun": 15, "lukou": 14, "dingshanjiedao": 8,
            "gaoxinqu2": 12, "jiangpujiedao": 29, "pukouqita11": 8,
            "qiaobei": 100, "taishanjiedao": 12
        }

        # Build every listing-page URL and collect the detail-page links
        for area, num in areas.items():
            for n in range(1, num + 1):
                # Build the URL, e.g. https://nj.lianjia.com/ershoufang/
                splice_url = root_url + area + "/pg" + str(n) + "/"
                # Write the built URL to the log
                self.log.logger.info("built url " + splice_url)
                # Print it to the console
                print("built url " + splice_url)
                # Download the page once the URL has been built
                try:
                    html_down = self.downloader.download(splice_url)
                except Exception as e:
                    # Write the error to the log
                    self.log.logger.error("html download failed " + repr(e))
                    # Suspend the process for a while
                    time.sleep(60)
                else:
                    # If the download succeeded, parse the page
                    try:
                        secondhome_urls = self.parser.get_secondhandhome_urls(html_down)
                    except Exception as e:
                        # Write the error to the log
                        self.log.logger.error("html parsing failed " + repr(e))
                    else:
                        # Parsing succeeded
                        self.urls.add_new_urls(secondhome_urls)
                        # time.sleep(random.randint(0,3))
        time.sleep(60)

        # Parse each detail page and build the data set
        id = 1      # starting id
        stop = 1
        while self.urls.isEmpty_new_urls():
            # Take a URL out of the manager
            try:
                temp_url = self.urls.get_new_url()
                # Print it to the console
                print("html page url " + temp_url)
                # Write it to the log
                self.log.logger.info("html page url " + temp_url)
            except Exception as e:
                # Print to the console and write the error to the log
                print("failed to get an html page url")
                self.log.logger.error("failed to get url " + repr(e))
            # Download the page once the URL is available
            try:
                temp_data = self.downloader.download(temp_url)
            except Exception as e:
                # Print to the console
                print("page download failed " + temp_url)
                # Write the error to the log
                self.log.logger.error("page download failed " + repr(e))
                self.urls.add_new_url(temp_url)
                time.sleep(10)
            else:
                # Download succeeded; parse the page
                try:
                    temp_parser = self.parser.get_secondhandhome_data(temp_data, id)
                except Exception as e:
                    self.log.logger.error("html parsing failed " + repr(e))
                    print("html parsing failed " + repr(e))
                else:
                    # Parsing succeeded; write the data out
                    try:
                        self.output.write_data(temp_parser)
                    except Exception as e:
                        self.log.logger.error("failed to write the data set " + repr(e))
                        print("failed to write the data set " + repr(e))
                    else:
                        print(id)
                        id = id + 1
                        stop = stop + 1
            time.sleep(0.2)
            if stop == 2500:
                stop = 1
                time.sleep(60)
class NewsCrawler:
    def __init__(self):
        self.seed = ['', 'http://news.163.com/']   # NetEase News homepage
        self.downloader = multiThreadDownloader.downloader()
        self.analyze = HtmlAnalyze()
        self.craw_url_man = UrlManager()
        self.page_url_man = UrlManager()
        self.conn = MySQLdb.connect(
            host='localhost', user='******', passwd='toor',
            db='newsGather', charset='utf8')
        self.cur = self.conn.cursor()
        # Add the URLs already stored in the database to the URL manager's old_urls
        self.cur.execute("select url from news_info;")
        results = self.cur.fetchall()
        exist_urls = list()
        if results == ():
            pass
        else:
            for i in results:
                exist_urls.append(i[0])
        self.page_url_man.add_old_urls(exist_urls)

    def get_news(self, website):
        # Process the new news URLs held by the page URL manager
        news = list()
        dic = dict()
        count = 0
        new_urls = self.page_url_man.get_new_urls(len(self.page_url_man.new_urls))
        print("Fetching the news pages:")
        pages = self.downloader.download(new_urls, 6)
        print("Analysing the news pages and storing the news....")
        for page in pages:
            dic = self.analyze.Content(website, page['content'])
            if dic:
                dic['url'] = page['url']
                news.append(dic)
                try:
                    print('save ', dic['url'])
                    sql_raw = "INSERT IGNORE INTO news_info (url, post_time, title, keywords, content, source, origin) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', \"%s\")" % (
                        dic['url'], dic['post_time'], dic['title'], dic['keywords'],
                        raw(dic['content']), dic['source'], dic['origin'])
                    self.cur.execute(sql_raw)
                    self.conn.commit()
                    count += 1
                except:
                    print("save error!")
        print('Number of news items crawled: %d' % count)
        return news

    def craw(self, news_num, website, expand_patt, news_patt):
        # News crawling
        # print("hello")
        self.craw_url_man.add_new_url(self.seed[website])
        news = list()
        dic = dict()
        count = 0
        i = 0
        while self.craw_url_man.has_new_url():
            print("Expansion round %d:" % i)
            # print("Fetching the pages to expand:")
            craw_num = len(self.craw_url_man.new_urls)
            if craw_num < 60:
                new_urls = self.craw_url_man.get_new_urls(craw_num)
            else:
                new_urls = self.craw_url_man.get_new_urls(60)
            pages = self.downloader.download(new_urls, 6)
            print("Analysing the pages to expand.....")
            for page in pages:
                craw_new_urls = self.analyze.getUrl(page['content'], expand_patt)
                self.craw_url_man.add_new_urls(craw_new_urls)
                page_new_urls = self.analyze.getUrl(page['content'], news_patt)
                #count = count + len(page_new_urls)
                self.page_url_man.add_new_urls(page_new_urls)
            count = len(self.page_url_man.new_urls)
            if count > news_num:
                news += self.get_news(website)
                break
            else:
                i = i + 1
                news += self.get_news(website)
                continue
        return news
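# --------------------------------------------------------------------------
# The INSERT in get_news() builds SQL by %-formatting, which breaks as soon
# as a title or body contains a quote character. The helper below is an
# alternative sketch that uses MySQLdb's parameter binding instead; the
# column list matches the original statement, save_news is a hypothetical
# name, and raw() is the project's own helper, kept as-is.
def save_news(cur, conn, dic):
    sql = ("INSERT IGNORE INTO news_info "
           "(url, post_time, title, keywords, content, source, origin) "
           "VALUES (%s, %s, %s, %s, %s, %s, %s)")
    cur.execute(sql, (dic['url'], dic['post_time'], dic['title'], dic['keywords'],
                      raw(dic['content']), dic['source'], dic['origin']))
    conn.commit()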
class SpiderMain():
    def __init__(self):
        self.urlDownLoader = HtmlDownloader()
        self.htmlParser = HtmlParser()
        self.urlManager = UrlManager()
        self.jsondataParser = JsonData_Parser()
        self.htmlOutPuter = HtmlOutPuter()

    def _get_from_discover_toplist(self, url):
        urls = self.htmlParser.parse(
            htmlContent=self.urlDownLoader.download(url),
            type='discover_toplist')
        self.urlManager.add_new_urls(urls)

    def _get_from_discover_artist(self, url):
        urls = self.htmlParser.parse(
            htmlContent=self.urlDownLoader.download(url),
            type='discover_artist')
        self.urlManager.add_new_urls(urls)

    def _get_from_artist(self, url):
        results = self.htmlParser.parse(
            htmlContent=self.urlDownLoader.download(url),
            type='artist')
        for name, urls in results.items():
            print(name)
            self.urlManager.add_new_urls(urls)

    def _get_from_song(self, url):
        tmp = {}
        name = self.htmlParser.parse(
            htmlContent=self.urlDownLoader.download(url),
            type='song')
        print("Collecting: " + name)
        comments = self.jsondataParser.parse(
            self.urlDownLoader.downloadJsonData(url))
        tmp[name] = comments
        self.htmlOutPuter.collect_datas(tmp)

    def _parse_url(self, url):
        res = ''
        SONG = 'song'
        DISCOVER = 'discover'
        ARTIST = 'artist'
        TOPLIST = 'toplist'
        if url.find(DISCOVER) != -1:
            res += DISCOVER
        if url.find(ARTIST) != -1:
            if res != '':
                res += '_' + ARTIST
            else:
                res += ARTIST
        if url.find(TOPLIST) != -1:
            if res != '':
                res += '_' + TOPLIST
            else:
                res += TOPLIST
        if url.find(SONG) != -1:
            res += SONG
        return res

    def craw(self, rootUrl, direction=""):
        # Strip the '#/' fragment marker from the root URL if present
        if rootUrl.find('#') != -1:
            pos = rootUrl.find('#')
            rootUrl = rootUrl[:pos] + rootUrl[pos + 2:]
        self.urlManager.add_new_url(rootUrl)
        while self.urlManager.has_new_url():
            url = self.urlManager.get_url()
            # Dispatch to the handler that matches the URL type, e.g. _get_from_song
            methodName = '_get_from_' + self._parse_url(url)
            method = getattr(self, methodName, None)
            if method is not None:
                method(url)
        self.htmlOutPuter.output_html(direction=direction)
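# --------------------------------------------------------------------------
# _parse_url maps substrings of the URL to a handler suffix, and craw() then
# dispatches through getattr to the matching _get_from_* method. Illustrative
# examples (the URLs are made up, and constructing SpiderMain assumes the
# collaborating classes are importable from the same project):
# spider = SpiderMain()
# spider._parse_url('https://music.163.com/discover/toplist')  # -> 'discover_toplist', handled by _get_from_discover_toplist
# spider._parse_url('https://music.163.com/song?id=123456')    # -> 'song', handled by _get_from_song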