import time


# Variant 1: crawl loop with a configurable page limit and a politeness delay.
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.output = HtmlOutputer()

    def craw(self, root_url, page_amount=5, time_sleep=None):
        count = 1
        # Seed the queue with the first URL to crawl.
        self.urls.add_new_url(root_url)
        # Keep going while the manager still holds unvisited URLs.
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print(f'craw {count}: {new_url}')
                # Request the URL and get the raw HTML back.
                html_content = self.downloader.download(new_url)
                # Parse the HTML (e.g. with XPath) into new links and data.
                new_urls, new_data = self.parser.parse(html_content)
                # Queue the <a> links found on this entry page.
                self.urls.add_new_urls(new_urls)
                self.output.collect_data(new_url, new_data)
                count += 1
                if count > page_amount:
                    break
                # Pause between requests; default to 2 seconds.
                time.sleep(time_sleep if time_sleep else 2)
            except Exception as e:
                print(e)
                print(f'craw failed: {new_url}')
        self.output.output_html()
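None of the variants ship the four collaborator classes they instantiate, so minimal sketches follow each one. First, a UrlManager matching the calls made above (add_new_url, add_new_urls, has_new_url, get_new_url); the two-set design is an assumption, but it is the simplest way to avoid re-crawling a page:

class UrlManager(object):
    """Tracks which URLs are still pending and which have been crawled."""

    def __init__(self):
        self.new_urls = set()  # pending URLs
        self.old_urls = set()  # already-crawled URLs

    def add_new_url(self, url):
        # Ignore URLs that are already queued or already crawled.
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) > 0

    def get_new_url(self):
        # Move one URL from the pending set to the visited set.
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url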
# Variant 2: seeds the manager with a set and reports download status per page.
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()            # URL manager
        self.downloader = HtmlDownloader()  # page downloader
        self.parser = HtmlParser()          # page parser
        self.outputer = HtmlOutputer()      # result writer

    def craw(self, root_url):
        count = 1
        origin_set = {root_url}
        self.urls.add_new_urls(origin_set)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('craw %d : %s' % (count, new_url))
                html_cont = self.downloader.download(new_url)
                # Report whether the download succeeded.
                down_stat = 'SUCCESS' if html_cont is not None else 'ERROR'
                print('[Page ID : %d download %s!]' % (count, down_stat))
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 15:
                    break
                count += 1
            except Exception as err:
                print('craw failed! ERROR information: %s' % err)
        self.outputer.output_html()
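The downloader is equally small: every variant treats a None return as a failed fetch. A minimal sketch using only the standard library; the 10-second timeout and the UTF-8 fallback decoding are assumptions:

from urllib import request


class HtmlDownloader(object):
    """Fetches a URL and returns its HTML, or None on failure."""

    def download(self, url):
        if url is None:
            return None
        try:
            response = request.urlopen(url, timeout=10)  # assumed timeout
            if response.getcode() != 200:
                return None  # treat non-200 responses as failures
            return response.read().decode('utf-8', errors='replace')
        except Exception:
            return None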
# Variant 3: same loop, capped at 100 pages.
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, root_url):
        count = 1
        # Seed the URL manager with the root URL.
        self.urls.add_new_url(root_url)
        # Main crawl loop.
        while self.urls.has_new_url():
            try:
                # Take one pending URL.
                new_url = self.urls.get_new_url()
                print('craw %d: %s' % (count, new_url))
                # Download the page content for that URL.
                html_cont = self.downloader.download(new_url)
                # Parse the downloaded page into new URLs and new data.
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                # Feed the discovered URLs back into the manager...
                self.urls.add_new_urls(new_urls)
                # ...and hand the extracted data to the outputer.
                self.outputer.collect_data(new_data)
                if count == 100:
                    break
                count += 1
            except Exception as e:
                print('craw failed: %s' % e)
        self.outputer.output_html()
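The parser turns one fetched page into (new_urls, new_data). A minimal sketch using BeautifulSoup with the signature most variants use, parse(page_url, html_cont) (variant 1 passes only the HTML, variant 5 swaps the argument order); the same-host link filter and the page <title> as the harvested data are placeholder assumptions, to be replaced with site-specific XPath/CSS selectors:

from urllib.parse import urljoin, urlparse

from bs4 import BeautifulSoup


class HtmlParser(object):
    """Extracts follow-up links and one data record from a page."""

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return set(), None
        soup = BeautifulSoup(html_cont, 'html.parser')
        # Collect absolute links that stay on the same host.
        new_urls = set()
        host = urlparse(page_url).netloc
        for link in soup.find_all('a', href=True):
            full_url = urljoin(page_url, link['href'])
            if urlparse(full_url).netloc == host:
                new_urls.add(full_url)
        # Harvest a simple record: the URL and the page title.
        new_data = {
            'url': page_url,
            'title': soup.title.get_text(strip=True) if soup.title else '',
        }
        return new_urls, new_data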
# Variant 4: the most compact form, capped at 200 pages.
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()              # fetch a pending URL
                html_cont = self.downloader.download(new_url)  # download its content
                new_urls, new_data = self.parser.parse(new_url, html_cont)  # parse it
                self.urls.add_new_urls(new_urls)               # queue the parsed URLs
                self.outputer.collect_data(new_data)           # collect the parsed data
                if count == 200:
                    break
                count += 1
            except Exception:
                print('craw failed')
        self.outputer.output_html()
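The outputer buffers whatever collect_data receives and renders everything once at the end. A minimal sketch assuming the dict records produced by the parser sketch above (variant 1 also passes the URL separately); the output file name is arbitrary:

class HtmlOutputer(object):
    """Buffers collected records and writes them out as an HTML table."""

    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is not None:
            self.datas.append(data)

    def output_html(self, path='output.html'):
        with open(path, 'w', encoding='utf-8') as fout:
            fout.write('<html><body><table border="1">\n')
            for data in self.datas:
                fout.write('<tr><td>%s</td><td>%s</td></tr>\n'
                           % (data.get('url', ''), data.get('title', '')))
            fout.write('</table></body></html>\n')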
class SpiderMain(object): """docstring for SpiderMain""" def __init__(self): self.urlManage = UrlManage() self.downloader = HtmlDownloader() self.parser = HtmlParser() self.outputer = HtmlOutputer() def craw(self,url): self.urlManage.add_new_url(url) count = 1 while self.urlManage.has_new_url(): url = self.urlManage.get_new_url() print '%dth page,address:%s' % (count,url) html_content = self.downloader.downloadPage(url) new_urls,new_data = self.parser.parse(html_content,url) self.urlManage.add_new_urls(new_urls) self.outputer.collect_data(new_data) if count == 10: break count = count + 1 self.outputer.output_html()