Example #1
 def url_manage_proc(self, url_q, conn_q, root_url, page_num):
     url_manager = UrlManager()
     url_manager.add_new_url(root_url)
     print('url_manager is working...')
     while True:
         while url_manager.has_new_url():
             # Get a new URL from the URL manager
             new_url = url_manager.get_new_url()
             # Send the new URL to the worker nodes
             url_q.put(new_url)
             # Once more than page_num links have been crawled, stop the crawler and save progress
             if url_manager.old_urls_size() > page_num:
                 # Notify the crawler nodes that the work is finished
                 url_q.put('end')
                 print('Control node sent the stop notification!')
                 # Shut down this node and save its state at the same time
                 url_manager.save_process('new_urls.txt',
                                          url_manager.new_urls)
                 url_manager.save_process('old_urls.txt',
                                          url_manager.old_urls)
                 return
             # URLs received from result_solve_proc are added to the URL manager below
             print('url control working..., solve result')
         try:
             if not conn_q.empty():
                 urls = conn_q.get()
                 url_manager.add_new_urls(urls)
         except Exception as e:
             time.sleep(1)  # pause briefly
         print('crawled page count:', url_manager.old_urls_size())
         time.sleep(5)
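Every example on this page drives UrlManager through the same small interface: add_new_url, add_new_urls, has_new_url, get_new_url, old_url_size (old_urls_size in example #1), and save_progress (save_process in example #1). The class below is a minimal sketch of that interface, assuming two sets plus pickle for persistence; it illustrates what the examples expect, not the implementation from any of these projects.

import pickle

class UrlManager:
    """Minimal sketch of the interface used in these examples (assumed, not the original)."""

    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs already handed out

    def add_new_url(self, url):
        if url and url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        for url in urls or []:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) > 0

    def get_new_url(self):
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url

    def old_url_size(self):
        return len(self.old_urls)

    old_urls_size = old_url_size  # example #1 uses the plural spelling

    def save_progress(self, path, data):
        # Persist a set of URLs; pickle is an assumption, the real projects may serialize differently
        with open(path, 'wb') as f:
            pickle.dump(data, f)

    save_process = save_progress  # example #1 uses save_process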
Example #2
    def url_manager_proc(self, url_q, conn_q, root_url):
        url_manager = UrlManager()
        # url_manager.add_new_url(root_url)
        while True:
            while url_manager.has_new_url():
                # Get a new url from the URL manager
                new_url = url_manager.get_new_url()
                print(new_url)
                # Send the new URL to the worker nodes
                url_q.put(new_url)
                print('old_url=', url_manager.old_url_size())

                # Once more than 2000 links have been crawled, shut down and save progress
                if url_manager.old_url_size() > 2000:
                    # Notify the crawler nodes that the work is finished
                    url_q.put('end')
                    print('Control node sent the stop notification!')

                    # Shut down the manager node and save the state of both sets
                    url_manager.save_progress('new_urls.txt',
                                              url_manager.new_urls)
                    url_manager.save_progress('old_urls.txt',
                                              url_manager.old_urls)
                    return
            # Add the urls received from result_solve_proc to the URL manager
            try:
                if not conn_q.empty():
                    urls = conn_q.get()
                    url_manager.add_new_urls(urls)
            except BaseException as e:
                time.sleep(0.1)  # pause briefly
Example #3
File: main.py  Project: gloomyline/ML
def crawl(init_url):
    url_pool = UrlManager()
    downloader = Downloader()
    parser = HtmlParser()
    outputer = Outputer()
    temp_url = init_url
    while temp_url:
        driver = downloader.download(temp_url)
        content, temp_url = parser.parse(driver)
        outputer.write(content)
    outputer.close()
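crawl above follows a single-chain pattern: download the current page, parse it into content plus the URL of the next page, write the content out, and stop once the parser returns no next URL. A hypothetical invocation (the start URL is a placeholder, not taken from the gloomyline/ML project) would be:

if __name__ == '__main__':
    # Placeholder start URL for illustration only
    crawl('https://example.com/novel/chapter-1')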
Example #4
File: anjuke.py  Project: silyman1/demo
 def __init__(self, ):
     self.count = 0
     self.wcount = 0
     self.mylock = Lock()
     # 'file()' is Python 2 only; open in binary append so the BOM bytes below can be written
     self.csvfile = open('sz.csv', 'ab')  # ks.csv
     self.csvfile.write(codecs.BOM_UTF8)
     self.item_queue = Queue()
     self.headers = {
         'User-Agent':
         'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.96 Safari/537.36'
     }
     self.proxies = {
         'http': 'https://121.61.0.33:9999',
         "https": 'https://121.61.0.33:9999'
     }
     self.mysign = True
     #https://suzhou.anjuke.com/community
     self.rawurl = 'https://suzhou.anjuke.com/community/'
     self.urlmanager = UrlManager()
Example #5
 def url_manager_proc(self, url_q, conn_q, root_url):
     url_manager = UrlManager()
     url_manager.add_new_url(root_url)
     while True:
         if url_manager.has_new_url():
             new_url = url_manager.get_new_url()
             url_q.put(new_url)
             print('old_url=', url_manager.old_url_size())
             if url_manager.old_url_size() > 2000:
                 url_q.put('end')
                 print('Manager notify ending!')
                 url_manager.save_progress('new_urls.txt',
                                           url_manager.new_urls)
                 url_manager.save_progress('old_urls.txt',
                                           url_manager.old_urls)
                 return
         try:
             if not conn_q.empty():
                 urls = conn_q.get()
                 url_manager.add_new_urls(urls)
         except BaseException as e:
             time.sleep(0.1)
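In examples #1, #2, and #5, url_manager_proc is meant to run as the control-node process of a distributed crawler: url_q carries URLs out to the worker (crawler) nodes and conn_q brings newly discovered URLs back, with the string 'end' used as a shutdown sentinel. The sketch below shows one assumed way to wire two such processes together with multiprocessing; the queue names and the sentinel come from the examples, while control_proc, worker_proc, and the URLs are simplified placeholders.

import multiprocessing
import time

def control_proc(url_q, conn_q, root_url):
    # Stand-in for the url_manager_proc shown above; the real logic lives in that method
    url_q.put(root_url)
    time.sleep(1)
    url_q.put('end')  # same sentinel the examples use

def worker_proc(url_q, conn_q):
    # Placeholder worker node: consumes URLs until the 'end' sentinel arrives
    while True:
        url = url_q.get()
        if url == 'end':
            break
        # ... download and parse `url` here ...
        conn_q.put(['https://example.com/next'])  # hypothetical newly found links

if __name__ == '__main__':
    url_q = multiprocessing.Queue()   # control node -> worker nodes
    conn_q = multiprocessing.Queue()  # worker nodes -> control node
    procs = [
        multiprocessing.Process(target=control_proc,
                                args=(url_q, conn_q, 'https://example.com/')),
        multiprocessing.Process(target=worker_proc, args=(url_q, conn_q)),
    ]
    for p in procs:
        p.start()
    for p in procs:
        p.join()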
Example #6
File: spider.py  Project: yfgeek/spiders
 def __init__(self):
     self.urls = UrlManager()  # URL manager
Example #7
 def __init__(self):
     self.manage = UrlManager()
     self.parser = HtmlParser()
     self.downloader = Htmldownloader()
     self.output = DataOutput()