Example #1
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, url):
        count = 1
        # Seed the URL manager with the start page
        self.urls.add_new_url(url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                html_cont = self.downloader.download(new_url)
                new_urls, html_data = self.parser.parse(new_url, html_cont)
                # Queue the newly discovered links for later crawling
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(html_data)
                print("%d craw success : %s" % (count, new_url))
                if count >= 10:  # stop after ten pages
                    break
                count += 1
            except Exception as e:
                print(str(e))
                print("%d craw failed : %s" % (count, new_url))
        self.outputer.output()
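Example #1 never defines the four collaborators it wires together. A minimal sketch of what they might look like follows; the class names and method signatures match the calls above, while the bodies are illustrative assumptions, not the original project's code.

# Minimal stand-ins for the collaborators used above; the interfaces match
# the calls in Example #1, but the bodies are illustrative guesses.
import urllib.request


class UrlManager:
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    def add_new_url(self, url):
        if url and url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        for url in urls or []:
            self.add_new_url(url)

    def has_new_url(self):
        return bool(self.new_urls)

    def get_new_url(self):
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url


class HtmlDownloader:
    def download(self, url):
        with urllib.request.urlopen(url) as resp:
            return resp.read().decode('utf-8', errors='replace')


class HtmlParser:
    def parse(self, page_url, html_cont):
        # A real parser would extract links and fields here, e.g. with
        # BeautifulSoup or lxml; this stub finds nothing new.
        return set(), {'url': page_url}


class HtmlOutputer:
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        self.datas.append(data)

    def output(self):
        for data in self.datas:
            print(data)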
Example #2
# Control-node method; assumes `from multiprocessing import Queue`,
# `from os import path`, and `import time` at module level.
def url_manager_proc(self, url_q: Queue, conn_q: Queue, root_url):
    print('url manager process start...')
    url_manager = UrlManager()
    url_manager.add_new_url(root_url)
    print('url manager process started...')
    while True:
        while url_manager.has_new_url():
            new_url = url_manager.get_new_url()
            print('new_url', new_url)
            # Send the new URL to a crawler node
            url_q.put(new_url)
            # Stop once 2000 links have been crawled, and save the progress
            if url_manager.old_url_size() > 2000:
                # Tell the crawler nodes that the job is finished
                url_q.put('end')
                print('control node issued the end notification')
                # Shut down the control node, saving the state of both sets
                url_manager.save_process(path.join('dist', 'new_urls.txt'),
                                         url_manager.new_urls)
                url_manager.save_process(path.join('dist', 'old_urls.txt'),
                                         url_manager.old_urls)
                return
        # Add the URLs received from result_solve_proc to the URL manager
        try:
            if not conn_q.empty():
                urls = conn_q.get()
                url_manager.add_new_urls(urls)
        except BaseException:
            time.sleep(0.1)
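On the other side of url_q sits a crawler node that consumes URLs until it sees the 'end' sentinel. The original worker is not shown here, so the following is a hypothetical sketch; spider_work, result_q, and the downloader/parser calls are assumptions.

def spider_work(url_q, result_q):
    # Hypothetical crawler-node loop matching the 'end' protocol above.
    downloader = HtmlDownloader()
    parser = HtmlParser()
    while True:
        url = url_q.get()
        if url == 'end':
            result_q.put('end')  # pass the shutdown signal along
            break
        try:
            html = downloader.download(url)
            new_urls, data = parser.parse(url, html)
            # A result handler would forward new_urls back through conn_q
            result_q.put({'new_urls': new_urls, 'data': data})
        except Exception as e:
            print('crawl failed for %s: %s' % (url, e))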
Example #3
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.output = HtmlOutputer()

    def craw(self, root_url, page_amount=5, time_sleep=None):
        count = 1
        # Add the first URL to be crawled
        self.urls.add_new_url(root_url)
        # While the set still holds URLs, take one and request it;
        # leave the loop once no links remain.
        while self.urls.has_new_url():
            try:
                # Start crawling
                new_url = self.urls.get_new_url()
                print(f'craw {count}: {new_url}')
                # Request the URL and get back its HTML
                html_content = self.downloader.download(new_url)
                # Parse the HTML with XPath to get the data we need
                new_urls, new_data = self.parser.parse(html_content)
                # Add the links found on this entry page to the URL
                # manager so they get crawled in turn
                self.urls.add_new_urls(new_urls)
                self.output.collect_data(new_url, new_data)
                count += 1
                if count > page_amount:
                    break

                time.sleep(time_sleep or 2)  # be polite between requests
            except Exception as e:
                print(e)
                print(f'crawl failed: {new_url}')
        self.output.output_html()
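A possible entry point for Example #3; the root URL is illustrative (the "entry page" wording suggests a Baidu Baike style wiki), not taken from the original.

if __name__ == '__main__':
    # Illustrative only: the root URL and the limits are assumptions.
    spider = SpiderMain()
    spider.craw('https://baike.baidu.com/item/Python', page_amount=10,
                time_sleep=1)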
Example #4
File: spider.py / Project: lover-520/Crawler
class Spider():
    def __init__(self):
        self.manager = UrlManager()
        self.downloader = HTMLDownloader()
        self.parser = HTMLParser()
        self.output = DataOutput()

    def crawl(self, root_url):
        self.manager.add_new_url(root_url)
        while (self.manager.has_new_url()
               and self.manager.old_urls_size() < 50):
            try:
                new_url = self.manager.get_new_url()
                html = self.downloader.download(new_url)
                new_urls, data = self.parser.parser(new_url, html)
                self.manager.add_new_urls(new_urls)
                self.output.store_data(data)
                print("已经抓取%s个链接" % self.manager.old_urls_size())
            except Exception as e:
                print(e)
        self.output.output_html()
Example #5
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, root_url):
        count = 1
        # Hand the root URL to the URL manager
        self.urls.add_new_url(root_url)

        # Main page-crawling loop
        while self.urls.has_new_url():
            try:
                # Take one URL that is waiting to be crawled
                new_url = self.urls.get_new_url()
                print('craw %d: %s' % (count, new_url))

                # Download the content behind that URL
                html_cont = self.downloader.download(new_url)

                # Parse the downloaded content to get new URLs and new data
                new_urls, new_data = self.parser.parse(new_url, html_cont)

                # Add the newly found URLs to the URL manager
                self.urls.add_new_urls(new_urls)

                # Hand the new data to the outputer
                self.outputer.collect_data(new_data)

                if count == 100:
                    break

                count += 1
            except Exception as e:
                print('craw failed:', e)

        self.outputer.output_html()
Example #6
def url_manager_proc(self, url_q, conn_q, root_url):
    url_manager = UrlManager()
    url_manager.add_new_url(root_url)
    print(url_q)
    while True:
        while url_manager.has_new_url():
            new_url = url_manager.get_new_url()
            url_q.put(new_url)
            print('old_url=%s' % url_manager.old_url_size())
            # Stop after 2000 links and persist both URL sets
            if url_manager.old_url_size() > 2000:
                url_q.put('end')
                print('control node issued the end notification!')
                url_manager.save_progress('new_urls.txt',
                                          url_manager.new_urls)
                url_manager.save_progress('old_urls.txt',
                                          url_manager.old_urls)
                return
        # Feed URLs coming back from the result handler into the manager
        try:
            if not conn_q.empty():
                urls = conn_q.get()
                url_manager.add_new_urls(urls)
        except BaseException:
            time.sleep(0.1)
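save_progress presumably serializes a URL set to disk so a later run can resume where this one stopped. Its implementation is not shown in the snippet, so this is a plausible sketch using pickle, with a hypothetical load_progress counterpart; like Examples #2 and #6, the methods are shown standalone.

import pickle


def save_progress(self, path, data):
    # Assumed implementation: dump the URL set to a file.
    with open(path, 'wb') as f:
        pickle.dump(data, f)


def load_progress(self, path):
    # Hypothetical counterpart: restore a saved set, or start fresh
    # if no progress file exists yet.
    try:
        with open(path, 'rb') as f:
            return pickle.load(f)
    except FileNotFoundError:
        print('no progress file at %s, starting from scratch' % path)
        return set()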
Example #7
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()  # get a new URL
                html_cont = self.downloader.download(new_url)  # download its content
                new_urls, new_data = self.parser.parse(new_url,
                                                       html_cont)  # parse the content
                self.urls.add_new_urls(new_urls)  # store the parsed URLs in the manager
                self.outputer.collect_data(new_data)  # collect the parsed data
                if count == 200:
                    break
                count = count + 1
            except Exception as e:
                print("craw failed:", e)
        self.outputer.output_html()
Example #8
class SpiderMain():
    """爬虫程序主模块"""
    def __init__(self):
        """构造函数,初始化属性"""
        self.urls = UrlManager()
        self.log = MyLog("spider_main", "logs")
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()
        #self.util=utill.DBConn()

    def craw(self, root_url):
        """爬虫入口函数"""
        areas = {
            "gulou": 100,
            "jianye": 72,
            "qinhuai": 100,
            "xuanwu": 67,
            "yuhuatai": 32,
            "qixia": 62,
            "baijiahu": 33,
            "chalukou1": 26,
            "jiangningqita11": 3,
            "dongshanzhen": 29,
            "jiangningdaxuecheng": 15,
            "jiulonghu": 12,
            "jiangjundadao11": 22,
            "kexueyuan": 9,
            "qilinzhen": 42,
            "tiexinqiao": 9,
            "pukou": 100,
            "liuhe": 1,
        }

        #areas = {"gulou":1}

        # 1. Collect the links of all second-hand-housing detail pages and
        #    put them into the URL manager
        for area, pg_sum in areas.items():
            for num in range(1, pg_sum + 1):
                # 1.1 Build the listing-page address, e.g.
                #     https://nj.lianjia.com/ershoufang/gulou/pg2/
                pg_url = root_url + area + "/pg" + str(num) + "/"
                self.log.logger.info("1.1 page address: " + pg_url)
                print("1.1 page address: " + pg_url)
                # 1.2 Run the downloader to fetch the page
                try:
                    html_cont = self.downloader.download(pg_url)
                except Exception as e:
                    self.log.logger.error("1.2 exception while downloading the page: " + repr(e))
                    time.sleep(60 * 30)
                else:
                    # 1.3 Parse the listing page, collect the links of the
                    #     detail pages, and add them all to the URL manager
                    try:
                        ershoufang_urls = self.parser.get_erhoufang_urls(
                            html_cont)
                    except Exception as e:
                        self.log.logger.error("1.3 exception while parsing the page: " + repr(e))
                    else:
                        self.urls.add_new_urls(ershoufang_urls)
                        # Pause a random whole number of seconds in [0, 3]
                        time.sleep(random.randint(0, 3))

        time.sleep(60 * 20)  # long pause before the detail-page phase
        # 2. Parse each second-hand-housing detail page
        id = 1
        stop = 1
        while self.urls.has_new_url():
            # 2.1 Get a URL
            try:
                detail_url = self.urls.get_new_url()
                self.log.logger.info("2.1 detail-page address: " + detail_url)
                print("2.1 detail-page address: " + detail_url)
            except Exception as e:
                print("2.1 exception while getting the URL")
                self.log.logger.error("2.1 exception while getting the URL: " + repr(e))
                continue  # detail_url is unbound here, so skip this round

            # 2.2 Download the page
            try:
                detail_html = self.downloader.download(detail_url)
            except Exception as e:
                self.log.logger.error("2.2 exception while downloading the page: " + repr(e))
                self.urls.add_new_url(detail_url)  # requeue it for a retry
                time.sleep(60 * 30)
            else:
                # 2.3 Parse the page
                try:
                    ershoufang_data = self.parser.get_ershoufang_data(
                        detail_html, id)
                except Exception as e:
                    self.log.logger.error("2.3 exception while parsing the page: " + repr(e))
                else:
                    # 2.4 Output the data
                    try:
                        self.outputer.collect_data(ershoufang_data)
                    except Exception as e:
                        self.log.logger.error("2.4 exception while outputting the data: " + repr(e))
                    else:
                        print(id)
                        id = id + 1
                        stop = stop + 1
                        # Pause a random whole number of seconds in [0, 3]
                        time.sleep(random.randint(0, 3))
                        if stop == 2500:  # take a long break every 2500 pages
                            stop = 1
                            time.sleep(60 * 20)
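Example #8 depends on a MyLog helper that exposes a .logger attribute. Below is a minimal sketch of such a wrapper around the standard logging module; the constructor signature follows the MyLog("spider_main", "logs") call above, and everything else is an assumption.

import logging
import os


class MyLog:
    # Assumed implementation: a named logger writing to <log_dir>/<name>.log.
    def __init__(self, name, log_dir):
        os.makedirs(log_dir, exist_ok=True)
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging.INFO)
        handler = logging.FileHandler(
            os.path.join(log_dir, name + '.log'), encoding='utf-8')
        handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
        self.logger.addHandler(handler)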
Example #9
class SpiderMain():
    def __init__(self):
        self.urlDownLoader = HtmlDownloader()
        self.htmlParser = HtmlParser()
        self.urlManager = UrlManager()
        self.jsondataParser = JsonData_Parser()
        self.htmlOutPuter = HtmlOutPuter()

    def _get_from_discover_toplist(self, url):
        urls = self.htmlParser.parse(
            htmlContent=self.urlDownLoader.download(url),
            type='discover_toplist')
        self.urlManager.add_new_urls(urls)

    def _get_from_discover_artist(self, url):
        urls = self.htmlParser.parse(
            htmlContent=self.urlDownLoader.download(url),
            type='discover_artist')
        self.urlManager.add_new_urls(urls)

    def _get_from_artist(self, url):
        results = self.htmlParser.parse(
            htmlContent=self.urlDownLoader.download(url), type='artist')
        for name, urls in results.items():
            print(name)
            self.urlManager.add_new_urls(urls)

    def _get_from_song(self, url):
        tmp = {}
        name = self.htmlParser.parse(
            htmlContent=self.urlDownLoader.download(url), type='song')
        print("正收集:" + name)
        comments = self.jsondataParser.parse(
            self.urlDownLoader.downloadJsonData(url))
        tmp[name] = comments
        self.htmlOutPuter.collect_datas(tmp)

    def _parse_url(self, url):
        # Decide which _get_from_* handler matches the URL
        res = ''
        SONG = 'song'
        DISCOVER = 'discover'
        ARTIST = 'artist'
        TOPLIST = 'toplist'
        if DISCOVER in url:
            res += DISCOVER
        if ARTIST in url:
            res += '_' + ARTIST if res else ARTIST
        if TOPLIST in url:
            res += '_' + TOPLIST if res else TOPLIST
        if SONG in url:
            res += SONG
        return res

    def craw(self, rootUrl, direction=""):
        # Remove the '#' and the character after it from SPA-style URLs
        if rootUrl.find('#') != -1:
            pos = rootUrl.find('#')
            rootUrl = rootUrl[:pos] + rootUrl[pos + 2:]
        self.urlManager.add_new_url(rootUrl)
        while self.urlManager.has_new_url():
            url = self.urlManager.get_url()
            methodName = '_get_from_' + self._parse_url(url)
            # A default of None makes the guard below meaningful; without
            # it, getattr raises AttributeError for an unhandled URL type
            method = getattr(self, methodName, None)
            if method is not None:
                method(url)
        self.htmlOutPuter.output_html(direction=direction)
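The discover/toplist/artist/song handlers suggest NetEase Cloud Music URLs, whose fragment form (e.g. https://music.163.com/#/discover/toplist) explains the '#' stripping at the top of craw. A possible invocation follows; the URL and output directory are illustrative, not taken from the original project.

if __name__ == '__main__':
    spider = SpiderMain()
    # '#/' is stripped, so this crawls https://music.163.com/discover/toplist
    spider.craw('https://music.163.com/#/discover/toplist', direction='output')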