Example #1
 def query_question(self, question_id):
     """
     :rtype: Question_Info
     """
     question = DB.query_row(u'select * from Question where question_id in ({question_id})'.format(question_id=question_id))
     question = self.format_question(question)  # wrap into the standard info format
     return question
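The helper above formats question_id straight into the SQL text. For comparison, a minimal sketch of the same lookup against a plain DB-API connection (sqlite3 here, assuming the same Question schema, not the project's DB wrapper), passing the id as a bound parameter instead of interpolating it:

    import sqlite3

    def query_question_row(conn, question_id):
        # Same lookup as query_question above, but the id is passed as a
        # bound parameter rather than formatted into the SQL string.
        cursor = conn.execute(
            'select * from Question where question_id = ?', (question_id,))
        return cursor.fetchone()

    # usage sketch: row = query_question_row(sqlite3.connect('crawler.db'), 42)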
Example #5
 def query_answer_list_by_author_page_id(self, author_page_id):
     # look up the corresponding author_id first
     author_info = DB.query_row(u'select author_id from Author where author_page_id="{author_page_id}"'.format(author_page_id=author_page_id))
     author_id = author_info[u'author_id']
     raw_answer_list = DB.query_all(u'select * from Answer where author_id="{author_id}"  {order_by} '.format(author_id=author_id, order_by=Config.answer_order_by))
     answer_list = []
     for raw_answer in raw_answer_list:
         answer_list.append(self.format_answer(raw_answer))
     return answer_list
Example #6
    def query_answer(self, answer_id):
        """

        :type answer_id:int
        :return:
        """
        answer = DB.query_row(u'select * from Answer where answer_id in ({answer_id})'.format(answer_id=answer_id))
        answer = self.format_answer(answer)
        return answer
Example #7
    def extract_topic(self):
        raw_topic = DB.query_row(u'select * from Topic where topic_id="{topic_id}"'.format(topic_id=self.task.topic_id))
        self.info_page = Topic_Info(raw_topic)

        answer_list = self.query_answer_list(self.info_page.best_answer_id_list.split(','))
        question_id_dict = OrderedDict()
        #   fetch the corresponding Question object for each answer
        for answer in answer_list:
            if answer.question_id not in question_id_dict:
                question_id_dict[answer.question_id] = Question(self.query_question(answer.question_id))
            question_id_dict[answer.question_id].append_answer(answer)
        for question_id in question_id_dict:
            self.question_list.append(question_id_dict[question_id])
        return
Example #9
    def extract_collection(self):
        raw_collection = DB.query_row(
            u'select * from Collection where collection_id="{collection_id}"'.format(collection_id=self.task.collection_id))
        self.info_page = Collection_Info(raw_collection)

        answer_list = self.query_answer_list(self.info_page.collected_answer_id_list.split(','))
        question_id_dict = OrderedDict()
        #   fetch the corresponding Question object for each answer
        for answer in answer_list:
            if answer.question_id not in question_id_dict:
                question_id_dict[answer.question_id] = Question(self.query_question(answer.question_id))
            question_id_dict[answer.question_id].append_answer(answer)
        for question_id in question_id_dict:
            self.question_list.append(question_id_dict[question_id])
        return
Example #11
    def extract_author(self):
        raw_author = DB.query_row(u'select * from Author where author_page_id="{author_page_id}" '.format(author_page_id=self.task.author_page_id))
        self.info_page = Author_Info(raw_author)

        answer_list = self.query_answer_list_by_author_page_id(self.info_page.author_page_id)
        question_id_dict = OrderedDict()
        #   fetch the corresponding Question object for each answer
        for answer in answer_list:
            if answer.question_id not in question_id_dict:
                db_question_info = self.query_question(answer.question_id)
                if not db_question_info:
                    #   skip this answer when the query returns nothing
                    continue
                question_id_dict[answer.question_id] = Question(db_question_info)
            question_id_dict[answer.question_id].append_answer(answer)
        for question_id in question_id_dict:
            self.question_list.append(question_id_dict[question_id])
        return
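extract_topic, extract_collection and extract_author all finish with the same grouping step: answers are bucketed into Question objects keyed by question_id, in first-seen order, before being appended to self.question_list. That step, sketched on its own with plain dicts standing in for the project's Answer and Question classes:

    from collections import OrderedDict

    def group_answers_by_question(answer_list):
        # Bucket answers under their question_id, preserving first-seen order,
        # just like the OrderedDict loop in the extract_* methods above.
        grouped = OrderedDict()
        for answer in answer_list:
            grouped.setdefault(answer['question_id'], []).append(answer)
        return list(grouped.values())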
    def catch(account_id):
        # the core crawling logic starts here

        article_url_index_list = []
        #   determine the maximum page number
        url = 'http://www.taoguba.com.cn/Article/' + account_id + '/1'
        front_page_content = Http.get_content(url)
        star_page = 1

        with open('ReadList.txt', 'r') as read_list:
            read_list = read_list.readlines()
            for line in read_list:
                if str(line).__contains__('#'):
                    split_url = line.split('#')[0]
                    trgId = split_url.split('/')[-2]
                    if trgId == account_id:
                        pg = (split_url.split('/')[-1])
                        print pg
                        star_page = int(pg)

                        if star_page == 0:
                            star_page = 1
                        else:
                            print star_page

        max_page = 2
        dom = BeautifulSoup(front_page_content, "lxml")
        list_pcyc_l_ = dom.find_all('div', class_="left t_page01")
        try:
            for tgo_tgo_ in list_pcyc_l_:
                linkl = tgo_tgo_.findAll('a')
                tarUrl = linkl[0].get('href')
                max_page = int(tarUrl.split('/')[3])

        except IndexError as e:
            max_page = 1
        column_info = TGBColumnParser(front_page_content).get_column_info()

        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))
        index_work_set = OrderedDict()
        #   collect the article URLs from each page
        # star_page = 100
        for raw_front_page_index in range(star_page, max_page + 1):
            request_url = 'http://www.taoguba.com.cn/Article/' + account_id + '/' + str(
                raw_front_page_index)
            article_url_index_list.append(request_url)

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(
            article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            print 'query : ' + article_url_index
            article_db = DB.query_row(
                'select count(*) as article_count from Article where article_id = "{}"'
                .format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = article_url_index

            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(
                    u"开始抓取{countert}号文章,剩余{article_count}篇".format(
                        countert=article_url_index,
                        article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                article_info = TGBArticleParser(
                    request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return
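Every catch() variant in these examples drains its URLs the same way: pending requests sit in an OrderedDict work set, the crawler makes repeated passes over it (bounded by re_catch_counter), and an entry is removed only once it has been fetched and saved. Reduced to a skeleton, with handle as a hypothetical callback standing in for the per-site fetch-and-parse code:

    from collections import OrderedDict

    def drain_work_set(work_set, handle, max_passes=20):
        # Retry pending URLs until the work set is empty or the pass budget runs out.
        passes = 0
        while len(work_set) > 0 and passes <= max_passes:
            passes += 1
            for key in list(work_set):
                if handle(work_set[key]):
                    del work_set[key]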
    def catch(account_id):
        # the core crawling logic starts here

        mock_sleep_time = 0.5
        base_sleep_time = 1
        max_sleep_time = 1

        article_url_index_list = []
        #   determine the maximum page number

        url = 'http://www.jintiankansha.me/tag/{}?page=1'.format(account_id)

        column_info = JinWanKanSaEmptColumnParser('').get_column_info()

        column_info[u'column_id'] = account_id
        dt = datetime.datetime.now()
        column_info[u'title'] = u"AI_{}".format(dt.strftime("%Y-%m-%d"))
        max_page = 1

        typeToTry = 'tag'

        with open('ReadList.txt', 'r') as read_list:
            read_list = read_list.readlines()
            for line in read_list:
                split_url = line.split('#')[0]
                if split_url.split('/')[-1] == account_id:
                    dt = datetime.datetime.now()
                    column_info[u'title'] = u"{}_{}".format(
                        line.split('#')[1], dt.strftime("%Y-%m-%d"))

                    max_page = int(line.split('#')[2])

                    typeToTry = str(int(line.split('#')[-1])).strip('\n')

        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))
        index_work_set = OrderedDict()
        #   collect the article URLs from each page
        for raw_front_page_index in range(0, max_page + 1):
            # request_url = u'http://www.jintiankansha.me/column/{}?page={}'.format(account_id, raw_front_page_index)
            request_url = u'http://www.jintiankansha.me/{}/{}?page={}'.format(
                typeToTry, account_id, raw_front_page_index)
            print request_url
            index_work_set[raw_front_page_index] = request_url

        re_catch_counter = 0
        catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for raw_front_page_index in index_work_set:
                catch_counter += 1
                Debug.logger.info(u'第『{}』遍抓取数据'.format(re_catch_counter))
                request_url = index_work_set[raw_front_page_index]
                Debug.logger.info(
                    u"开始抓取第{raw_front_page_index}页中的文章链接,剩余{max_page}页".format(
                        raw_front_page_index=raw_front_page_index,
                        max_page=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                soup = BeautifulSoup(request_url_content, 'html.parser')
                list_p_list = soup.find_all('span', class_="item_title")

                for tgo_right in list_p_list:
                    for link in tgo_right.findAll('a'):
                        ttt = str(link.get('href'))
                        print ttt
                        if not (ttt is None):
                            article_url_index_list.append(ttt)

                del index_work_set[raw_front_page_index]

        # article_url_index_list.append('http://www.jintiankansha.me/t/u8MygoqKI8')

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(
            article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            print 'query : ' + article_url_index
            article_db = DB.query_row(
                'select count(*) as article_count from Article where article_id = "{}"'
                .format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = article_url_index

            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(
                    u"开始抓取  {countert} 号文章,剩余{article_count}篇".format(
                        countert=article_url_index,
                        article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)
                time.sleep(mock_sleep_time)
                if len(request_url_content) == 0:
                    random_sleep_time = base_sleep_time + random.randint(
                        0, max_sleep_time) / 100.0
                    Debug.logger.info(u"随机休眠{}秒".format(random_sleep_time))
                    time.sleep(random_sleep_time)
                    continue
                article_info = JinWanKanSaArticleParser(
                    request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return
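This variant also throttles itself: it sleeps for mock_sleep_time after each fetch, and when a response comes back empty it backs off for a base delay plus a small random jitter before retrying. That back-off on its own:

    import random
    import time

    def backoff_sleep(base_sleep_time=1, max_sleep_time=1):
        # Base delay plus up to max_sleep_time/100 seconds of random jitter,
        # mirroring the sleep used above when a fetch returns no content.
        random_sleep_time = base_sleep_time + random.randint(0, max_sleep_time) / 100.0
        time.sleep(random_sleep_time)
        return random_sleep_time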
Example #15
    def catch(account_id):
        # the core crawling logic starts here

        mock_sleep_time = 0.5
        base_sleep_time = 10
        max_sleep_time = 10

        article_url_index_list = []
        #   determine the maximum page number

        column_info = Todo3ColumnParser('').get_column_info()
        column_info[u'column_id'] = account_id
        column_info[u'title'] = "新能源汽车"
        column_info['article_count'] = 0
        column_info['follower_count'] = 0
        column_info['description'] = ''
        column_info['image_url'] = ''

        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])
        star_page = 1
        max_page = 1

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))
        index_work_set = OrderedDict()
        #   collect the article URLs from each page
        for raw_front_page_index in range(star_page, max_page):
            request_url = u'https://post.smzdm.com/fenlei/xinnengyuanche/p{}/'.format(
                raw_front_page_index)
            index_work_set[raw_front_page_index] = request_url

        re_catch_counter = 0
        catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for raw_front_page_index in index_work_set:
                catch_counter += 1
                Debug.logger.info(u'第『{}』遍抓取数据'.format(re_catch_counter))
                request_url = index_work_set[raw_front_page_index]
                Debug.logger.info(
                    u"开始抓取第{raw_front_page_index}页中的文章链接,剩余{max_page}页".format(
                        raw_front_page_index=raw_front_page_index,
                        max_page=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                soup = BeautifulSoup(request_url_content, 'lxml')
                list_p_list = soup.find_all('div',
                                            class_='list-border clearfix')
                for p in list_p_list:
                    # print p
                    list_pcyc_li = p.find_all('a')
                    li = list_pcyc_li[0]

                    tarUrl = li.get('href')
                    ttt = str(tarUrl).split("#")[-1]
                    print ttt
                    if not (ttt is None):
                        article_url_index_list.append(ttt)

                del index_work_set[raw_front_page_index]

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(
            article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            print 'query : ' + article_url_index
            article_db = DB.query_row(
                'select count(*) as article_count from Article where article_id = "{}"'
                .format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = article_url_index

            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(
                    u"开始抓取{countert}号文章,剩余{article_count}篇".format(
                        countert=article_url_index,
                        article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                article_info = Todo3ArticleParser(
                    request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return
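Before queueing an article, each crawler checks whether it is already stored by counting rows for that article_id; only unseen URLs enter the work set. A condensed, stand-alone sketch of that filter written against a plain sqlite3 connection rather than the project's DB wrapper:

    import sqlite3

    def filter_unseen(conn, article_ids):
        # Keep only ids not yet present in the Article table (the same
        # count(*) check the snippets perform via DB.query_row).
        unseen = []
        for article_id in article_ids:
            row = conn.execute(
                'select count(*) from Article where article_id = ?',
                (article_id,)).fetchone()
            if row[0] == 0:
                unseen.append(article_id)
        return unseen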
    def catch(account_id):
        # the core crawling logic starts here

        mock_sleep_time = 0.5
        base_sleep_time = 10
        max_sleep_time = 10

        article_url_index_list = []
        #   determine the maximum page number
        url = 'https://www.wuxiareview.com/category/{}'.format(account_id)
        front_page_content = Http.get_content(url)

        column_info = WuXiaColumnParser(front_page_content).get_column_info()
        column_info[u'column_id'] = account_id
        max_page = 2
        if account_id == 'daidai':

            column_info[u'title'] = "吃瓜群众岱岱"
            max_page = 1
        elif account_id == 'gzmdzst':

            column_info[u'title'] = "顾子明的政事堂"
            max_page = 1
        else:

            column_info[u'title'] = "时文"
            max_page = 2

        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))
        index_work_set = OrderedDict()
        #   collect the article URLs from each page
        for raw_front_page_index in range(0, max_page):
            request_url = u'https://www.wuxiareview.com/category/{}/{}/'.format(
                account_id, raw_front_page_index)
            index_work_set[raw_front_page_index] = request_url

        re_catch_counter = 0
        catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for raw_front_page_index in index_work_set:
                catch_counter += 1
                Debug.logger.info(u'第『{}』遍抓取数据'.format(re_catch_counter))
                request_url = index_work_set[raw_front_page_index]
                Debug.logger.info(
                    u"开始抓取第{raw_front_page_index}页中的文章链接,剩余{max_page}页".format(
                        raw_front_page_index=raw_front_page_index,
                        max_page=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                soup = BeautifulSoup(request_url_content, 'lxml')
                list_p_list = soup.find_all('article', class_="excerpt")
                for p in list_p_list:
                    # print p
                    list_pcyc_li = p.find_all('a')
                    for li in list_pcyc_li:
                        # print li.text
                        tarUrl = li.get('href')
                        ttt = str(tarUrl).split("#")[-1]
                        print ttt
                        if not (ttt is None):
                            article_url_index_list.append(ttt)

                del index_work_set[raw_front_page_index]

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(
            article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            print 'query : ' + article_url_index
            article_db = DB.query_row(
                'select count(*) as article_count from Article where article_id = "{}"'
                .format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = article_url_index

            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(
                    u"开始抓取{countert}号文章,剩余{article_count}篇".format(
                        countert=article_url_index,
                        article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                article_info = WuXiaArticleParser(
                    request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return
    def catch(account_id):
        # the core crawling logic starts here

        mock_sleep_time = 0.5
        base_sleep_time = 10
        max_sleep_time = 10

        article_url_index_list = []
        #   determine the maximum page number
        url = 'http://www.gushequ.com/{}/'.format(account_id)
        front_page_content = Http.get_content(url)

        column_info = TodoColumnParser(front_page_content).get_column_info()
        column_info[u'column_id'] = account_id
        column_info[u'title'] = "股社区"

        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])
        star_page = 0
        max_page = 24
        if account_id == '2018':
            star_page = 0
            max_page = 24

        elif account_id == '2017':
            star_page = 24
            max_page = 58

        elif account_id == '2016':
            star_page = 58
            max_page = 92

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))
        index_work_set = OrderedDict()
        #   collect the article URLs from each page
        for raw_front_page_index in range(star_page, max_page):
            request_url = u'http://www.gushequ.com/page/{}/'.format(
                raw_front_page_index)
            index_work_set[raw_front_page_index] = request_url

        re_catch_counter = 0
        catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for raw_front_page_index in index_work_set:
                catch_counter += 1
                Debug.logger.info(u'第『{}』遍抓取数据'.format(re_catch_counter))
                request_url = index_work_set[raw_front_page_index]
                Debug.logger.info(
                    u"开始抓取第{raw_front_page_index}页中的文章链接,剩余{max_page}页".format(
                        raw_front_page_index=raw_front_page_index,
                        max_page=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                soup = BeautifulSoup(request_url_content, 'lxml')
                list_p_list = soup.find_all('article')
                for p in list_p_list:
                    # print p
                    list_pcyc_li = p.find_all('a')
                    for li in list_pcyc_li:

                        tarUrl = li.get('href')
                        ttt = str(tarUrl).split("#")[-1]
                        print ttt
                        if not (ttt is None):
                            article_url_index_list.append(ttt)

                del index_work_set[raw_front_page_index]

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(
            article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            print 'query : ' + article_url_index
            article_db = DB.query_row(
                'select count(*) as article_count from Article where article_id = "{}"'
                .format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = article_url_index

            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(
                    u"开始抓取{countert}号文章,剩余{article_count}篇".format(
                        countert=article_url_index,
                        article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                article_info = TodoArticleParser(
                    request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return
 def query_article(self, article_id):
     print u"query_article {}".format(article_id)
     raw_article = DB.query_row(u'select * from Article where article_id="{article_id}" '.format(article_id=article_id))
     article = self.format_article(raw_article)
     return article
    def catch(account_id):
        # the core crawling logic starts here

        mock_sleep_time = 0.5
        base_sleep_time = 10
        max_sleep_time = 10

        article_url_index_list = []
        #   determine the maximum page number

        star_page = 1
        max_page = 1
        column_info = Todo1ColumnParser("").get_column_info()
        column_info[u'column_id'] = account_id

        with open('ReadList.txt', 'r') as read_list:
            read_list = read_list.readlines()
            for line in read_list:
                split_url = line.split('#')[0]
                if str(split_url).__contains__(account_id):
                    # Config.now_id_likeName = line.split('#')[1]
                    max_page = int(line.split('#')[-1]) + 1
                    column_info[u'title'] = str(line.split('#')[1])

                    # max_page = 1
                    print max_page

        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))
        index_work_set = OrderedDict()
        #   collect the article URLs from each page
        for raw_front_page_index in range(star_page, max_page):
            request_url = u'https://www.guancha.cn/{}/list_{}.shtml'.format(
                account_id, raw_front_page_index)
            index_work_set[raw_front_page_index] = request_url

        re_catch_counter = 0
        catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for raw_front_page_index in index_work_set:
                catch_counter += 1
                Debug.logger.info(u'第『{}』遍抓取数据'.format(re_catch_counter))
                request_url = index_work_set[raw_front_page_index]
                Debug.logger.info(
                    u"开始抓取第{raw_front_page_index}页中的文章链接,剩余{max_page}页".format(
                        raw_front_page_index=raw_front_page_index,
                        max_page=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                soup = BeautifulSoup(request_url_content, 'lxml')
                list_p_list = soup.find_all('h4', class_="module-title")
                for p in list_p_list:
                    # print p
                    list_pcyc_li = p.find_all('a')
                    for li in list_pcyc_li:
                        ttt = li.get('href')
                        print ttt
                        if not (ttt is None):

                            ss = str(ttt).split('.')
                            article_url_index_list.append(
                                u"https://www.guancha.cn{}_s.{}".format(
                                    ss[0], ss[1]))

                del index_work_set[raw_front_page_index]

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(
            article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            print 'query : ' + article_url_index
            article_db = DB.query_row(
                'select count(*) as article_count from Article where article_id = "{}"'
                .format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = article_url_index

            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(
                    u"开始抓取{countert}号文章,剩余{article_count}篇".format(
                        countert=article_url_index,
                        article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                article_info = Todo1ArticleParser(
                    request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return
Example #20
 def query_column(self, column_id):
     raw_column = DB.query_row(u'select * from Column where column_id="{column_id}"'.format(column_id=column_id))
     column = self.format_column(raw_column)  # wrap into the standard info format
     return column
Example #22
 def query_article(self, article_id):
     raw_article = DB.query_row(u'select * from Article where article_id="{article_id}" '.format(article_id=article_id))
     article = self.format_article(raw_article)
     return article
Example #23
    def catch(account_id):
        # the core crawling logic starts here

        mock_sleep_time = 0.5
        base_sleep_time = 10
        max_sleep_time = 10

        article_url_index_list = []
        #   determine the maximum page number

        column_info = Todo2ColumnParser("").get_column_info()
        column_info[u'column_id'] = account_id
        column_info[u'title'] = "纽约时报"

        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])
        star_page = 0
        max_page = 0

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))
        index_work_set = OrderedDict()
        #   collect the article URLs from each page
        for raw_front_page_index in range(star_page, max_page):
            request_url = u'https://cn.nytimes.com/search/data/?query=DAVID%20BARBOZA&lang=&dt=json&from={}&size=10'.format(
                raw_front_page_index * 10)
            # request_url = u'https://cn.nytimes.com/real-estate/{}/'.format(raw_front_page_index)
            index_work_set[raw_front_page_index] = request_url

        re_catch_counter = 0
        catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for raw_front_page_index in index_work_set:
                catch_counter += 1
                Debug.logger.info(u'第『{}』遍抓取数据'.format(re_catch_counter))
                request_url = index_work_set[raw_front_page_index]
                Debug.logger.info(
                    u"开始抓取第{raw_front_page_index}页中的文章链接,剩余{max_page}页".format(
                        raw_front_page_index=raw_front_page_index,
                        max_page=len(index_work_set)))
                content = Http.get_content(request_url)

                # URLs come from the JSON response

                if not content:
                    return
                jdata = json.loads(content)
                articles = jdata['items']
                for article in articles:
                    print article['headline']
                    uur = article['web_url_with_host']

                    print uur

                    article_url_index_list.append(uur)

                # soup = BeautifulSoup(content, 'lxml')
                # list_p_list = soup.find_all('h3' ,class_="regularSummaryHeadline")
                # for p in list_p_list:
                #     # print p
                #     list_pcyc_li = p.find_all('a')
                #     for li in list_pcyc_li:
                #
                #         tarUrl = str(li.get('href'))
                #         print  tarUrl
                #
                #         if not (tarUrl is None):
                #             if str(tarUrl).__contains__("cn.nytimes.com"):
                #                 article_url_index_list.append(u"https:{}".format(tarUrl))
                #             else:
                #                 article_url_index_list.append(u"https://cn.nytimes.com{}".format(tarUrl))

                del index_work_set[raw_front_page_index]

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(
            article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            print 'query : ' + article_url_index
            article_db = DB.query_row(
                'select count(*) as article_count from Article where article_id = "{}"'
                .format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = article_url_index

            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(
                    u"开始抓取{countert}号文章,剩余{article_count}篇".format(
                        countert=article_url_index,
                        article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                article_info = Todo2ArticleParser(
                    request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return
    def catch(account_id):
        # the core crawling logic starts here

        mock_sleep_time = 0.5
        base_sleep_time = 10
        max_sleep_time = 10

        article_url_index_list = []
        #   determine the maximum page number
        url = 'http://www.360doc.com/userhome/{}'.format(account_id)
        front_page_content = Http.get_content(url)

        # Config.now_id_likeName = account_id
        # Config.save()

        column_info = Doc360ColumnParser(front_page_content).get_column_info()
        column_info[u'column_id'] = account_id
        column_info[u'title'] = "明公"
        max_page = 2
        # if account_id == 'daidai':
        #
        #     column_info[u'title'] = "吃瓜群众岱岱"
        #     max_page = 1
        # elif account_id == 'gzmdzst':
        #
        #     column_info[u'title'] = "顾子明的政事堂"
        #     max_page = 1
        # else:
        #
        #     column_info[u'title'] = "时文"
        #     max_page = 1

        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))
        index_work_set = OrderedDict()
        #   collect the article URLs from each page
        for raw_front_page_index in range(0, max_page):
            request_url = u"http://www.360doc.com/ajax/getUserArticle.aspx?pagenum=50&curnum={}&icid=13&ishowabstract=null&word=&userid={}&isoriginal=0&_={}"
            urequest_url = (request_url.format(raw_front_page_index,
                                               account_id,
                                               int(time.time() * 1000)))

            index_work_set[raw_front_page_index] = urequest_url

        re_catch_counter = 0
        catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for raw_front_page_index in index_work_set:
                catch_counter += 1
                Debug.logger.info(u'第『{}』遍抓取数据'.format(re_catch_counter))
                request_url = index_work_set[raw_front_page_index]
                Debug.logger.info(
                    u"开始抓取第{raw_front_page_index}页中的文章链接,剩余{max_page}页".format(
                        raw_front_page_index=raw_front_page_index,
                        max_page=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                soup = BeautifulSoup(request_url_content, 'lxml')
                list_p_list = soup.find_all('div',
                                            class_="list listwz1 font14")
                for p in list_p_list:
                    # print p
                    list_pcyc_li = p.find_all('a')
                    for li in list_pcyc_li:

                        # print li.text
                        tarUrl = li.get('href')
                        # deep level
                        print tarUrl
                        article_url_index_list.append(tarUrl)

                del index_work_set[raw_front_page_index]

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(
            article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            print 'query : ' + article_url_index
            article_db = DB.query_row(
                'select count(*) as article_count from Article where article_id = "{}"'
                .format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = article_url_index

            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(
                    u"开始抓取{countert}号文章,剩余{article_count}篇".format(
                        countert=article_url_index,
                        article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                article_info = Doc360ArticleParser(
                    request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return
Example #25
    def catch(account_id):
        # the core crawling logic starts here

        mock_sleep_time = 0.5
        base_sleep_time = 1
        max_sleep_time = 1

        article_url_index_list = []
        #   determine the maximum page number

        column_info = WeiXinColumnParser('').get_column_info()
        column_info[u'column_id'] = account_id
        column_info[u'title'] = account_id
        column_info[u'image_url'] = 'https://wpimg.wallstcn.com/3598b719-ab0d-4be7-bc09-30c3ae29a3cc.jpg?imageView2/1/w/240/h/240'
        max_page = 1
        # with open('ReadList.txt', 'r') as read_list:
        #     read_list = read_list.readlines()
        #     for line in read_list:
        #         split_url = line.split('#')[0]
        #         if str(split_url).__contains__(account_id):
        #             # Config.now_id_likeName = line.split('#')[1]
        #             max_page = int(line.split('#')[-1]) + 1
        #             column_info[u'title'] = str(line.split('#')[1])
        #
        #             # max_page = 1
        #             print max_page



        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))


        # article_url_index_list.append('https://mp.weixin.qq.com/s?__biz=MjM5MjczNDc0Mw==&mid=2650847984&idx=2&sn=b7b111e5964d2f2fb568ba0d419e3edf&chksm=bd55d1888a22589e2f3bab0613b346427079efc6b82fac869d4f78244a500c3e5cc8cb8402ed&scene=21#wechat_redirect')
        # article_url_index_list.append('https://mp.weixin.qq.com/s/yj1BT3jWyxLjlEnzz0vEtQ')

        with open('/Users/0/Desktop/list.txt', 'r') as read_list:
            read_list = read_list.readlines()
            for line in read_list:
                article_url_index_list.append(str(line).strip('\n'))

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            print  'query : ' + article_url_index
            article_db = DB.query_row(
                    'select count(*) as article_count from Article where article_id = "{}"'.format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = article_url_index

            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(u"开始抓取  {countert} 号文章,剩余{article_count}篇".format(countert=article_url_index,
                                                                                    article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)
                time.sleep(mock_sleep_time)
                if len(request_url_content) == 0:
                    random_sleep_time = base_sleep_time + random.randint(0, max_sleep_time) / 100.0
                    Debug.logger.info(u"随机休眠{}秒".format(random_sleep_time))
                    time.sleep(random_sleep_time)
                    continue
                #article_info = Todo2ArticleParser(request_url_content).get_article_info()
                # article_info = HuXiuArticleParser(request_url_content).get_article_info()
                article_info = WeiXinArticleParser(request_url_content).get_article_info()
                # article_info = WallStreetArticleParser(request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return
Example #26
    def catch(account_id):
        # the core crawling logic starts here

        mock_sleep_time = 0.5
        base_sleep_time = 10
        max_sleep_time = 10

        article_url_index_list = []
        #
        url = 'http://xinsheng.huawei.com/{}'.format(account_id)
        front_page_content = Http.get_content(url)

        column_info = HuaWeiColumnParser(front_page_content).get_column_info()
        column_info[u'column_id'] = account_id
        column_info[u'title'] = "华为家事"
        column_info[
            u'image_url'] = 'file:///Users/ex-liyan010/Desktop/share/hcover.jpeg'

        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])

        max_page = 0

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))
        index_work_set = OrderedDict()
        #   collect the article URLs from each page
        for raw_front_page_index in range(0, max_page):
            request_url = u'http://xinsheng.huawei.com/cn/index.php?app=forum&mod=List&act=index&class=461&order=cTime&type=&sign=&special=&cate=155&p={}'.format(
                raw_front_page_index)
            index_work_set[raw_front_page_index] = request_url

        re_catch_counter = 0
        catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for raw_front_page_index in index_work_set:
                catch_counter += 1
                Debug.logger.info(u'第『{}』遍抓取数据'.format(re_catch_counter))
                request_url = index_work_set[raw_front_page_index]
                Debug.logger.info(
                    u"开始抓取第{raw_front_page_index}页中的文章链接,剩余{max_page}页".format(
                        raw_front_page_index=raw_front_page_index,
                        max_page=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                soup = BeautifulSoup(request_url_content, "lxml")
                content_dom = soup.find_all('div', class_="bbs_list")[0]

                # print content_dom.text
                #     t_dom = dom.find_all('tbody')
                #     # print t_dom

                font_box_dom = content_dom.find_all('div', class_="font_box")
                # print time_dom
                for xx in font_box_dom:
                    linkl = xx.findAll('a')

                    tarUrl = linkl[0].get('href')
                    print tarUrl
                    article_url_index_list.append(tarUrl)

                del index_work_set[raw_front_page_index]

        article_url_index_list.append(
            'http://xinsheng.huawei.com/cn/index.php?app=forum&mod=Detail&act=index&id=4343641'
        )
        article_url_index_list.append(
            'http://xinsheng.huawei.com/cn/index.php?app=forum&mod=Detail&act=index&id=4340813'
        )
        article_url_index_list.append(
            'http://xinsheng.huawei.com/cn/index.php?app=group&mod=Bbs&act=detail&tid=4346331'
        )
        article_url_index_list.append(
            'http://xinsheng.huawei.com/cn/index.php?app=group&mod=Bbs&act=detail&tid=4347493'
        )
        article_url_index_list.append(
            'http://xinsheng.huawei.com/cn/index.php?app=group&mod=Bbs&act=detail&tid=4342141'
        )

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(
            article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            print 'query : ' + article_url_index
            article_db = DB.query_row(
                'select count(*) as article_count from Article where article_id = "{}"'
                .format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = article_url_index

            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(
                    u"开始抓取{countert}号文章,剩余{article_count}篇".format(
                        countert=article_url_index,
                        article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                article_info = HuaWeiArticleParser(
                    request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return
    def catch(account_id):
        # the core crawling logic starts here

        mock_sleep_time = 0.5
        base_sleep_time = 10
        max_sleep_time = 10

        article_url_index_list = []
        #   determine the maximum page number
        url = 'https://www.huxiu.com/{}'.format(account_id)
        front_page_content = Http.get_content(url)

        # Config.now_id_likeName = account_id
        # Config.save()

        column_info = HuXiuColumnParser(front_page_content).get_column_info()
        column_info[u'column_id'] = account_id
        column_info[u'title'] = account_id

        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])

        u_result = urllib.quote(
            account_id.decode(sys.stdin.encoding).encode('utf8'))
        print account_id
        max_page = 2

        idds = ''
        #
        with open('ReadList.txt', 'r') as read_list:
            read_list = read_list.readlines()
            for line in read_list:
                split_url = line.split('#')[0]
                if split_url.split('/')[-1] == account_id:
                    # Config.now_id_likeName = line.split('#')[1]
                    max_page = int(line.split('#')[-1]) + 1
                    idds = str(line.split('#')[1])
                    print max_page
        max_page = -1
        #   parse the page content and store it in the database

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))
        index_work_set = OrderedDict()
        #   collect the article URLs from each page
        for raw_front_page_index in range(0, max_page + 1):
            #https://www.huxiu.com/search.html?s=%E5%B7%B4%E8%8F%B2%E7%89%B9&sort=dateline:desc
            request_url = u'https://www.huxiu.com/search.html?s={}&sort=dateline%3Adesc&per_page={}'.format(
                u_result, raw_front_page_index)
            #request_url = u'https://www.huxiu.com/member/{}/article/{}.html'.format(idds,raw_front_page_index)
            # request_url = 'https://www.huxiu.com/member/1872007.html'
            index_work_set[raw_front_page_index] = request_url

        re_catch_counter = 0
        catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for raw_front_page_index in index_work_set:
                catch_counter += 1
                Debug.logger.info(u'第『{}』遍抓取数据'.format(re_catch_counter))
                request_url = index_work_set[raw_front_page_index]
                Debug.logger.info(
                    u"开始抓取第{raw_front_page_index}页中的文章链接,剩余{max_page}页".format(
                        raw_front_page_index=raw_front_page_index,
                        max_page=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                soup = BeautifulSoup(request_url_content, "lxml")

                list_pcyc_l_ = soup.find_all('li')
                # list_pcyc_l_ = soup.find_all('div',class_='mob-ctt')
                for tgo_right in list_pcyc_l_:
                    for link in tgo_right.findAll('a'):
                        hre = str(link.get('href'))
                        if hre.startswith('/article/', 0, 10):
                            print u'https://www.huxiu.com{}'.format(
                                link.get('href'))
                            article_url_index_list.append(
                                'https://www.huxiu.com{}'.format(
                                    link.get('href')))

                del index_work_set[raw_front_page_index]

        article_url_index_list.append(
            'https://www.huxiu.com/article/299355.html')

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(
            article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            print 'query : ' + article_url_index
            article_db = DB.query_row(
                'select count(*) as article_count from Article where article_id = "{}"'
                .format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = article_url_index

            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(
                    u"开始抓取{countert}号文章,剩余{article_count}篇".format(
                        countert=article_url_index,
                        article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)

                article_info = HuXiuArticleParser(
                    request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return
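The Huxiu variant builds its search URL by percent-encoding the (possibly non-ASCII) account name with urllib.quote before formatting it into the query string. That encoding step in isolation, written as Python 2 code to match the snippets:

    import urllib

    def encode_search_term(term):
        # Percent-encode a UTF-8 search term for use in a query string
        # (the snippet above additionally decodes from the terminal encoding first).
        if isinstance(term, unicode):
            term = term.encode('utf8')
        return urllib.quote(term)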
    def catch(account_id):
        # the core crawling logic starts here

        mock_sleep_time = 28
        base_sleep_time = 62
        max_sleep_time = 80

        article_url_index_list =[]
        #   determine the maximum page number
        url = 'http://chuansong.me/account/{}'.format(account_id)
        # front_page_content = Http.get_content(url)
        front_page_content =''
        # max_page =WechatWorker.parse_max_page(front_page_content)
        # if max_page > 200:
        #     max_page =200
        max_page = 0
        #   parse the page content and store it in the database
        column_info = WechatColumnParser(front_page_content).get_column_info()
        column_info[u'column_id'] = account_id

        with open('ReadList.txt', 'r') as read_list:
             read_list = read_list.readlines()
             for line in read_list:
                 split_url = line.split('#')[0]
                 if str(split_url).__contains__(account_id):
                    column_info[u'title'] = str(line.split('#')[1])


        from src.worker import Worker
        Worker.save_record_list(u'Column', [column_info])

        Debug.logger.info(u"最大页数抓取完毕,共{max_page}页".format(max_page=max_page))
        index_work_set = OrderedDict()
        #   collect the article URLs from each page
        for raw_front_page_index in range(0, max_page):
            front_page_index = raw_front_page_index * 12
            request_url = url + '?start={}'.format(front_page_index)
            index_work_set[raw_front_page_index] = request_url

        re_catch_counter = 0
        catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for raw_front_page_index in index_work_set:
                catch_counter += 1
                Debug.logger.info(u'第『{}』遍抓取数据'.format(re_catch_counter))
                request_url = index_work_set[raw_front_page_index]
                Debug.logger.info(
                        u"开始抓取第{raw_front_page_index}页中的文章链接,剩余{max_page}页".format(raw_front_page_index=raw_front_page_index, max_page=len(index_work_set)))
                request_url_content = Http.get_content(request_url)
                time.sleep(mock_sleep_time)
                if len(request_url_content) == 0 or catch_counter % 5 == 0:
                    random_sleep_time = base_sleep_time + random.randint(0, max_sleep_time) / 100.0
                    Debug.logger.info(u"随机休眠{}秒".format(random_sleep_time))
                    time.sleep(random_sleep_time)
                    continue


                random_sleep_time = base_sleep_time + random.randint(0, max_sleep_time) / 10.0
                Debug.logger.info(u"随机休眠{}秒".format(random_sleep_time))
                time.sleep(random_sleep_time)

                article_url_index_list += Match.wechat_article_index(content=request_url_content)
                del index_work_set[raw_front_page_index]

        article_count = len(article_url_index_list)
        Debug.logger.info(u"文章链接抓取完毕,共{article_count}篇文章待抓取".format(article_count=article_count))

        index_work_set = OrderedDict()
        for article_url_index in article_url_index_list:
            article_db = DB.query_row('select count(*) as article_count from Article where article_id = {}'.format(article_url_index))
            if article_db['article_count'] > 0:
                continue

            request_url = 'http://chuansong.me/n/{}'.format(article_url_index)
            index_work_set[article_url_index] = request_url

        re_catch_counter = 0
        while len(index_work_set) > 0 and re_catch_counter <= 20:
            re_catch_counter += 1
            for article_url_index in index_work_set:
                request_url = index_work_set[article_url_index]
                Debug.logger.info(u"开始抓取{countert}号文章,剩余{article_count}篇".format(countert=article_url_index,
                                                                                 article_count=len(index_work_set)))
                request_url_content = Http.get_content(request_url)
                time.sleep(mock_sleep_time)
                if len(request_url_content) == 0:
                    random_sleep_time = base_sleep_time + random.randint(0, max_sleep_time) / 100.0
                    Debug.logger.info(u"随机休眠{}秒".format(random_sleep_time))
                    time.sleep(random_sleep_time)
                    continue

                random_sleep_time = base_sleep_time + random.randint(0, max_sleep_time) / 10.0
                Debug.logger.info(u"随机休眠{}秒".format(random_sleep_time))
                time.sleep(random_sleep_time)

                article_info = WechatArticleParser(request_url_content).get_article_info()
                if len(article_info) > 0:
                    article_info['article_id'] = article_url_index
                    article_info['column_id'] = account_id
                    Worker.save_record_list(u'Article', [article_info])
                del index_work_set[article_url_index]
        return