Example #1
    def __init__(self, get_detail):
        super().__init__()
        self.get_detail = get_detail
        # Add discovered question IDs to the question_id set
        if get_detail:
            self.url_manager = SpiderFrame.UrlManager(
                use_redis=config.USE_REDIS, db_set_name=config.QUESTION_ID_SET)
Example #2
    def __init__(self):
        logger.info("TopicSpider init...")
        super().__init__()
        # The url_manager logic is built in; just pass IDs in through id_manager
        self.id_manager = SpiderFrame.UrlManager(
            db_set_name=config.TOPIC_ID_SET, use_redis=config.USE_REDIS)
        self.exit_code = 1
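
Both init patterns above attach a SpiderFrame.UrlManager to the spider: as url_manager when it queues URLs, or as id_manager when it queues bare IDs. A minimal sketch of the id_manager pattern in isolation (only the constructor signature appears in these examples; the queue-style method names in the comments are assumptions, not taken from the source):

from frame import SpiderFrame
import config

# Same construction as in Example #2 above.
id_manager = SpiderFrame.UrlManager(db_set_name=config.TOPIC_ID_SET,
                                    use_redis=config.USE_REDIS)

# Assumed usage (method names are illustrative, not from the source):
# id_manager.add("19550517")       # enqueue a topic ID
# while id_manager.has_next():
#     topic_id = id_manager.get()  # dequeue the next ID to crawl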
Example #3
    def __init__(self):
        logger.info("CommentSpider init...")
        super().__init__()
        self.exit_code = 1

        self.flag = True
        # The url_manager logic is built in; just pass IDs in through id_manager
        self.id_manager = SpiderFrame.UrlManager(db_set_name=config.ANSWER_ID_SET, use_redis=config.USE_REDIS)
Example #4
from frame import SpiderFrame
from bs4 import BeautifulSoup
from time import sleep
from redis import Redis
from re import findall
from requests import exceptions

import pymongo.errors
import config
import json

logger = SpiderFrame.logger
html_downloader = SpiderFrame.HtmlDownloader()
data_saver = SpiderFrame.DataSaver(db_name=config.DB_NAME,
                                   set_name=config.TOPIC_SET)
url_manager = SpiderFrame.UrlManager(db_set_name=config.TOPIC_SET,
                                     use_redis=config.USE_REDIS)
redis = Redis(host=config.REDIS_HOST,
              port=config.REDIS_PORT,
              password=config.REDIS_PASSWORD)


def parse_base_topic_info(html: str):
    soup = BeautifulSoup(html, "lxml")
    try:
        title = soup.find("h2", {"class": "ContentItem-title"}).text
    except Exception as e:
        logger.error("Get Topic title failed, Exception: {0}".format(e))
        title = ''
    try:
        # Completion of the truncated snippet; the element index and the
        # comma-stripping number parsing below are assumptions.
        follower = int(
            soup.find_all("strong", {"class": "NumberBoard-itemValue"})[0]
            .text.replace(",", ""))
    except Exception as e:
        logger.error("Get Topic follower failed, Exception: {0}".format(e))
        follower = 0
    return title, follower
Example #5
    def __init__(self):
        super().__init__()
        self.url_manager = SpiderFrame.UrlManager(
            use_redis=config.USE_REDIS, db_set_name=config.QUESTION_SET)
Example #6
    def __init__(self, get_detail=False):
        super().__init__()
        self.get_detail = get_detail
        if get_detail:
            self.url_manager = SpiderFrame.UrlManager(db_set_name='知乎@HotList')
Example #7
    @update_date    2020/10/06
    @desc           Comment spider: given an answer ID, crawls every comment under that answer <incremental crawling implemented>
    @info           No other links are involved, so no queue is needed
    @main_function  spider(answer_id: str)
"""

from frame import SpiderFrame
from requests import exceptions
from json import loads as json_lds
from time import sleep
from redis import Redis
import config

logger = SpiderFrame.logger
html_downloader = SpiderFrame.HtmlDownloader()
url_manager = SpiderFrame.UrlManager(use_redis=config.USE_REDIS,
                                     db_set_name=config.COMMENT_SET)
data_saver = SpiderFrame.DataSaver(db_name=config.DB_NAME,
                                   set_name=config.COMMENT_SET)
redis = Redis(host=config.REDIS_HOST,
              port=config.REDIS_PORT,
              password=config.REDIS_PASSWORD)


def spider(answer_id: str) -> None:
    # Incrementally crawl comments
    offset = config.MONGO_DOC_LIMIT
    logger.info("Get comments for answer id: {0}".format(answer_id))

    url = "https://www.zhihu.com/api/v4/answers/{}/root_comments?limit=10&offset=0&order=normal&status=open" \
        .format(answer_id)
    res = html_downloader.download(url)
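
The snippet is cut off after the first request, but the setup (a limit/offset query string plus the incremental-crawl comment) implies plain offset pagination. A self-contained sketch of that loop using only the requests library (the data and paging response keys are assumptions about the API's shape, not taken from the source):

import requests

def fetch_all_comments(answer_id: str, limit: int = 10) -> list:
    offset = 0
    comments = []
    while True:
        url = ("https://www.zhihu.com/api/v4/answers/{0}/root_comments"
               "?limit={1}&offset={2}&order=normal&status=open"
               .format(answer_id, limit, offset))
        payload = requests.get(url, timeout=10).json()
        comments.extend(payload.get("data", []))
        # Stop when the API reports the last page (assumed field name).
        if payload.get("paging", {}).get("is_end", True):
            break
        offset += limit
    return comments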