Example #1
File: test.py Project: djun/PSpider
def test_spider():
    """
    test spider
    """
    # initialize fetcher / parser / saver; you can also rewrite these three classes
    fetcher = spider.Fetcher(max_repeat=3, sleep_time=1)
    parser = spider.Parser(max_deep=2)
    saver = spider.Saver(save_pipe=open("out_spider_thread.txt", "w"))

    # define url_filter
    url_filter = spider.UrlFilter(black_patterns=black_patterns,
                                  white_patterns=white_patterns,
                                  capacity=None)

    # initialize web_spider
    web_spider = spider.WebSpider(fetcher,
                                  parser,
                                  saver,
                                  url_filter=url_filter,
                                  monitor_sleep_time=5)

    # add start url
    web_spider.set_start_url("http://zhushou.360.cn/", keys=("360web", ))

    # start web_spider
    result = web_spider.start_work_and_wait_done(fetcher_num=10, is_over=True)

    # print result of web_spider
    print(result[spider.TPEnum.URL_FETCH_SUCC],
          result[spider.TPEnum.HTM_PARSE_SUCC],
          result[spider.TPEnum.ITEM_SAVE_SUCC])
    return
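
This example references module-level black_patterns and white_patterns tuples that the listing omits. A minimal sketch of plausible definitions, mirroring the patterns defined inline in Example #8 below (the exact values in the original project may differ):

import spider

# hypothetical module-level filter patterns, following Example #8 below
black_patterns = (spider.CONFIG_URLPATTERN_FILES, r"binding", r"download")
white_patterns = (r"^http[s]?://(www\.)?zhushou\.360\.(com|cn)", )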
Example #2
def test_spider_async():
    """
    test spider with asyncio
    """
    loop = asyncio.get_event_loop()

    # initialize fetcher / parser / saver; you can also rewrite these three classes
    fetcher = spider.FetcherAsync(max_repeat=3, sleep_time=0)
    parser = spider.ParserAsync(max_deep=2)
    saver = spider.SaverAsync(save_pipe=open("out_spider_async.txt", "w"))

    # define url_filter
    url_filter = spider.UrlFilter(black_patterns=black_patterns,
                                  white_patterns=white_patterns,
                                  capacity=10000)

    # initialize web_spider
    web_spider_async = spider.WebSpiderAsync(fetcher,
                                             parser,
                                             saver,
                                             url_filter=url_filter,
                                             loop=loop)

    # add start url
    web_spider_async.set_start_url("http://zhushou.360.cn/", keys=("360web", ))

    # start web_spider
    web_spider_async.start_work_and_wait_done(fetcher_num=10)
    return
Example #3
def test_spider_distributed():
    """
    test distributed spider
    """
    # initialize fetcher / parser / saver; you can also rewrite these three classes
    fetcher = spider.Fetcher(max_repeat=3, sleep_time=0)
    parser = spider.Parser(max_deep=-1)
    saver = spider.Saver(save_pipe=open("out_spider_distributed.txt", "w"))

    # define url_filter
    url_filter = spider.UrlFilter(black_patterns=black_patterns,
                                  white_patterns=white_patterns)

    # initialize web_spider
    web_spider_dist = spider.WebSpiderDist(fetcher,
                                           parser,
                                           saver,
                                           url_filter=url_filter,
                                           monitor_sleep_time=5)
    web_spider_dist.init_redis(host="localhost",
                               port=6379,
                               key_wait="spider.wait",
                               key_all="spider.all")

    # add start url
    web_spider_dist.set_start_url("http://zhushou.360.cn/", keys=("360web", ))

    # start web_spider
    web_spider_dist.start_work_and_wait_done(fetcher_num=10)
    return
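
The distributed examples assume a Redis server reachable at localhost:6379. A minimal pre-flight sketch using the redis-py client (an assumption; the listing itself never shows how Redis is provisioned):

import redis  # redis-py

# hypothetical sanity check: ping() raises redis.exceptions.ConnectionError
# if the server at localhost:6379 cannot be reached
redis.Redis(host="localhost", port=6379).ping()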
Example #4
def test_spider():
    """
    test spider
    """
    # initialize fetcher / parser / saver; you can also rewrite these three classes
    fetcher = spider.Fetcher(max_repeat=1, sleep_time=0)
    parser = spider.Parser(max_deep=2)
    saver = spider.Saver(save_pipe=open("out_thread.txt", "w"))

    # define url_filter
    url_filter = spider.UrlFilter(black_patterns=black_patterns,
                                  white_patterns=white_patterns,
                                  capacity=None)

    # initialize web_spider
    web_spider = spider.WebSpider(fetcher,
                                  parser,
                                  saver,
                                  proxieser=None,
                                  url_filter=url_filter,
                                  monitor_sleep_time=5)

    # add start url
    web_spider.set_start_url("http://zhushou.360.cn/",
                             priority=0,
                             keys={"type": "360"},
                             deep=0)

    # start web_spider
    web_spider.start_work_and_wait_done(fetcher_num=10, is_over=True)
    return
Example #5
def test_spider():
    """
    test function
    """
    # initialize fetcher / parser / saver / proxieser
    fetcher = MyFetcher(sleep_time=0, max_repeat=1)
    parser = MyParser(max_deep=1)
    saver = MySaver(save_pipe=open("out.txt", "w"))
    # proxieser = MyProxies(sleep_time=5)

    # define url_filter
    url_filter = spider.UrlFilter(white_patterns=(re.compile(r"^http[s]?://(www\.)?petstation\.jp"),), capacity=None)

    # define web_spider
    web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=None, url_filter=url_filter, queue_parse_size=-1,
                                  queue_save_size=-1)
    # web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=proxieser, url_filter=url_filter, queue_parse_size=100, queue_proxies_size=100)

    # add start url
    # web_spider.set_start_url("https://www.appinn.com/", priority=0, keys={"type": "index"}, deep=0)
    web_spider.set_start_url("https://www.petstation.jp/animal_detail.php?animal__id=371144", priority=0,
                             keys={"type": "index"}, deep=0)

    # start web_spider
    web_spider.start_working(fetcher_num=20)

    # wait for the spider to finish
    web_spider.wait_for_finished()
    return
Example #6
def test_spider_distributed():
    """
    test distributed spider
    """
    # initialize fetcher / parser / saver
    fetcher = MyFetcher(max_repeat=1, sleep_time=0)
    parser = MyParser(max_deep=-1)
    saver = spider.Saver(save_pipe=open("out_distributed.txt", "w"))

    # define url_filter
    url_filter = spider.UrlFilter(black_patterns=black_patterns,
                                  white_patterns=white_patterns)

    # initialize web_spider
    web_spider_dist = spider.WebSpiderDist(fetcher,
                                           parser,
                                           saver,
                                           proxieser=None,
                                           url_filter=url_filter,
                                           monitor_sleep_time=5)
    web_spider_dist.init_redis(host="localhost",
                               port=6379,
                               key_high_priority="spider.high",
                               key_low_priority="spider.low")

    # start web_spider
    web_spider_dist.start_working(fetcher_num=10)

    # wait for finished
    web_spider_dist.wait_for_finished(is_over=True)
    return
Example #7
def test_spider_distributed():
    """
    test distributed spider
    """
    # define fetcher, parser and saver; you can also rewrite any of these three classes
    fetcher = spider.Fetcher(max_repeat=3, sleep_time=0)
    parser = spider.Parser(max_deep=-1)
    saver = spider.Saver(save_pipe=open("out_spider_distributed.txt", "w"))

    # initialize WebSpiderDist
    web_spider_dist = spider.WebSpiderDist(fetcher,
                                           parser,
                                           saver,
                                           url_filter=spider.UrlFilter(),
                                           monitor_sleep_time=5)
    web_spider_dist.init_redis(host="localhost",
                               port=6379,
                               key_wait="spider.wait",
                               key_all="spider.all")

    # add seed url
    web_spider_dist.set_start_url("http://zhushou.360.cn/", keys=("360web", ))

    # start crawling and wait for it to finish
    web_spider_dist.start_work_and_wait_done(fetcher_num=10)
    return
Example #8
def test_spider():
    """
    test spider
    """
    # initialize fetcher / parser / saver; you can also rewrite these three classes
    fetcher = spider.Fetcher(max_repeat=3, sleep_time=0)
    parser = spider.Parser(max_deep=2)
    saver = spider.Saver(save_pipe=open("out_spider_thread.txt", "w"))

    # define url_filter
    black_patterns = (
        spider.CONFIG_URLPATTERN_FILES,
        r"binding",
        r"download",
    )
    white_patterns = (r"^http[s]{0,1}://(www\.){0,1}(zhushou\.360)\.(com|cn)", )
    url_filter = spider.UrlFilter(black_patterns=black_patterns,
                                  white_patterns=white_patterns)

    # initialize web_spider
    web_spider = spider.WebSpider(fetcher,
                                  parser,
                                  saver,
                                  url_filter=url_filter,
                                  monitor_sleep_time=5)

    # add start url
    web_spider.set_start_url("http://zhushou.360.cn/", keys=("360web", ))

    # start web_spider
    web_spider.start_work_and_wait_done(fetcher_num=10, is_over=True)
    return
Example #9
def test_spider():
    """
    test spider
    """
    # define fetcher, parser and saver; you can also rewrite any of these three classes
    fetcher = spider.Fetcher(normal_max_repeat=3, normal_sleep_time=0, critical_max_repeat=5, critical_sleep_time=5)
    parser = spider.Parser(max_deep=1, max_repeat=2)
    saver = spider.Saver(save_pipe=open("out_spider.txt", "w"))

    # define url filtering; UrlFilter uses a Set here, suitable when the number of urls is small
    black_patterns = (spider.CONFIG_URLPATTERN_FILES, r"binding", r"download", )
    white_patterns = (r"^http[s]{0,1}://(www\.){0,1}(wandoujia|(zhushou\.360))\.(com|cn)", )
    url_filter = spider.UrlFilter(black_patterns=black_patterns, white_patterns=white_patterns, capacity=1000)

    # initialize WebSpider
    web_spider = spider.WebSpider(fetcher, parser, saver, url_filter=url_filter, monitor_sleep_time=5)

    # first crawl the Wandoujia pages once; keep the monitor running after the crawl finishes
    web_spider.set_start_url("http://www.wandoujia.com/apps", ("wandoujia",), priority=0, deep=0, critical=False)
    web_spider.start_work_and_wait_done(fetcher_num=10, is_over=False)

    # then crawl the 360 app store pages; stop the monitor after the crawl finishes
    web_spider.set_start_url("http://zhushou.360.cn/", ("360app",), priority=0, deep=0, critical=False)
    web_spider.start_work_and_wait_done(fetcher_num=10, is_over=True)
    return
Example #10
def test_spider_async():
    """
    test spider with asyncio
    """
    # get the event loop
    loop = asyncio.get_event_loop()

    # define fetcher, parser and saver; you can also rewrite any of these three classes
    fetcher = spider.FetcherAsync(max_repeat=3, sleep_time=0)
    parser = spider.ParserAsync(max_deep=1)
    saver = spider.SaverAsync(save_pipe=open("out_spider_async.txt", "w"))

    # initialize WebSpiderAsync
    web_spider_async = spider.WebSpiderAsync(fetcher,
                                             parser,
                                             saver,
                                             url_filter=spider.UrlFilter(),
                                             loop=loop)

    # add seed url
    web_spider_async.set_start_url("http://zhushou.360.cn/", keys=("360web", ))

    # start crawling and wait for it to finish
    web_spider_async.start_work_and_wait_done(fetcher_num=10)
    return
Example #11
def test_spider():
    """
    test spider
    """
    # define fetcher, parser and saver; you can also rewrite any of these three classes
    fetcher = spider.Fetcher(max_repeat=3, sleep_time=0)
    parser = spider.Parser(max_deep=1)
    saver = spider.Saver(save_pipe=open("out_spider_thread.txt", "w"))

    # define url filtering; UrlFilter uses a Set here, suitable when the number of urls is small
    black_patterns = (
        spider.CONFIG_URLPATTERN_FILES,
        r"binding",
        r"download",
    )
    white_patterns = (r"^http[s]{0,1}://(www\.){0,1}(zhushou\.360)\.(com|cn)", )
    url_filter = spider.UrlFilter(black_patterns=black_patterns,
                                  white_patterns=white_patterns,
                                  capacity=1000)

    # initialize WebSpider
    web_spider = spider.WebSpider(fetcher,
                                  parser,
                                  saver,
                                  url_filter=url_filter,
                                  monitor_sleep_time=5)

    # add seed url
    web_spider.set_start_url("http://zhushou.360.cn/", keys=("360web", ))

    # start crawling and wait for it to finish
    web_spider.start_work_and_wait_done(fetcher_num=10, is_over=True)
    return
Example #12
def test_spider():
    """
    test spider
    """
    # initialize fetcher / parser / saver / proxieser
    fetcher = MyFetcher(sleep_time=0, max_repeat=1)
    parser = MyParser(max_deep=1)
    saver = MySaver(save_pipe=open("out.txt", "w"))
    # proxieser = MyProxies(sleep_time=5)

    # define url_filter
    url_filter = spider.UrlFilter(white_patterns=(re.compile(r"^http[s]?://(www\.)?appinn\.com"), ), capacity=None)

    # initialize web_spider
    web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=None, url_filter=url_filter, queue_parse_size=-1, queue_save_size=-1)
    # web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=proxieser, url_filter=url_filter, queue_parse_size=100, queue_proxies_size=100)

    # add start url
    web_spider.set_start_url("https://www.appinn.com/", priority=0, keys={"type": "index"}, deep=0)

    # start web_spider
    web_spider.start_working(fetcher_num=20)

    # wait for finished
    web_spider.wait_for_finished()
    return
Example #13
def test_spider():
    """
    test spider
    """
    # initialize fetcher / parser / saver
    fetcher = MyFetcher(max_repeat=3, sleep_time=1)
    parser = MyParser(max_deep=3)
    saver = spider.Saver(save_pipe=open("./spider/out_thread.txt", "w"))
    # define url_filter
    url_filter = spider.UrlFilter(black_patterns=black_patterns,
                                  white_patterns=white_patterns,
                                  capacity=None)

    # initialize web_spider
    web_spider = spider.WebSpider(fetcher,
                                  parser,
                                  saver,
                                  proxieser=None,
                                  url_filter=url_filter,
                                  max_count=10,
                                  max_count_in_proxies=100)

    # add start url
    web_spider.set_start_url("http://blog.jobbole.com/all-posts/",
                             priority=0,
                             keys={},
                             deep=0)

    # start web_spider
    web_spider.start_working(fetcher_num=10)

    # wait for finished
    web_spider.wait_for_finished()
    return
Example #14
File: test.py Project: djun/PSpider
def test_spider_distributed():
    """
    test distributed spider
    """
    # initialize fetcher / parser / saver; you can also rewrite these three classes
    fetcher = spider.Fetcher(max_repeat=3, sleep_time=0)
    parser = spider.Parser(max_deep=-1)
    saver = spider.Saver(save_pipe=open("out_spider_distributed.txt", "w"))

    # define url_filter
    url_filter = spider.UrlFilter(black_patterns=black_patterns,
                                  white_patterns=white_patterns)

    # initialize web_spider
    web_spider_dist = spider.WebSpiderDist(fetcher,
                                           parser,
                                           saver,
                                           url_filter=url_filter,
                                           monitor_sleep_time=5)
    web_spider_dist.init_redis(host="localhost",
                               port=6379,
                               key_high_priority="spider.high",
                               key_low_priority="spider.low")

    # start web_spider
    web_spider_dist.start_work_and_wait_done(fetcher_num=10)
    return
Example #15
def test_spider():
    """
    test spider
    """
    # initialize fetcher / parser / saver / proxieser
    fetcher = MyFetcher(sleep_time=1, max_repeat=0)
    parser = MyParser(max_deep=2)
    saver = MySaver(save_pipe=open("out_thread.txt", "w"))
    # proxieser = MyProxies(sleep_time=5)

    # define url_filter
    url_filter = spider.UrlFilter(black_patterns=black_patterns, white_patterns=white_patterns, capacity=None)

    # initialize web_spider
    web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=None, url_filter=url_filter, queue_parse_size=-1)
    # web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=None, url_filter=url_filter, queue_parse_size=100, queue_proxies_size=100)

    # add start url
    web_spider.set_start_url("http://zhushou.360.cn/", priority=0, keys={"type": "360"}, deep=0)

    # start web_spider
    web_spider.start_working(fetcher_num=20)

    # wait for finished
    web_spider.wait_for_finished()
    return
Example #16
def test_spider_async():
    """
    test spider with asyncio
    """
    web_spider_async = spider.WebSpiderAsync(url_filter=spider.UrlFilter())
    web_spider_async.set_start_url("http://zhushou.360.cn/")
    web_spider_async.start_work_and_wait_done()
    return
Example #17
def get_douban_movies():

    headers = {
        "User-Agent":
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36",
        "Host": "movie.douban.com",
        "Accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate, sdch, br",
        "Accept-Language": "zh-CN, zh; q=0.8, en; q=0.6",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
        "Cookie": "bid=Pd48iLTpsf8"
    }

    # collect the initial urls
    all_urls = set()

    resp = requests.get("https://movie.douban.com/tag/",
                        headers=headers,
                        verify=False)
    assert resp.status_code == 200, resp.status_code

    soup = BeautifulSoup(resp.text, "html5lib")
    a_list = soup.find_all("a",
                           href=re.compile(r"^/tag/", flags=re.IGNORECASE))
    all_urls.update([(a_soup.get_text(),
                      "https://movie.douban.com" + a_soup.get("href"))
                     for a_soup in a_list])

    resp = requests.get("https://movie.douban.com/tag/?view=cloud",
                        headers=headers,
                        verify=False)
    assert resp.status_code == 200, resp.status_code

    soup = BeautifulSoup(resp.text, "html5lib")
    a_list = soup.find_all("a",
                           href=re.compile(r"^/tag/", flags=re.IGNORECASE))
    all_urls.update([(a_soup.get_text(),
                      "https://movie.douban.com" + a_soup.get("href"))
                     for a_soup in a_list])

    # build the spider
    dou_spider = spider.WebSpider(MovieFetcher(),
                                  MovieParser(max_deep=-1, max_repeat=1),
                                  MovieSaver(open("doubanmovie.txt", "w")),
                                  spider.UrlFilter())
    # dou_spider.set_start_url("https://movie.douban.com/tag/新海诚",  ("index", "test"), priority=0, critical=False)
    for tag, url in all_urls:
        dou_spider.set_start_url(url, ("index", tag),
                                 priority=1,
                                 critical=True)
        pass
    dou_spider.start_work_and_wait_done(fetcher_num=20)
    return
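
The Douban examples (Examples #17, #19 and #20) rely on module-level imports that the listing omits. A minimal sketch of what they likely include (MovieFetcher, MovieParser and MovieSaver are project-specific subclasses defined elsewhere):

# assumed module-level imports for the Douban examples (not shown in the listing)
import re
import logging
import requests
import pymysql  # only needed for Example #20
from bs4 import BeautifulSoup
import spider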
Example #18
def test_spider(mysql, spider_type):
    """
    test spider
    """
    # define fetcher, parser and saver; you can also rewrite any of these three classes
    fetcher = spider.Fetcher(normal_max_repeat=3, normal_sleep_time=0, critical_max_repeat=5, critical_sleep_time=5)
    # parser = spider.Parser(max_deep=1, max_repeat=2)
    parser = MyParser(max_deep=1, max_repeat=2)

    # define url filtering
    black_patterns = (spider.CONFIG_URLPATTERN_FILES, r"binding", r"download", )
    white_patterns = (r"^http[s]{0,1}://(www\.){0,1}(wandoujia|(zhushou\.360)|duba_\d)\.(com|cn)", )

    if not mysql:
        saver = spider.Saver(save_pipe=open("out.txt", "w", encoding="utf-8"))

        # UrlFilter backed by a Set, suitable when the number of urls is small
        url_filter = spider.UrlFilter(black_patterns=black_patterns, white_patterns=white_patterns, capacity=None)
    else:
        saver = spider.SaverMysql(host="localhost", user="******", passwd="123456", database="default")
        saver.change_sqlstr("insert into t_test(url, title, getdate) values (%s, %s, %s);")

        # UrlFilter backed by a BloomFilter, suitable when the number of urls is huge
        url_filter = spider.UrlFilter(black_patterns=black_patterns, white_patterns=white_patterns, capacity=10000)

    # decide whether to use a ThreadPool or a ProcessPool
    if spider_type == "thread":
        web_spider = spider.WebSpiderT(fetcher, parser, saver, url_filter=url_filter, monitor_sleep_time=5)
    else:
        web_spider = spider.WebSpiderP(fetcher, parser, saver, url_filter=url_filter, monitor_sleep_time=5)

    parser_num = 1 if spider_type == "thread" else 3

    # first crawl the Wandoujia pages once; keep the monitor running after the crawl finishes
    web_spider.set_start_url("http://www.wandoujia.com/apps", ("wandoujia",), priority=0, deep=0, critical=False)
    web_spider.start_work_and_wait_done(fetcher_num=10, parser_num=parser_num, is_over=False)

    # then crawl the 360 app store pages and exercise the effect of the critical parameter; stop the monitor after the crawl finishes
    web_spider.set_start_url("http://zhushou.360.cn/", ("360app",), priority=0, deep=0, critical=False)
    for i in range(5):
        web_spider.set_start_url("https://www.duba_%d.com/" % i, ("critical",), priority=0, deep=0, critical=True)
    web_spider.start_work_and_wait_done(fetcher_num=10, parser_num=parser_num, is_over=True)
    return
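
A possible invocation of this parameterized helper, a sketch assuming it is driven from a __main__ guard that the listing omits:

if __name__ == "__main__":
    # crawl without MySQL, using the thread-pool based spider
    test_spider(mysql=False, spider_type="thread")
    # alternatively: test_spider(mysql=True, spider_type="process")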
Example #19
def get_douban_movies():

    headers = {
        "User-Agent":
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36",
        "Host":
        "movie.douban.com",
        "Accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Encoding":
        "gzip, deflate, sdch, br",
        "Accept-Language":
        "zh-CN, zh; q=0.8, en; q=0.6",
        "Cache-Control":
        "max-age=0",
        "Connection":
        "keep-alive",
        "Upgrade-Insecure-Requests":
        "1",
        "Cookie":
        'bid=TWn93lyonNk; ll="118254"; gr_user_id=118696be-aa6a-42e9-a20f-932c29fcddac; viewed="5333562_5948760_4736118_4241826_1495763_1433583_2124114_6430747_24335672"; ps=y; _pk_ref.100001.8cb4=%5B%22%22%2C%22%22%2C1490076711%2C%22https%3A%2F%2Fmovie.douban.com%2Fsubject%2F1292052%2Freviews%22%5D; _ga=GA1.2.1671303578.1469101452; ue="*****@*****.**"; dbcl2="33045345:gXYCq8g9sy4"; ck=5VGo; __utmt=1; _vwo_uuid_v2=98306AEEC1B83E40741FF0A8A58DC180|c5bbf2b10ddb9854ac614269b546a464; ap=1; push_noty_num=0; push_doumail_num=0; _pk_id.100001.8cb4=88a4be0bc4943075.1469262289.53.1490077859.1490064764.; _pk_ses.100001.8cb4=*; __utma=30149280.1671303578.1469101452.1490062608.1490076712.73; __utmb=30149280.16.10.1490076712; __utmc=30149280; __utmz=30149280.1489996683.69.35.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); __utmv=30149280.3304'
    }

    # collect the initial urls
    all_urls = set()

    resp = requests.get("https://movie.douban.com/tag/",
                        headers=headers,
                        verify=False)
    assert resp.status_code == 200, resp.status_code

    soup = BeautifulSoup(resp.text, "html5lib")
    a_list = soup.find_all("a",
                           href=re.compile(r"^/tag/", flags=re.IGNORECASE))
    all_urls.update([(a_soup.get_text(),
                      "https://movie.douban.com" + a_soup.get("href"))
                     for a_soup in a_list])

    # resp = requests.get("https://movie.douban.com/tag/?view=cloud", headers=headers, verify=False)
    # assert resp.status_code == 200, resp.status_code

    # soup = BeautifulSoup(resp.text, "html5lib")
    # a_list = soup.find_all("a", href=re.compile(r"^/tag/", flags=re.IGNORECASE))
    # all_urls.update([(a_soup.get_text(), "https://movie.douban.com" + a_soup.get("href")) for a_soup in a_list])
    logging.warning("all urls: %s", len(all_urls))

    # build the spider
    dou_spider = spider.WebSpider(MovieFetcher(), MovieParser(max_deep=-1),
                                  spider.Saver(), spider.UrlFilter())
    for tag, url in all_urls:
        print(tag + ":" + url)
        dou_spider.set_start_url(url, ("index", tag), priority=1)
    dou_spider.start_work_and_wait_done(fetcher_num=20)
    return
Example #20
def get_douban_movies():

    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36",
        "Host": "movie.douban.com",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate, sdch, br",
        "Accept-Language": "zh-CN, zh; q=0.8, en; q=0.6",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
        "Cookie": "bid=Pd48iLTpsf8"
    }

    # collect the initial urls
    all_urls = set()

    resp = requests.get("https://movie.douban.com/tag/", headers=headers, verify=False)
    assert resp.status_code == 200, resp.status_code

    soup = BeautifulSoup(resp.text, "html5lib")
    a_list = soup.find_all("a", href=re.compile(r"^/tag/", flags=re.IGNORECASE))
    all_urls.update([(a_soup.get_text(), "https://movie.douban.com" + a_soup.get("href")) for a_soup in a_list])

    resp = requests.get("https://movie.douban.com/tag/?view=cloud", headers=headers, verify=False)
    assert resp.status_code == 200, resp.status_code

    soup = BeautifulSoup(resp.text, "html5lib")
    a_list = soup.find_all("a", href=re.compile(r"^/tag/", flags=re.IGNORECASE))
    all_urls.update([(a_soup.get_text(), "https://movie.douban.com" + a_soup.get("href")) for a_soup in a_list])
    logging.warning("all urls: %s", len(all_urls))

    # query already-saved data
    conn = pymysql.connect(host="xx.xx.xx.xx", user="******", password="", db="db_my", charset="utf8")
    cursor = conn.cursor()
    cursor.execute("select m_url from t_doubanmovies;")

    bloomfilter = spider.UrlFilter()
    bloomfilter.update([item[0] for item in cursor.fetchall()])
    logging.warning("update bloomfilter success: %s", cursor.rowcount)

    cursor.close()
    conn.close()

    # build the spider
    dou_spider = spider.WebSpider(MovieFetcher(), MovieParser(max_deep=-1, max_repeat=1), MovieSaver(), bloomfilter)
    for tag, url in all_urls:
        dou_spider.set_start_url(url, ("index", tag), priority=1, critical=True)
    dou_spider.start_work_and_wait_done(fetcher_num=20)
    return
Example #21
def test_spider_async():
    """
    test spider with asyncio
    """
    # initialize WebSpiderAsync
    web_spider_async = spider.WebSpiderAsync(max_repeat=3,
                                             sleep_time=0,
                                             max_deep=1,
                                             save_pipe=open(
                                                 "out_spider_async.txt", "w"),
                                             url_filter=spider.UrlFilter())

    # add seed url
    web_spider_async.set_start_url("http://zhushou.360.cn/")

    # start crawling and wait for it to finish
    web_spider_async.start_work_and_wait_done(fetcher_num=10)
    return
Example #22
def test_spider():
    """
    test function
    """
    # initialize fetcher / parser / saver / proxieser
    fetcher = MyFetcher(sleep_time=0, max_repeat=1)
    parser = MyParser(max_deep=1)
    saver = MySaver(save_pipe=open("out.txt", "w"))
    # proxieser = MyProxies(sleep_time=5)

    # define url_filter
    url_filter = spider.UrlFilter(
        white_patterns=(re.compile(r"^http[s]?://docs\.rsshub\.app"), ),
        capacity=None)

    # define web_spider
    web_spider = spider.WebSpider(fetcher,
                                  parser,
                                  saver,
                                  proxieser=None,
                                  url_filter=url_filter,
                                  queue_parse_size=-1,
                                  queue_save_size=-1)
    # web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=proxieser, url_filter=url_filter, queue_parse_size=100, queue_proxies_size=100)

    # add start url
    web_spider.set_start_url("https://docs.rsshub.app/",
                             priority=0,
                             keys={"type": "index"},
                             deep=0)

    # start web_spider
    web_spider.start_working(fetchers_num=20)

    # wait for the spider to finish
    web_spider.wait_for_finished()
    return
Example #23
def test_spider():
    """
    test function
    """
    # initialize fetcher / parser / saver / proxieser
    fetcher = MyFetcher(sleep_time=1, max_repeat=3)
    parser = MyParser(max_deep=1)
    saver = MySaver(save_pipe=open("out.txt", "w"))
    # proxieser = MyProxies(sleep_time=5)

    # define url_filter
    url_filter = spider.UrlFilter(
        white_patterns=(re.compile(r"^https?://www\.appinn\.com"), ))

    # define web_spider
    web_spider = spider.WebSpider(fetcher,
                                  parser,
                                  saver,
                                  url_filter=url_filter,
                                  queue_parse_size=-1,
                                  queue_save_size=-1)
    # web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=proxieser, queue_parse_size=100, queue_proxies_size=100)

    # add the start Task
    web_spider.set_start_task(
        spider.TaskFetch(priority=0,
                         keys={"type": "index"},
                         deep=0,
                         url="https://www.appinn.com/"))

    # start web_spider
    web_spider.start_working(fetchers_num=5)

    # wait for the spider to finish
    web_spider.wait_for_finished()
    return
Example #24
def test_spider_async():
    """
    test spider with asyncio
    """
    # get the event loop
    loop = asyncio.get_event_loop()

    # define fetcher, parser and saver; you can also rewrite any of these three classes
    fetcher = spider.FetcherAsync(max_repeat=3, sleep_time=0)
    parser = spider.ParserAsync(max_deep=1)
    saver = spider.SaverAsync(save_pipe=open("out_spider_async.txt", "w"))

    # define url filtering; UrlFilter uses a BloomFilter here, suitable when the number of urls is large
    black_patterns = (
        spider.CONFIG_URLPATTERN_FILES,
        r"binding",
        r"download",
    )
    white_patterns = (r"^http[s]{0,1}://(www\.){0,1}(zhushou\.360)\.(com|cn)", )
    url_filter = spider.UrlFilter(black_patterns=black_patterns,
                                  white_patterns=white_patterns,
                                  capacity=10000)

    # initialize WebSpiderAsync
    web_spider_async = spider.WebSpiderAsync(fetcher,
                                             parser,
                                             saver,
                                             url_filter=url_filter,
                                             loop=loop)

    # add seed url
    web_spider_async.set_start_url("http://zhushou.360.cn/", keys=("360web", ))

    # start crawling and wait for it to finish
    web_spider_async.start_work_and_wait_done(fetcher_num=10)
    return