Example #1
def crawl_comment_page(mid):
    limit = get_max_comment_page()
    cur_page = 1
    next_url = ''
    while cur_page <= limit:
        cur_time = int(time.time()*1000)
        if cur_page == 1:
            url = start_url.format(mid, cur_time)
        else:
            url = base_url.format(next_url, cur_time)
        html = get_page(url, user_verify=False)
        comment_datas = comment.get_comment_list(html, mid)

        if not comment_datas and cur_page == 1:
            crawler.warning('Comments for weibo id {} were not crawled successfully, please investigate the cause'.format(mid))
            return

        save_comments(comment_datas)
        # Each page depends on the "next url" extracted from the previous page,
        # so this loop is not a good fit for being split into separate network tasks
        # (it would mostly just be awkward).
        next_url = comment.get_next_url(html)

        if not next_url:
            crawler.info('Comment crawling for weibo {} is complete'.format(mid))
            return
        cur_page += 1
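Example #1 relies on several module-level names that the excerpt does not show (start_url, base_url, get_max_comment_page, save_comments). The sketch below is only an illustration of what they might look like: the URL templates are guesses based on the comment endpoint visible in Example #9, and the page limit and persistence step are placeholders, not the project's real definitions.

# Hypothetical templates: page 1 is requested by weibo id, later pages by the
# "next page" query fragment extracted from the previous response.
start_url = 'http://weibo.com/aj/v6/comment/big?ajwvr=6&id={}&__rnd={}'
base_url = 'http://weibo.com/aj/v6/comment/big?{}&__rnd={}'


def get_max_comment_page():
    # Assumed to come from configuration; hard-coded for this sketch.
    return 50


def save_comments(comment_datas):
    # Placeholder for the project's persistence layer.
    print('saving {} comments'.format(len(comment_datas)))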
Example #2
File: comment.py  Project: zjlx/WeiboSpider
def crawl_comment_by_page(mid, page_num):
    cur_time = int(time.time() * 1000)
    cur_url = base_url.format(mid, page_num, cur_time)
    html = get_page(cur_url, user_verify=False)
    comment_datas = comment.get_comment_list(html, mid)
    save_comments(comment_datas)
    wb_data.set_weibo_comment_crawled(mid)
    return html
Example #3
def crawl_comment_by_page(mid, page_num):
    cur_url = BASE_URL.format(mid, page_num)
    html = get_page(cur_url, auth_level=1, is_ajax=True)
    comment_datas = comment.get_comment_list(html, mid)
    CommentOper.add_all(comment_datas)
    if page_num == 1:
        WbDataOper.set_weibo_comment_crawled(mid)
    return html, comment_datas
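Example #3 returns both the raw html and the parsed comments, so a caller can page forward until a page comes back empty. A hypothetical driver, with the page cap purely assumed, might look like this:

def crawl_all_comments(mid, max_pages=50):
    # Hypothetical driver: walk pages until one yields no comments or the
    # assumed max_pages cap is reached.
    for page_num in range(1, max_pages + 1):
        _, comment_datas = crawl_comment_by_page(mid, page_num)
        if not comment_datas:
            break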
Example #4
def crawl_comment_by_page(mid, page_num):
    cur_time = int(time.time() * 1000)
    cur_url = base_url.format(mid, page_num, cur_time)
    html = get_page(cur_url, user_verify=False)
    comment_datas = comment.get_comment_list(html, mid)
    save_comments(comment_datas)
    if page_num == 1:
        wb_data.set_weibo_comment_crawled(mid)
    return html
Example #5
 def test_parse_comment_page(self):
     """
     测试对评论页的解析
     :return: 
     """
     from page_parse import comment
     with open('tests/comment.html', encoding='utf-8') as f:
         html = f.read()
     comment_list = comment.get_comment_list(html, '1123331211')
     self.assertEqual(len(comment_list), 19)
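Example #5 is a method excerpted from a unittest.TestCase, so it needs a wrapper class to run on its own. A minimal harness, with the class name assumed, would be:

import unittest

from page_parse import comment


class CommentParseTest(unittest.TestCase):  # class name is an assumption
    def test_parse_comment_page(self):
        with open('tests/comment.html', encoding='utf-8') as f:
            html = f.read()
        self.assertEqual(len(comment.get_comment_list(html, '1123331211')), 19)


if __name__ == '__main__':
    unittest.main()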
Example #6
 def test_parse_comment_page(self):
     """
     测试对评论页的解析
     :return: 
     """
     from page_parse import comment
     url = TEST_SERVER + 'comment.html'
     resp = requests.get(url)
     resp.encoding = 'utf-8'
     html = resp.text
     comment_list = comment.get_comment_list(html, '1123331211')
     self.assertEqual(len(comment_list), 19)
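Example #6 fetches the same fixture over HTTP from TEST_SERVER instead of reading it from disk. One way to provide such a server locally is Python's built-in http.server; the fixture directory and port below are assumptions that have to match TEST_SERVER:

import functools
from http.server import HTTPServer, SimpleHTTPRequestHandler

# Serve tests/ on http://localhost:8000/ so that
# TEST_SERVER = 'http://localhost:8000/' resolves comment.html.
handler = functools.partial(SimpleHTTPRequestHandler, directory='tests')
HTTPServer(('localhost', 8000), handler).serve_forever()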
Example #7
def crawl_comment_by_page(mid, page_num, session):
    try:
        cur_url = BASE_URL.format(mid, page_num)
        html = get_page(cur_url, auth_level=1, is_ajax=True)
        comment_datas, seed_ids = comment.get_comment_list(html, mid)
    except SoftTimeLimitExceeded:
        crawler.error(
            "comment SoftTimeLimitExceeded    mid={mid} page_num={page_num}".
            format(mid=mid, page_num=page_num))
        # Retry the page instead of falling through with undefined results.
        return crawl_comment_by_page(mid, page_num, session)
    CommentOper.add_all(comment_datas, session)
    SeedidsOper.insert_seeds(seed_ids, session)
    if page_num == 1:
        WbDataOper.set_weibo_comment_crawled(mid, session)
    return html, comment_datas
Example #8
def crawl_comment_by_page(mid, page_num):
    try:
        cur_url = BASE_URL.format(mid, page_num)
        html = get_page(cur_url, auth_level=1, is_ajax=True)
        comment_datas = comment.get_comment_list(html, mid)
    except SoftTimeLimitExceeded:
        crawler.error(
            "comment SoftTimeLimitExceeded    mid={mid} page_num={page_num}".
            format(mid=mid, page_num=page_num))
        app.send_task('tasks.comment.crawl_comment_by_page',
                      args=(mid, page_num),
                      queue='comment_page_crawler',
                      routing_key='comment_page_info')
        # The page has been re-queued; return instead of falling through
        # with comment_datas undefined.
        return None, []
    CommentOper.add_all(comment_datas)
    if page_num == 1:
        WbDataOper.set_weibo_comment_crawled(mid)
    return html, comment_datas
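The SoftTimeLimitExceeded handling in Examples #7 and #8 only makes sense if these functions run as Celery tasks with a soft time limit, so that app.send_task can re-queue the page when the limit is hit. A hypothetical registration, with the broker URL and limit value assumed, could look like:

from celery import Celery
from celery.exceptions import SoftTimeLimitExceeded  # the exception caught above

app = Celery('weibo_task', broker='redis://localhost:6379/0')  # broker URL assumed


@app.task(soft_time_limit=60)  # limit value is an assumption
def crawl_comment_by_page(mid, page_num):
    # When the soft limit expires, Celery raises SoftTimeLimitExceeded inside
    # the task body, which is what the except clauses in the examples catch.
    ...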
Example #9
def test_parse_comment_info(cookies):
    url = 'http://weibo.com/aj/v6/comment/big?ajwvr=6&id=4141730615319112&page=4'
    content = requests.get(url, cookies=cookies).text
    assert len(comment.get_comment_list(content, '4141730615319112')) > 0
    time.sleep(REQUEST_INTERNAL)
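The literal URL in Example #9 exposes the ajax endpoint behind these examples, so BASE_URL in Examples #3, #7, and #8 presumably follows the same pattern; the template below is an inference, not the project's verbatim constant. The cookies argument is presumably a pytest fixture holding logged-in Weibo cookies, which is not shown here.

# Inferred from the URL used in Example #9; treat it as an assumption.
BASE_URL = 'http://weibo.com/aj/v6/comment/big?ajwvr=6&id={}&page={}'

print(BASE_URL.format('4141730615319112', 4))
# -> http://weibo.com/aj/v6/comment/big?ajwvr=6&id=4141730615319112&page=4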