Example #1
 def video_page(self, url, channel=None):
     """
     Because iqiyi reports a hot index instead of a play count,
     the crawler was updated on 2018-11-23.
     """
     url = self.rebuild_video_url(url)
     start = time.time()
     get_page = retry_get_url(url)
     end = time.time() - start
     print("first request costs %s seconds" % end)
     if get_page is None:
         print('Failed to get html page for url: %s' % url)
         return None
     get_page.encoding = 'utf-8'
     page = get_page.text
     soup = BeautifulSoup(page, 'html.parser')
     page_info = soup.find("div", {"is": "i71-play"})[":page-info"]
     page_dic = json.loads(page_info)
     title = page_dic["tvName"]
     url = page_dic["pageUrl"]
     dura_str = page_dic["duration"]
     duration = trans_duration(dura_str)
     try:
         releaser = page_dic["user"]["name"]
         releaserUrl = page_dic["user"]["profileUrl"]
     except (KeyError, TypeError):  # page info may lack user data
         releaser = None
         releaserUrl = None
     video_info = soup.find("div", {"is": "i71-play"})[":video-info"]
     video_dic = json.loads(video_info)
     release_time = video_dic["firstPublishTime"]
     tvId = video_dic["tvId"]
     start1 = time.time()
     hot_idx_url = "https://pub.m.iqiyi.com/jp/h5/count/hotDisplay/?qipuId=%s" % tvId
     get_hot_idx = retry_get_url(hot_idx_url)
     end2 = time.time() - start1
     print("second request costs %s seconds" % end2)
     hot_idx_str = get_hot_idx.text
     hot_idx = int(
         re.findall(r"\d+", ' '.join(re.findall(r'"count":\d+',
                                                hot_idx_str)))[0])
     fetch_time = int(
         datetime.datetime.timestamp(datetime.datetime.now()) * 1e3)
     video_page_dict = copy.deepcopy(self.video_data)
     video_page_dict["title"] = title
     video_page_dict["url"] = url
     video_page_dict["duration"] = duration
     video_page_dict["releaser"] = releaser
     video_page_dict["releaserUrl"] = releaserUrl
     video_page_dict["release_time"] = release_time
     video_page_dict["hot_idx"] = hot_idx
     video_page_dict["fetch_time"] = fetch_time
     video_page_dict["tvId"] = tvId
     if channel is not None:
         video_page_dict["channel"] = channel
     return video_page_dict
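
Every example in this section leans on the project helper retry_get_url, whose definition is not shown. A minimal sketch of what such a wrapper might look like, assuming it returns the requests.Response on success and None once all attempts fail; the retry count, the backoff, and the handling of the integer proxies argument seen throughout are assumptions:

import time
import requests

def retry_get_url(url, retries=3, timeout=10, **kwargs):
    # Hypothetical sketch. The real helper apparently also maps its
    # integer `proxies` argument to an entry from a proxy pool; that
    # lookup is omitted here.
    kwargs.pop("proxies", None)
    for attempt in range(retries):
        try:
            resp = requests.get(url, timeout=timeout, **kwargs)
            resp.raise_for_status()
            return resp
        except requests.RequestException:
            time.sleep(2 ** attempt)  # simple exponential backoff
    return None
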
Example #2
 def get_hot_videos(self, url="", max_page=10, **kwargs):
     data_list = []
     headers = {
             "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
             "accept-encoding": "gzip, deflate",
             "accept-language": "zh,zh-CN;q=0.9",
             "cache-control": "max-age=0",
             # "cookie": "pgv_pvi=3517925376; pgv_pvid=3591400976; RK=sDRQYhGkF/; ptcz=8100687e80e810853d573a8a9ced1155a9a9683321075161f61b773de19ff4c5; pac_uid=0_bf3968e8e3157; ts_uid=1260359885; tvfe_boss_uuid=082fecb8ba01b06d; QQLivePCVer=50181223; video_guid=ce0aa0f8275ad435; video_platform=2; bucket_id=9231001; mobileUV=1_1707c108811_53c13; tvfe_search_uid=3c2fd48b-03f8-4f63-af8c-bb2bd367af2b; ts_refer=www.baidu.com/link; pgv_info=ssid=s7741803072; ad_play_index=80",
             # "if-modified-since": "Fri, 28 Feb 2020 08:00:00 GMT",
             "referer": "https://v.qq.com/biu/ranks/?t=hotsearch&channel=hot",
             "sec-fetch-mode": "navigate",
             "sec-fetch-site": "none",
             "sec-fetch-user": "******",
             "upgrade-insecure-requests": "1",
     }
     res = retry_get_url(url, headers=headers, timeout=10, proxies=3)
     page_text = res.content.decode("utf-8")
     html = etree.HTML(page_text)
     print(html)
     xpath_list = html.xpath("//body[@class='page_search']/div[@class='search_container']/div[@class='wrapper']/div[@class='wrapper_main']/div")
     for li in xpath_list:
         title_url = li.xpath("./a/@href")
         if title_url:
             print(title_url)
             data = crawler_qq_video_page(title_url[0])
             if not data:
                 continue
             data["is_hot"] = 1
             data_list.append(data)
     output_result(result_Lst=data_list,
                   platform=self.platform,
                   output_to_es_raw=True,
                   )
     data_list.clear()
Example #3
 def get_releaser_follower_num(self, releaserUrl):
     headers = {
         "Accept-Encoding": "gzip",
         # "sdk-version": "1",
         "User-Agent": "ttnet okhttp/3.10.0.2",
         "Host": "aweme.snssdk.com",
         "Connection": "Keep-Alive",
     }
     releaser_id = self.get_releaser_id(releaserUrl)
     time.sleep(1)
     releaserUrl = 'https://{2}/aweme/v1/user/?ac=WIFI&device_id={1}&os_api=18&app_name=aweme&channel=App Store&device_platform=ipad&device_type=iPad6,11&app_version=8.7.1&js_sdk_version=1.17.2.0&version_code=8.7.1&os_version=13.2.3&screen_width=1536&user_id={0}'.format(
         releaser_id, str(random.randint(40000000000, 90000000000)),
         random.choice(self.api_list))
     count = 0
     while count < 3:
         try:
             count += 1
             time.sleep(random.random())
             get_page = retry_get_url(releaserUrl,
                                      headers=headers,
                                      proxies=10)
             page = get_page.json()
             follower_num = page["user"].get("follower_count")
             print('%s follower number is %s' % (releaserUrl, follower_num))
             releaser_img = page["user"].get("avatar_thumb").get(
                 "url_list")[0]
             return follower_num, releaser_img
         except Exception:  # network or parsing failure: retry
             print("can't find followers")
             continue
     else:
         return None, None
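
The while ... else in this example is deliberate: Python runs the else branch only when the loop condition becomes false, i.e. after all three attempts fail without reaching the return inside the try. A stripped-down illustration of the pattern, with fetch as a placeholder callable:

def fetch_with_retries(fetch, attempts=3):
    count = 0
    while count < attempts:
        count += 1
        try:
            return fetch()  # success leaves the function immediately
        except Exception:
            continue        # failure: next attempt
    else:
        return None         # reached only when every attempt failed
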
Example #4
 def get_hot_words(self):
     bulk_list = []
     url = "https://i.snssdk.com/api/feed/hotboard_online/v1/?fe_api_version=2&is_in_channel=1&count=50&fe_source=hot_board&tab_name=tab_hot_board&is_web_refresh=1&style_type=18&client_extra_params=%7B%22hot_board_source%22%3A%22hot_board%22%2C%22fe_version%22%3A%22v11%22%7D&extra=%7B%22CardStyle%22%3A0%2C%22JumpToWebList%22%3Atrue%7D&category=hotboard_online&stream_api_version=88&tt_daymode=1&iid=105857671701&device_id=70787469432&ac=wifi&mac_address=48%3AA4%3A72%3A58%3A86%3AD5&channel=store_yingyonghui_0107&aid=13&app_name=news_article&version_code=763&version_name=7.6.3&device_platform=android&ab_version=801968%2C1419043%2C668775%2C1462526%2C1512584%2C1190522%2C1489307%2C1157750%2C1157634%2C1419598%2C1493796%2C1439625%2C1469498%2C668779%2C1417597%2C662099%2C1403340%2C668774%2C1509255%2C1396151%2C821967%2C857803%2C660830%2C1434501%2C662176%2C1491631&ab_feature=102749%2C94563&device_type=OPPO%2BR11&device_brand=OPPO&language=zh&os_api=22&os_version=5.1.1&uuid=866174725888628&openudid=48a4725886d57203&manifest_version_code=7630&resolution=900*1600&dpi=320&update_version_code=76309&plugin=0&tma_jssdk_version=1.54.0.3&rom_version=coloros__r11-user%2B5.1.1%2Bnmf26x%2B500200210%2Brelease-keys&cdid=754b9ff9-5880-48b2-ac40-3880effd3f33"
     page_res = retry_get_url(url,
                              headers=self.headers,
                              proxies=3,
                              timeout=5)
     page_json = page_res.json()
     for data in page_json["data"]:
         content = data["content"]
         content_dic = json.loads(content)
         schema = content_dic["raw_data"]["schema"]
         # search_str = urllib.parse.unquote(schema)
         query = urlparse(schema).query  # wd=python&ie=utf-8
         params = parse_qs(query)  # {'wd': ['python'], 'ie': ['utf-8']}
         # parse_qs wraps every value in a list; each list here holds a
         # single value, so flatten the dict
         result = {key: params[key][0] for key in params}
         search_title = result.get("keyword")
         # search_json = result.get("search_json")
         if search_title:
             dic = {
                 "platform": self.platform,
                 "title": search_title,
                 "fetch_time":
                 int(datetime.datetime.now().timestamp() * 1e3),
                 "search_json": schema
             }
             bulk_list.append(dic)
     hot_words_output_result(bulk_list)
     return True
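
Note that urlparse/parse_qs already performs the percent-decoding, which is why the commented-out unquote call above is unnecessary. A self-contained illustration with a hypothetical schema URL:

from urllib.parse import urlparse, parse_qs

schema = "sslocal://search?keyword=%E7%83%AD%E6%90%9C&from=hot_board"  # hypothetical
params = parse_qs(urlparse(schema).query)  # {'keyword': ['热搜'], 'from': ['hot_board']}
flat = {key: value[0] for key, value in params.items()}
print(flat["keyword"])  # 热搜
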
Example #5
 def releaser_video_sum(self, releaserUrl):
     get_page = retry_get_url(releaserUrl)
     get_page.encoding = 'utf-8'
     page = get_page.text
     soup = BeautifulSoup(page, 'html.parser')
     total_video_num_str = soup.find('div', {'class': 'title'}).span.text
     total_video_num = total_video_num_str.replace('(', '').replace(')', '').replace(',', '')
     total_video_num = trans_play_count(total_video_num)
     return total_video_num
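
trans_play_count is another project helper that is not shown. A plausible sketch, assuming its job is to normalize count strings that may carry the Chinese magnitude suffixes 万 (10^4) or 亿 (10^8):

def trans_play_count(count_str):
    # Hypothetical sketch; the real helper may cover more formats.
    count_str = str(count_str).strip()
    if count_str.endswith("万"):
        return int(float(count_str[:-1]) * 1e4)
    if count_str.endswith("亿"):
        return int(float(count_str[:-1]) * 1e8)
    return int(float(count_str))
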
Example #6
 def search_page(self, title=None, search_json=None, **kwargs):
     data_list = []
     timestamp = int(datetime.datetime.now().timestamp() * 1e3)
     title = urllib.parse.quote(title)
     headers = {
         "Accept-Encoding": "gzip",
         # "X-SS-REQ-TICKET": "1587102750860",
         "passport-sdk-version": "14",
         "sdk-version": "2",
         #"Cookie": "odin_tt=d5d96b2812637e9d20681530fbbe4d52e8f76ae1b6afa8c0a173260321611c507ac6eca10991b21fc4f023e94371d457df784f959e94db673ef29a5bd2137091; qh[360]=1; history=alrvlFic6pJZXJCTWBmSmZt6KW6mevZSz5LU3OJ7DEKX42Zw%2Bc84wMR3iYGBweFy3EzZsPcNTLyXWN1AvLYP8%2BQPMLFfEpUA8bo%2F7nNtYOK7xNwC4k3XmMHe5MtzSTiM48DluNr01dkNTDyXuHrApsi4ejkwsV%2BSmAPmSeXoMzDxXhKcAuIVrRfWAJnJJwA25fG1DoezvFBTZrzZeg6kT%2BwWSG7Gx3UJB5h4L%2FH4gXlVn%2BtAtkvFMQRcjpv%2B%2Be9TBib2S%2BwcYBuUn8xsYGK%2FJKMAkptgfXrDASaOS4yHQHJVPy6UOjDxXuI4BeJN26Fs6MDEcYn%2FEoMDAAAA%2F%2F8%3D; install_id=112651077855; ttreq=1$0b37d53ca5c301ce96959dc97a67886da420b294",
         # "X-Gorgon": "0401007140017aae019cc2020b1c48dbab0ba42839014487648a",
         #"X-Khronos": "1587102750",
         "Host": "is.snssdk.com",
         "Connection": "Keep-Alive",
         "User-Agent": "okhttp/3.10.0.1",
     }
     url = "https://is.snssdk.com/api/search/content/?os_api=23&device_type=oneplus+a5010&from_search_subtab=synthesis&manifest_version_code=7690&source=search_subtab_switch&offset=0&is_ttwebview=0&action_type&is_incognito=0&keyword_type&rom_version=23&app_name=news_article&format=json&version_name=7.6.9&ac=wifi&host_abi=armeabi-v7a&update_version_code=76909&channel=baidu_0411&is_native_req=1&loadId=1&longitude=116.40717530841052&isIncognito=0&plugin=2050&forum=1&latitude=39.904680919672145&language=zh&pd=video&cur_tab_title=search_tab&aid=13&dpi=270&qrecImprId&fetch_by_ttnet=1&count=10&plugin_enable=3&search_position&ab_group=100167%2C94569%2C102754&keyword={0}&scm_version=1.0.2.830&search_json=%7B%22comment_ids%22%3A%5B%5D%2C%22event_discussion%22%3A74123%2C%22event_impression%22%3A17270790%2C%22forum_id%22%3A1664181806902302%2C%22forum_recall_wtt%22%3A%5B1664190666034183%2C1664192273575943%2C1664184430218253%2C1664185769175051%2C1664184985139212%2C1664196237152267%2C1664186792648732%2C1664188755414019%2C1664187055838215%2C1664184182571022%2C1664185938950148%2C1664188041995268%2C1664188322863172%2C1664190185024520%2C1664185602828300%2C1664184276484099%2C1664188211399684%2C1664187870713868%2C1664184484958211%2C1664183864289288%2C1664186825487371%2C1664195548700686%2C1664186585780228%2C1664197296210947%2C1664188146725901%2C1664191748459523%5D%2C%22group_source%22%3Anull%2C%22hot_gid%22%3A6816255461172445703%2C%22log_pb%22%3A%7B%22cluster_type%22%3A%220%22%2C%22entrance_hotspot%22%3A%22channel%22%2C%22hot_board_cluster_id%22%3A%226816091697949180424%22%2C%22hot_board_impr_id%22%3A%22202004171352010100140411610B1A7741%22%2C%22location%22%3A%22hot_board%22%2C%22rank%22%3A%225%22%2C%22source%22%3A%22trending_tab%22%2C%22style_id%22%3A%2210005%22%7D%2C%22mix_stick_ids%22%3A%5B1664190666034183%2C1664192273575943%2C1664184430218253%2C1664185769175051%2C1664184985139212%2C1664196237152267%2C1664186792648732%2C1664188755414019%2C1664187055838215%2C1664184182571022%2C1664185938950148%2C1664188041995268%2C1664188322863172%2C1664190185024520%2C1664185602828300%2C1664184276484099%2C1664188211399684%2C1664187870713868%2C1664184484958211%2C1664183864289288%2C1664186825487371%2C1664195548700686%2C1664186585780228%2C1664197296210947%2C1664188146725901%2C1664191748459523%5D%2C%22stick_group_ids%22%3A%5B%5D%7D&device_platform=android&search_id&has_count=0&version_code=769&from=video&device_id={1}&resolution=1080*1920&os_version=6.0.1&device_brand=Oneplus&search_sug=1&qc_query".format(
         title, random.randint(69418800000, 69418899999))
     res = retry_get_url(url, headers=headers, timeout=5, proxies=3)
     page_text = res.json()
     for one_video in page_text["data"]:
         video_dic = {}
         try:
             video_dic['title'] = one_video.get('title')
             video_dic['url'] = one_video.get('display').get("info").get(
                 "url")
             releaser_id = re.findall(r"user_id=(\d+)",
                                      one_video.get('user_source_url'))[0]
             video_dic['releaser'] = one_video.get('media_name')
             video_dic[
                 'releaserUrl'] = "https://www.toutiao.com/c/user/%s/" % releaser_id
             release_time = int(one_video.get('create_time'))
             video_dic['release_time'] = int(release_time * 1e3)
             video_dic['duration'] = int(one_video.get('video_duration'))
             video_dic['play_count'] = trans_play_count(
                 one_video.get('play_effective_count'))
             video_dic['repost_count'] = 0
             video_dic['comment_count'] = one_video.get('comment_count')
             video_dic['favorite_count'] = one_video.get('digg_count')
             video_dic['fetch_time'] = int(
                 datetime.datetime.now().timestamp() * 1e3)
             video_dic['releaser_id_str'] = "toutiao_%s" % releaser_id
             video_dic['video_img'] = one_video.get('display').get(
                 'self_info').get('image_url')
             video_dic['platform'] = "toutiao"
             video_dic["is_hot"] = 1
             video_dic["data_provider"] = "CCR"
         except Exception as e:
             print(e)
             continue
         data_list.append(video_dic)
     output_result(
         result_Lst=data_list,
         platform=self.platform,
         output_to_es_raw=True,
     )
     data_list.clear()
Example #7
 def search_page(self, title=None, **kwargs):
     data_list = []
     headers = {
         "Host": "aweme.snssdk.com",
         "Connection": "keep-alive",
         # "Cookie": "d_ticket=38c841789e38ea43c6338910dac65ffe192e3; odin_tt=82086544bb9028f027b5aea78724ccf512dead26658f45321be33bade615793782bf6ac7fe0c18b73b9592f4284413d5300974810d439b42ef0b3eaa761b1640; msh=cakLg8lvbK5CxiSWkIbD2UInwAI; sid_guard=09fe3dfd89dfbc79f081fb2db9dd81ee%7C1581832192%7C5184000%7CThu%2C+16-Apr-2020+05%3A49%3A52+GMT; uid_tt=da0b53b7563eca87c47da41f5f17c30f; uid_tt_ss=da0b53b7563eca87c47da41f5f17c30f; sid_tt=09fe3dfd89dfbc79f081fb2db9dd81ee; sessionid=09fe3dfd89dfbc79f081fb2db9dd81ee; sessionid_ss=09fe3dfd89dfbc79f081fb2db9dd81ee; install_id=104847319549; ttreq=1$51e484720311469c4b70f4754d730d538a074c4b",
         # "X-SS-REQ-TICKET": "1583139618192",
         # "X-Tt-Token": "0009fe3dfd89dfbc79f081fb2db9dd81ee013243f7134b3eb37249cc729a5276172df69a4391b56ae4bf253c3c6352322611",
         "sdk-version": "1",
         # "x-tt-trace-id": "00-9a797f160a107b431078db3e93480468-9a797f160a107b43-01",
         "User-Agent":
         "com.ss.android.ugc.aweme/990 (Linux; U; Android 5.1.1; zh_CN; OPPO R11; Build/NMF26X; Cronet/77.0.3844.0)",
         "Accept-Encoding": "gzip, deflate",
         # "X-Gorgon": "0401a0514001f64964a8ebef9f4305ccbef2df1aa3c92fdf955a",
         # "X-Khronos": "1583139618",
     }
     url = "https://aweme.snssdk.com/aweme/v1/hot/search/video/list/?hotword={0}&offset=0&count=12&source=trending_page&is_ad=0&item_id_list&is_trending=0&os_api=22&device_type=OPPO%20R11&ssmix=a&manifest_version_code=990&dpi=320&uuid=866174725888628&app_name=aweme&version_name=9.9.0&ts=1583139619&app_type=normal&ac=wifi&update_version_code=9902&channel=tengxun_new&_rticket=1583139618192&device_platform=android&iid=104847319549&version_code=990&cdid=fce00742-ccef-4b14-943d-1f62b6d637b0&openudid=48a4725886d57203&device_id=70787469432&resolution=900*1600&os_version=5.1.1&language=zh&device_brand=OPPO&aid=1128&mcc_mnc=46007".format(
         title)
     res = retry_get_url(url, headers=headers, timeout=5, proxies=3)
     page_text = res.json()
     for one_video in page_text["aweme_list"]:
         video_dic = {}
         video_dic['title'] = one_video.get('desc')
         video_dic['url'] = one_video.get('share_url')
         releaser_id = one_video.get('author_user_id')
         video_dic['releaser'] = one_video.get('author').get("nickname")
         video_dic[
             'releaserUrl'] = "https://www.iesdouyin.com/share/user/%s" % releaser_id
         release_time = one_video.get('create_time')
         video_dic['release_time'] = int(release_time * 1e3)
         video_dic['duration'] = int(one_video.get('duration') / 1000)
         video_dic['play_count'] = 0
         video_dic['repost_count'] = one_video.get('statistics').get(
             'share_count')
         video_dic['comment_count'] = one_video.get('statistics').get(
             'comment_count')
         video_dic['favorite_count'] = one_video.get('statistics').get(
             'digg_count')
         video_dic['video_id'] = one_video.get('aweme_id')
         video_dic['fetch_time'] = int(datetime.datetime.now().timestamp() *
                                       1e3)
         video_dic['releaser_id_str'] = "抖音_%s" % releaser_id
         video_dic['platform'] = "抖音"
         video_dic['video_img'] = one_video.get('video').get('cover').get(
             'url_list')[0]
         video_dic["is_hot"] = 1
         video_dic["data_provider"] = "CCR"
         data_list.append(video_dic)
     output_result(
         result_Lst=data_list,
         platform=self.platform,
         output_to_es_raw=True,
     )
     data_list.clear()
Example #8
 def redirect_by_js(self, url_raw):
     get_raw = retry_get_url(url_raw)
     if get_raw is not None:
         raw_page = get_raw.text
         soup = BeautifulSoup(raw_page, 'html.parser')
         soupf = soup.find_all(name='div', attrs={'class': 'cms-qipuId'})
         if soupf:
             data_qipuId = soupf[0].attrs['data-qipuid']
             url_redirect = 'http://www.iqiyi.com/v_%s.html' % data_qipuId
             return url_redirect
         else:
             return None
     else:
         print('Failed to get redirect by js for raw url: %s' % url_raw)
         return None
Example #9
    def get_hot_words(self):
        bulk_list = []

        url = "https://apis.tudou.com/search/v1/hot?_t_={0}&e=md5&_s_=9a4abf3a92efad0605f8e31481327014&operator=CHINA+MOBILE_46007&network=WIFI".format(
            int(datetime.datetime.now().timestamp()))
        res = retry_get_url(url, proxies=3, headers=self.headers)
        res_json = res.json()
        for title in res_json["result"]["search"]["data"]:
            dic = {
                "platform": self.platform,
                "title": title["keyword"],
                "fetch_time": int(datetime.datetime.now().timestamp() * 1e3)
            }
            bulk_list.append(dic)
        hot_words_output_result(bulk_list)
        return True
Example #10
 def get_releaser_follower_num(self, releaserUrl):
     releaser_id = self.get_releaser_id(releaserUrl)
     releaserUrl = 'https://{2}/aweme/v1/user/?ac=WIFI&device_id={1}&os_api=18&app_name=aweme&channel=App Store&device_platform=ipad&device_type=iPad6,11&app_version=8.7.1&js_sdk_version=1.17.2.0&version_code=8.7.1&os_version=13.2.3&screen_width=1536&user_id={0}'.format(
             releaser_id, str(random.randint(40000000000, 90000000000)), random.choice(self.api_list))
     count = 0
     while count < 3:
         try:
             count += 1
             time.sleep(random.randint(1, 2))
             get_page = retry_get_url(releaserUrl, headers=self.headers, proxies=1)
             page = get_page.json()
             follower_num = page["user"].get("follower_count")
             print('%s follower number is %s' % (releaserUrl, follower_num))
             releaser_img = page["user"].get("avatar_thumb").get("url_list")[0]
             return follower_num, releaser_img
         except Exception:  # network or parsing failure: retry
             print("can't find followers")
             continue
     else:
         return None, None
Example #11
 def get_hot_words(self):
     bulk_list = []
     url = "http://c.m.163.com/nc/search/hotWord.html"
     page_res = retry_get_url(url,
                              headers=self.headers,
                              proxies=3,
                              timeout=5)
     page_json = page_res.json()
     for data in page_json["hotWordList"]:
         title = data["searchWord"]
         if title:
             dic = {
                 "platform": self.platform,
                 "title": title,
                 "fetch_time":
                 int(datetime.datetime.now().timestamp() * 1e3),
             }
             bulk_list.append(dic)
     hot_words_output_result(bulk_list)
     return True
Example #12
 def get_hot_videos(self, title=None, max_page=10, **kwargs):
     page = 1
     while page <= max_page:
         get_dic = {
             "keyword": title,
             # "pid": "6c23a6957198fad2",
             # "guid": "2139ff131a8a7d9ef7d3014cc8b97010",
             "mac": "",
             "imei": "null",
             "ver": "6.39.1",
             "_t_": int(datetime.datetime.now().timestamp()),
             "e": "md5",
             # "_s_": "b905d3a9738d7d2f815687428563d8f7",
             "operator": "CHINA+MOBILE_46007",
             "network": "WIFI",
             "ftype": "0",
             "cateId": "0",
             "seconds": "0",
             "seconds_end": "0",
             "ob": "",
             "pg": str(page),
             "pz": "30",
             # "aaid": "1.58259884569785E+20",
             "brand": "OPPO",
             "btype": "OPPO+R11",
             "sdkver": "2",
             "apad": "0",
             # "utdid": "XkjV9GsfBysDACyQ2%2BiF8MOw",
             "srid": "1",
             "userType": "guest",
         }
         requests_res = retry_get_url(
             "https://apis.tudou.com/search/v2/integration?%s" %
             urllib.parse.urlencode(get_dic),
             headers=self.headers,
             proxies=3)
         requests_json = requests_res.json()
         page += 1
         print(requests_json)
         for data in requests_json["results"]["ugc"]["data"]:
             # results are only printed; nothing is collected or persisted yet
             print(data)
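
urllib.parse.urlencode is what serializes get_dic into the query string above; a quick illustration with a small, hypothetical subset of the fields:

import urllib.parse

params = {"keyword": "美食", "pg": "1", "pz": "30"}
print(urllib.parse.urlencode(params))
# keyword=%E7%BE%8E%E9%A3%9F&pg=1&pz=30
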
Example #13
 def get_hot_words(self):
     bulk_list = []
     url = "https://api3-normal-c-lf.amemv.com/aweme/v1/hot/search/list/?detail_list=1&mac_address=48%3AA4%3A72%3A58%3A86%3AD5&os_api=22&device_type=OPPO%20R11&ssmix=a&manifest_version_code=990&dpi=320&uuid=866174725888628&app_name=aweme&version_name=9.9.0&app_type=normal&ac=wifi&update_version_code=9902&channel=tengxun_new&device_platform=android&iid=104847319549&version_code=990&cdid=fce00742-ccef-4b14-943d-1f62b6d637b0&openudid=48a4725886d57203&device_id=70787469432&resolution=900*1600&os_version=5.1.1&language=zh&device_brand=OPPO&aid=1128&mcc_mnc=46007"
     page_res = retry_get_url(url,
                              headers=self.headers,
                              proxies=3,
                              timeout=5)
     page_json = page_res.json()
     for data in page_json["data"]["word_list"]:
         title = data["word"]
         if title:
             dic = {
                 "platform": self.platform,
                 "title": title,
                 "fetch_time":
                 int(datetime.datetime.now().timestamp() * 1e3),
                 "hot_value": data.get("hot_value"),
                 "top": data.get("position"),
             }
             bulk_list.append(dic)
     hot_words_output_result(bulk_list)
     return True
Example #14
 def search_page(self, title):
     data_list = []
     encodestr = base64.b64encode(title.encode('utf-8'))
     encodestr = str(encodestr, 'utf-8')
     url = "http://c.m.163.com/search/comp2/Kg%3D%3D/20/{0}.html?".format(
         encodestr)
     para = "deviceId=2zx5YfHmoBb72ayxYpQVUg%3D%3D&version=newsclient.32.1.android&channel=VDEzNDg2NDc5MDkxMDc%3D&canal=bmV3c19sZl9jcGFfMg%3D%3D&dtype=0&tabname=shipin&position=5YiX6KGo6aG26YOo&ts={0}&sign=Di3opZw%2FFIPDdgreSK4VCKlnMSpm6FPoel5LeY88RgZ48ErR02zJ6%2FKXOnxX046I&spever=FALSE&open=scheme_%E9%BB%98%E8%AE%A4&openpath=/video/VT5O1KVCO".format(
         str(int(datetime.datetime.now().timestamp())))
     res = retry_get_url(url + para,
                         headers=self.headers,
                         timeout=5,
                         proxies=3)
     page_text = res.json()
     for data in page_text["doc"]["result"]:
         print(data)
         data_list.append(data)
     output_result(
         result_Lst=data_list,
         platform=self.platform,
         output_to_es_raw=True,
     )
     data_list.clear()
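
The request path carries the search keyword base64-encoded; a round-trip check of the encoding used above, with a hypothetical keyword:

import base64

title = "新闻"
encoded = str(base64.b64encode(title.encode("utf-8")), "utf-8")
assert base64.b64decode(encoded).decode("utf-8") == title
print(encoded)  # 5paw6Ze7
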
Example #15
 def get_hot_words(self):
     bulk_list = []
     timestamp = int(datetime.datetime.now().timestamp() * 1e3)
     url = "https://w.inews.qq.com/searchPage?pagefrom=moreHotDetail&adcode=310112&is_special_device=0&mid=0&dpi=320.0&qqnetwork=wifi&rom_type=R11-user%205.1.1%20NMF26X%20500200210%20release-keys&isColdLaunch=1&real_device_width=2.81&net_proxy=DIRECT@&net_bssid=48:A4:72:58:86:D5&isMainUserLogin=0&currentChannelId=_qqnews_custom_search&isElderMode=0&apptype=android&islite=0&hw=OPPO_OPPOR11&baseid=&global_session_id={0}&screen_width=900&omgbizid=&isClosePersonalized=0&sceneid=&videoAutoPlay=1&imsi=460077203886213&fix_store=&cpuabi=armeabi-v7a&isoem=0&currentTabId=news_news&lite_version=&startTimestamp={1}&net_slot=0&qn-time={2}&pagestartfrom=icon&mac=48:A4:72:58:86:D5&activefrom=icon&net_ssid=R1148a4725886d57203&store=17&screen_height=1600&top_activity=DailyHotDetailActivity&real_device_height=5.0&origin_imei=866174725888628&network_type=wifi&origCurrentTab=top&global_info=1|1|1|1|1|14|4|1|0|6|1|1|1||0|J309P000000000:J902P000000000:J601P900000000:A601P800217702:A601P700321102:B601P600286205:A601P500154501:A601P400161601:J601P300000000:B601P200096102:A601P100272502:A601P000261102:J601P904000000:J601P903000000:A601P902266601:A601P901291001:J601P811000000:A601P701226201:A601P622269601:A601P621294101:A601P620269601:J601P111000000:J601P110000000:A601P109107102:A601P105118803:A601P019237403:A601P016212405:J601P006000000:J603P000000000:J401P100000000:A401P000050901:J602P900000000:J602P800000000:J602P700000000:J602P600000000:A602P500267502:B602P400286004:J602P300000000:J602P200000000:J602P100000000:B602P000315504:A602P901257901:J602P616000000:A602P615304801:A602P613271701:A602P611253801:A602P516234601:A602P414259901:A602P307160708:J602P302000000:A602P208205801:J602P117000000:A602P007272801:A602P003136401:J304P000000000:J310P700000000:A310P200210802:J310P100000000:B310P020314103:A310P010301701:B310P000267107:B701P000323002:A703P000322204:A704P000309801:J702P000000000:J405P000000000:J064P400000000:J064P300000000:B064P100243802:B064P020290902:J064P010000000:J064P000000000:A085P000087701:B074P200238202:J074P040000000:B074P030315703:A074P020315602:A074P010315401:B074P000142402:J903P000000000:A267P300215801:A267P200263601:A267P100299801:B267P000300102:A073P040317201:B073P030314503:A073P020313801:J073P010000000:B073P000313603:J060P700000000:J060P300000000:J060P200000000:B060P100299703:A060P090287301:J060P020000000:J060P010000000:B060P000311102:J060P099000000:J060P016000000:A406P000313203:J403P700000000:J403P600000000:A403P200206702:B403P100246105:J403P010000000:A403P000310401:A403P602218702:B404P200262402:A404P000263407:J055P200000000:J055P090000000:J055P080000000:J055P070000000:J055P060000000:J055P050000000:J055P010000000:A055P000265801:J402P100000000:J402P090000000:J402P080000000:J402P060000000:J402P020000000:A402P000301403:J054P400000000:J054P300000000:J054P200000000:A054P100269701:B054P090289604:A054P080289702:J054P050000000:J054P040000000:A054P030288501:J054P010000000:A054P000319901:J056P000000000:A901P200252304:B901P100226405:B901P000232405:J407P000000000|1402|0|1|25|25|0|0|0||3|3|1|1|1|1|1|1|-1|0|0|5|2|0|0|0|3|0|0|1|3|0|2|0|0|2|0|0|1|0|1|1|0|0|1|0|4|0|1|1|11|20|1|0|1|1|0|0|1|4|0|1|1|41|2|51|60|0|1|0|0|1|5|1|0|0|71|0|0|1|71&imsi_history=460077203886213&net_apn=0&uid=48a4725886d57203&omgid=&trueVersion=6.0.40&qimei=866174725888628&devid=866174725888628&appver=22_android_6.0.40&Cookie=lskey%3D;skey%3D;uin%3D;%20luin%3D;logintype%3D0;%20main_login%3D;%20&qn-sig=74f9daefb0544a8cff5f2d1e0b465fd0&qn-rid=1002_1218eb69-f164-474c-9330-04d620a35c93&qn-newsig=68896f7d5f840c8540a9dff8877c88c277879f095408ca81edffe1a8b05ba94f".format(
         timestamp, int(timestamp / 1000), timestamp)
     page_res = retry_get_url(url,
                              headers=self.headers,
                              proxies=3,
                              timeout=10)
     page_json = page_res.json()
     for data in page_json["showInfo"]:
         search_title = data["desc"]
         if search_title:
             dic = {
                 "platform": self.platform,
                 "title": search_title,
                 "fetch_time":
                 int(datetime.datetime.now().timestamp() * 1e3),
             }
             bulk_list.append(dic)
     hot_words_output_result(bulk_list)
     return True
Example #16
    def get_hot_words(self):
        bulk_list = []
        timestamp = int(datetime.datetime.now().timestamp())
        url = "https://v.qq.com/biu/ranks/?t=hotsearch&channel=hot"
        headers = {
                "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                "accept-encoding": "gzip, deflate",
                "accept-language": "zh,zh-CN;q=0.9",
                "cache-control": "max-age=0",
                # "cookie": "pgv_pvi=3517925376; pgv_pvid=3591400976; RK=sDRQYhGkF/; ptcz=8100687e80e810853d573a8a9ced1155a9a9683321075161f61b773de19ff4c5; pac_uid=0_bf3968e8e3157; ts_uid=1260359885; tvfe_boss_uuid=082fecb8ba01b06d; QQLivePCVer=50181223; video_guid=ce0aa0f8275ad435; video_platform=2; bucket_id=9231001; mobileUV=1_1707c108811_53c13; tvfe_search_uid=3c2fd48b-03f8-4f63-af8c-bb2bd367af2b; ts_refer=www.baidu.com/link; ad_play_index=71; pgv_info=ssid=s7741803072; ts_last=v.qq.com/biu/ranks/",
                "if-modified-since": "Fri, 28 Feb 2020 07:10:00 GMT",
                "sec-fetch-mode": "navigate",
                "sec-fetch-site": "none",
                "sec-fetch-user": "******",
                "upgrade-insecure-requests": "1",
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
        }

        page_res = retry_get_url(url, headers=headers, proxies=3, timeout=5)
        page_text = page_res.content.decode("utf-8")
        html = etree.HTML(page_text)
        print(html)
        xpath_list = html.xpath("//ul[@class='table_list']/li")
        for li in xpath_list:
            title = li.xpath("./div[1]/a/@title")
            title_url = li.xpath("./div[1]/a/@href")
            if title:
                dic = {
                        "platform": self.platform,
                        "title": title[0],
                        "fetch_time": int(datetime.datetime.now().timestamp() * 1e3),
                        "url": title_url[0] if title_url else None,
                }
                bulk_list.append(dic)
        hot_words_output_result(bulk_list)
        return True
Example #17
    def list_page_single(self, listurl, channel=None):
        """
        To be solved: video collection page, mainly include TV series
        and cartoon series, such as
        http://www.iqiyi.com/a_19rrh7z5vx.html#vfrm=2-4-0-1
        http://www.iqiyi.com/a_19rrk3ndgl.html#vfrm=2-4-0-1
        and zongyi show, such as
        http://www.iqiyi.com/a_19rrhbfb4d.html#vfrm=2-4-0-1
        """
        list_page_Lst = []
        get_page = retry_get_url(listurl)
        if get_page is None:
            print('Failed to get single list page for url: %s' % listurl)
            return None
        print(listurl)
        get_page.encoding = 'utf-8'
        page = get_page.text
        soup = BeautifulSoup(page, 'html.parser')
        iqiyi = soup.find_all('div', {'class': 'site-piclist_pic'})
        for data_line in iqiyi:
            try:
                url = data_line.find('a')['href']
            except TypeError:
                url = None
            if url is not None:
                video_page_dict = self.video_page(url, channel)
                if video_page_dict is not None:
                    list_page_Lst.append(video_page_dict)
                else:
                    print('Got None on video page url: %s' % url)
#                    # for test
#                    video_page_dict = {'url': url}
#                    list_page_Lst.append(video_page_dict)
        return list_page_Lst
Example #18
    def releaser_page_web(self,
                          releaserUrl,
                          output_to_file=False,
                          filepath=None,
                          releaser_page_num_max=30,
                          output_to_es_raw=False,
                          es_index=None,
                          doc_type=None,
                          output_to_es_register=False,
                          push_to_redis=False,
                          proxies_num=None):
        releaser_id = self.get_releaser_id(releaserUrl)
        # releaser = self.get_releaser_name(releaserUrl)
        releaserUrl = 'https://id.tudou.com/i/%s/videos' % releaser_id
        json_headers = {
            "accept": "application/json, text/javascript, */*; q=0.01",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh,zh-CN;q=0.9",
            # "cookie": "cna=W99aFOvX+QACAXL4fBJI3rAw; __ysuid=1541219939103JPW; ykss=e93bad5ef9c26af71c8e7ee5; P_ck_ctl=47F163FE35A5B1B2E479B158A12376A7; __ayvstp=16; __aysvstp=16; _zpdtk=ecd18a6d5d86a28b786b653356133cfb606dd1dc; isg=BOzsOnpUnhIGhYq8YxHgZ36EvcoepZBPH_JJJ0Yt-Rc6UY5bbrVJ3rr3dxdpWcin",
            "referer": releaserUrl,
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent":
            "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1",
            "x-csrf-token": "ecd18a6d5d86a28b786b653356133cfb606dd1dc",
            "x-requested-with": "XMLHttpRequest",
        }
        json_cookies = {
            "cna":
            "W99aFOvX+QACAXL4fBJI3rAw",
            "__ysuid":
            "1541219939103JPW",
            "ykss":
            "e93bad5ef9c26af71c8e7ee5",
            "P_ck_ctl":
            "47F163FE35A5B1B2E479B158A12376A7",
            "__ayvstp":
            "16",
            "__aysvstp":
            "16",
            "_zpdtk":
            "ecd18a6d5d86a28b786b653356133cfb606dd1dc",
            "isg":
            "BOzsOnpUnhIGhYq8YxHgZ36EvcoepZBPH_JJJ0Yt-Rc6UY5bbrVJ3rr3dxdpWcin",
        }
        first_page_headers = {
            "accept":
            "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "accept-encoding":
            "gzip, deflate, br",
            "accept-language":
            "zh,zh-CN;q=0.9",
            # "cookie": "cna=W99aFOvX+QACAXL4fBJI3rAw; __ysuid=1541219939103JPW; ykss=e93bad5ef9c26af71c8e7ee5; P_ck_ctl=47F163FE35A5B1B2E479B158A12376A7; __ayvstp=16; __aysvstp=16; _zpdtk=9053e5d58ee0c51b1f3da8008dd4bda164ecd846; isg=BHl5FRo0A8WDkd_DnlItMBsXiOVThm042sF8-Juu9KAfIpu049ZUCb80oCjUmgVw",
            "referer":
            releaserUrl,
            "sec-fetch-dest":
            "document",
            "sec-fetch-mode":
            "navigate",
            "sec-fetch-site":
            "same-origin",
            "sec-fetch-user":
            "******",
            "upgrade-insecure-requests":
            "1",
            "user-agent":
            "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1",
        }
        first_page_res = retry_get_url(releaserUrl,
                                       headers=first_page_headers,
                                       proxies=proxies_num)
        json_cookies.update(dict(first_page_res.cookies))
        user_id = re.findall(r'uid="(\d+)"', first_page_res.text)[0]
        zptk_url = "https://id.tudou.com/i/h5/id_%s/playlisttab?uid=%s" % (
            user_id, user_id)
        playlisttab_res = retry_get_url(zptk_url,
                                        headers=json_headers,
                                        proxies=proxies_num,
                                        cookies=json_cookies)
        # print(dict(playlisttab_res.cookies))
        json_cookies.update(dict(playlisttab_res.cookies))
        json_headers["x-csrf-token"] = dict(playlisttab_res.cookies)["_zpdtk"]
        count = 1
        retry_time = 0
        result_list = []

        self.video_data['releaserUrl'] = releaserUrl

        print("working on releaser_id: %s" % (releaser_id))
        while count <= releaser_page_num_max and retry_time < 5:
            proxies = get_proxy(proxies_num)
            api_url = 'https://id.tudou.com/i/h5/id_%s/videos?ajax=1&pn=%s&pl=20' % (
                user_id, count)
            print(api_url)
            if proxies:
                get_page = requests.get(api_url,
                                        headers=json_headers,
                                        proxies=proxies,
                                        timeout=3,
                                        cookies=json_cookies)
            else:
                get_page = requests.get(api_url,
                                        headers=json_headers,
                                        timeout=3,
                                        cookies=json_cookies)
            _zpdtk = dict(get_page.cookies)
            json_cookies.update(_zpdtk)
            # print(dict(get_page.cookies))
            json_headers["x-csrf-token"] = _zpdtk["_zpdtk"]
            page_dic = get_page.json()
            releaser_page_num_max = page_dic["page"]["pz"]
            releaser = page_dic['channelOwnerInfo']["data"]["nickname"]
            #            has_more = page_dic.get('has_more')
            try:
                data_list = page_dic['data']["data"]
                time.sleep(0.25)
            except (KeyError, TypeError):  # response missing the data block
                retry_time += 1
                time.sleep(0.25)
                print("no more data at page: %s try_time: %s" %
                      (count, retry_time))
                continue
            if data_list == []:
                retry_time += 1
                time.sleep(0.25)
                print("no more data at page: %s try_time: %s" %
                      (count, retry_time))
                continue
            else:
                retry_time = 0
                print("get data at page: %s" % (count))
                count += 1
                for info_dic in data_list:
                    video_info = copy.deepcopy(self.video_data)
                    video_info['video_id'] = info_dic["videoid"]
                    video_info['title'] = info_dic["title"]
                    video_info['releaser'] = releaser
                    video_info[
                        'url'] = 'https://video.tudou.com/v/%s.html' % info_dic[
                            "videoid"]
                    video_info['duration'] = int(info_dic.get('seconds') / 1e3)
                    video_info['releaser_id_str'] = "new_tudou_%s" % (
                        releaser_id)
                    video_info['comment_count'] = int(
                        info_dic.get('total_comment'))
                    video_info['favorite_count'] = int(
                        info_dic.get('total_up'))
                    # In the database, favorite_count means 点赞数 (likes),
                    # which the web page calls praiseNumber; on the web page,
                    # favorite_count means 收藏数 (bookmarks).
                    video_info['video_img'] = info_dic.get('thumburl')
                    video_info['play_count'] = info_dic.get('total_vv')
                    video_info['release_time'] = int(
                        info_dic.get('publishtime') * 1e3)
                    #                            print(video_info['release_time'])
                    # if '天前' in release_time_str:
                    #     video_info['release_time'] = self.video_page(video_info['url'])['release_time']
                    # else:
                    #     video_info['release_time'] = trans_strtime_to_timestamp(input_time=release_time_str,
                    #                                                             missing_year=True)
                    video_info['fetch_time'] = int(time.time() * 1e3)
                    yield video_info
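
One detail worth calling out in the loop above: every response carries a fresh _zpdtk cookie, and the next request must send it back both as a cookie and as the x-csrf-token header. A small helper capturing that rotation (the function name is hypothetical; the example inlines this logic):

def rotate_csrf(json_headers, json_cookies, response):
    # Merge the fresh cookies, then promote _zpdtk into the CSRF header.
    fresh = dict(response.cookies)
    json_cookies.update(fresh)
    if "_zpdtk" in fresh:
        json_headers["x-csrf-token"] = fresh["_zpdtk"]
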
Example #19
    def search_short_video_page(self, title=None, search_json=None, **kwargs):
        data_list = []
        timestamp = int(datetime.datetime.now().timestamp() * 1e3)
        title = urllib.parse.quote(title)
        headers = {
            "Accept-Encoding": "gzip",
            # "X-SS-REQ-TICKET": "1587103224961",
            "passport-sdk-version": "14",
            "sdk-version": "2",
            #"Cookie": "odin_tt=d5d96b2812637e9d20681530fbbe4d52e8f76ae1b6afa8c0a173260321611c507ac6eca10991b21fc4f023e94371d457df784f959e94db673ef29a5bd2137091; qh[360]=1; history=alrvlFic6pJZXJCTWBmSmZt6KW6mevZSz5LU3OJ7DEKX42Zw%2Bc84wMR3iYGBweFy3EzZsPcNTLyXWN1AvLYP8%2BQPMLFfEpUA8bo%2F7nNtYOK7xNwC4k3XmMHe5MtzSTiM48DluNr01dkNTDyXuHrApsi4ejkwsV%2BSmAPmSeXoMzDxXhKcAuIVrRfWAJnJJwA25fG1DoezvFBTZrzZeg6kT%2BwWSG7Gx3UJB5h4L%2FH4gXlVn%2BtAtkvFMQRcjpv%2B%2Be9TBib2S%2BwcYBuUn8xsYGK%2FJKMAkptgfXrDASaOS4yHQHJVPy6UOjDxXuI4BeJN26Fs6MDEcYn%2FEoMDAAAA%2F%2F8%3D; install_id=112651077855; ttreq=1$0b37d53ca5c301ce96959dc97a67886da420b294",
            # "X-Gorgon": "0401e08b4001a628dcf96b16d01278ad842e915d905b213dc48f",
            # "X-Khronos": "1587103224",
            "Host": "is.snssdk.com",
            "Connection": "Keep-Alive",
            "User-Agent": "okhttp/3.10.0.1",
        }
        url = "https://is.snssdk.com/api/search/content/?os_api=23&device_type=oneplus%2Ba5010&from_search_subtab=video&manifest_version_code=7690&source=search_subtab_switch&offset=0&is_ttwebview=0&uuid=440000000189785&action_type&is_incognito=0&keyword_type&rom_version=23&app_name=news_article&format=json&version_name=7.6.9&ac=wifi&host_abi=armeabi-v7a&update_version_code=76909&channel=baidu_0411&is_native_req=1&loadId=1&longitude=113.40717530841052&isIncognito=0&plugin=2050&openudid=e44cc0264b92bcbf&forum=1&latitude=39.904680919672145&search_start_time=1587102733626&language=zh&pd=xiaoshipin&cur_tab_title=search_tab&aid=13&pos=5r_-9Onkv6e_eBEKeScxeCUfv7G_8fLz-vTp6Pn4v6esrKuzqa2qrKqorq2lqaytqK-xv_H86fTp6Pn4v6eupLOkramrpa2krKSrqq-sqaixv_zw_O3e9Onkv6e_eBEKeScxeCUfv7G__PD87dHy8_r06ej5-L-nrKyrs6mtqqyqqK6tpamsraivsb_88Pzt0fzp9Ono-fi_p66ks6StqaulraSspKuqr6ypqOA%253D&dpi=270&qrecImprId&fetch_by_ttnet=1&count=10&plugin_enable=3&search_position&ab_group=100167%252C94569%252C102754&keyword={0}&scm_version=1.0.2.830&search_json=%257B%2522comment_ids%2522%253A%255B%255D%252C%2522event_discussion%2522%253A74123%252C%2522event_impression%2522%253A17270790%252C%2522forum_id%2522%253A1664181806902302%252C%2522forum_recall_wtt%2522%253A%255B1664190666034183%252C1664192273575943%252C1664184430218253%252C1664185769175051%252C1664184985139212%252C1664196237152267%252C1664186792648732%252C1664188755414019%252C1664187055838215%252C1664184182571022%252C1664185938950148%252C1664188041995268%252C1664188322863172%252C1664190185024520%252C1664185602828300%252C1664184276484099%252C1664188211399684%252C1664187870713868%252C1664184484958211%252C1664183864289288%252C1664186825487371%252C1664195548700686%252C1664186585780228%252C1664197296210947%252C1664188146725901%252C1664191748459523%255D%252C%2522group_source%2522%253Anull%252C%2522hot_gid%2522%253A6816255461172445703%252C%2522log_pb%2522%253A%257B%2522cluster_type%2522%253A%25220%2522%252C%2522entrance_hotspot%2522%253A%2522channel%2522%252C%2522hot_board_cluster_id%2522%253A%25226816091697949180424%2522%252C%2522hot_board_impr_id%2522%253A%2522202004171352010100140411610B1A7741%2522%252C%2522location%2522%253A%2522hot_board%2522%252C%2522rank%2522%253A%25225%2522%252C%2522source%2522%253A%2522trending_tab%2522%252C%2522style_id%2522%253A%252210005%2522%257D%252C%2522mix_stick_ids%2522%253A%255B1664190666034183%252C1664192273575943%252C1664184430218253%252C1664185769175051%252C1664184985139212%252C1664196237152267%252C1664186792648732%252C1664188755414019%252C1664187055838215%252C1664184182571022%252C1664185938950148%252C1664188041995268%252C1664188322863172%252C1664190185024520%252C1664185602828300%252C1664184276484099%252C1664188211399684%252C1664187870713868%252C1664184484958211%252C1664183864289288%252C1664186825487371%252C1664195548700686%252C1664186585780228%252C1664197296210947%252C1664188146725901%252C1664191748459523%255D%252C%2522stick_group_ids%2522%253A%255B%255D%257D&device_platform=android&search_id&has_count=0&version_code=769&mac_address=08%253A00%253A27%253A1F%253A7E%253AA0&from=xiaoshipin&device_id={1}&resolution=810*1440&os_version=6.0.1&device_brand=Oneplus&search_sug=1&qc_query".format(
            title, random.randint(69418800000, 69418899999))

        res = retry_get_url(url, headers=headers, timeout=5, proxies=3)
        page_text = res.json()
        for one_video in page_text["data"]:
            video_dic = {}
            try:
                one_video = one_video["raw_data"]
                video_dic['title'] = one_video.get('title')
                video_dic['url'] = one_video.get('share').get("share_url")
                releaser_id = one_video.get('user').get("info").get("user_id")
                video_dic['releaser'] = one_video.get('user').get("info").get(
                    "name")
                video_dic[
                    'releaserUrl'] = "https://www.toutiao.com/c/user/%s/" % releaser_id
                release_time = int(one_video.get('create_time'))
                video_dic['release_time'] = int(release_time * 1e3)
                video_dic['duration'] = int(
                    one_video.get('video').get("duration"))
                video_dic['play_count'] = one_video.get('action').get(
                    "play_count")
                video_dic['repost_count'] = one_video.get('action').get(
                    "share_count")
                video_dic['comment_count'] = one_video.get('action').get(
                    "comment_count")
                video_dic['favorite_count'] = one_video.get('action').get(
                    "digg_count")
                video_dic['fetch_time'] = int(
                    datetime.datetime.now().timestamp() * 1e3)
                video_dic['releaser_id_str'] = "toutiao_%s" % releaser_id
                video_dic['video_img'] = one_video.get('video').get(
                    'origin_cover').get('url_list')[0]
                video_dic['platform'] = "toutiao"
                if "iesdouyin" in video_dic['url']:
                    video_dic[
                        'releaserUrl'] = "https://www.douyin.com/share/user/%s/" % releaser_id
                    video_dic['platform'] = "抖音"
                    video_dic['releaser_id_str'] = "抖音_%s" % releaser_id
                    video_dic['play_count'] = 0
                video_dic["is_hot"] = 1
                video_dic["data_provider"] = "CCR"
            except (AttributeError, TypeError):  # record missing expected fields
                continue
            data_list.append(video_dic)
        output_result(
            result_Lst=data_list,
            platform=self.platform,
            output_to_es_raw=True,
        )
        data_list.clear()
Example #20
    def list_page(self,
                  rid,
                  page_num=1,
                  channel=None,
                  output_to_file=False,
                  filepath=None,
                  output_to_es_raw=False,
                  output_to_es_register=False,
                  push_to_redis=False,
                  page_num_max=34,
                  output_es_index=None,
                  output_doc_type=None,
                  proxy_dic=None):

        result_lst = []
        fail_time = 0
        while page_num <= page_num_max and fail_time < 5:
            lst_url = (
                'https://api.bilibili.com/x/web-interface/newlist?rid=' + rid +
                '&type=0&pn=' + str(page_num) + '&ps=20')
            if proxy_dic is not None:
                raw_proxy_dic = extract_data_to_use()
                record_id = raw_proxy_dic['id']
                proxy_dic = build_proxy_dic(raw_proxy_dic=raw_proxy_dic)
                print('get proxy_dic %s' % proxy_dic)
            try:
                get_page = retry_get_url(lst_url,
                                         proxies=proxy_dic,
                                         timeout=15)
                fail_time = 0
                page_num += 1
            except Exception:
                if proxy_dic is not None:  # record_id exists only when a proxy was drawn
                    update_status(record_id=record_id, availability=0)
                fail_time += 1
                print('%s has failed %s times' % (lst_url, fail_time))
                continue
            print('get page at %s' % (page_num - 1))
            page_dic = get_page.json()
            total_video = int(page_dic['data']['page']['count'])

            if page_num == 2:  # first fetched page (page_num was incremented above)
                if int(total_video / 20) == total_video / 20:
                    total_page_num = int(total_video / 20)
                else:
                    total_page_num = int(total_video / 20) + 1
                if total_page_num <= page_num_max:
                    page_num_max = total_page_num

            archive_list = page_dic['data']['archives']

            for one_video in archive_list:
                video_dic = copy.deepcopy(self.video_data)
                video_dic['title'] = one_video['title']
                aid = one_video['aid']
                video_dic['aid'] = one_video['aid']
                try:
                    attribute = one_video['attribute']
                except KeyError:
                    attribute = 0
                video_dic['attribute'] = attribute
                video_dic['url'] = 'https://www.bilibili.com/video/av' + str(
                    aid)
                video_dic['releaser'] = one_video['owner']['name']
                video_dic['releaser_id'] = one_video['owner']['mid']
                video_dic['video_intro'] = one_video['desc']
                video_dic['duration'] = one_video['duration']
                video_dic['play_count'] = one_video['stat']['view']
                video_dic['danmuku'] = one_video['stat']['danmaku']
                video_dic['release_time'] = int(one_video['pubdate'] * 1e3)
                fetch_time = int(
                    datetime.datetime.timestamp(datetime.datetime.now()) * 1e3)
                video_dic['fetch_time'] = fetch_time
                result_lst.append(video_dic)

                if len(result_lst) >= 100:
                    if output_es_index is None and output_doc_type is None:
                        output_result(
                            result_Lst=result_lst,
                            platform=self.platform,
                            output_to_file=output_to_file,
                            filepath=filepath,
                            output_to_es_raw=output_to_es_raw,
                            output_to_es_register=output_to_es_register,
                            push_to_redis=push_to_redis)
                        result_lst.clear()

                    elif output_es_index is not None and output_doc_type is not None:
                        output_result(
                            result_Lst=result_lst,
                            platform=self.platform,
                            output_to_file=output_to_file,
                            filepath=filepath,
                            output_to_es_raw=output_to_es_raw,
                            output_to_es_register=output_to_es_register,
                            push_to_redis=push_to_redis,
                            es_index=output_es_index,
                            doc_type=output_doc_type)
                        result_lst.clear()
            time.sleep(0)
        if result_lst:
            if output_es_index is None and output_doc_type is None:
                output_result(result_Lst=result_lst,
                              platform=self.platform,
                              output_to_file=output_to_file,
                              filepath=filepath,
                              output_to_es_raw=output_to_es_raw,
                              output_to_es_register=output_to_es_register,
                              push_to_redis=push_to_redis)

            elif output_es_index is not None and output_doc_type is not None:
                output_result(result_Lst=result_lst,
                              platform=self.platform,
                              output_to_file=output_to_file,
                              filepath=filepath,
                              output_to_es_raw=output_to_es_raw,
                              output_to_es_register=output_to_es_register,
                              push_to_redis=push_to_redis,
                              es_index=output_es_index,
                              doc_type=output_doc_type)

        return result_lst
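
The page-count arithmetic above (checking whether total_video / 20 divides evenly before adding one) is a hand-rolled ceiling division; math.ceil expresses the same thing directly:

import math

total_video = 137  # e.g. the value read from page_dic['data']['page']['count']
total_page_num = math.ceil(total_video / 20)  # 7, same result as the if/else above
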
Example #21
 def get_releaser_page(self, releaserUrl):
     headers = {
         "Accept-Encoding": "gzip",
         # "sdk-version": "1",
         "User-Agent":
         "Linux; U; Android 8.1.0; zh-CN; EML-AL00 Build/HUAWEIEML-AL00",
         "Host": "aweme.snssdk.com",
         "Connection": "Keep-Alive",
     }
     releaser_id = self.get_releaser_id(releaserUrl)
     time.sleep(1)
     releaserUrl = 'https://{2}/aweme/v1/user/?ac=WIFI&device_id={1}&os_api=18&app_name=aweme&channel=App Store&device_platform=ipad&device_type=iPad6,11&app_version=8.9.1&js_sdk_version=1.17.2.0&version_code=8.7.1&os_version=13.2.3&screen_width=1536&user_id={0}'.format(
         releaser_id, str(random.randint(40000000000, 90000000000)),
         random.choice(self.api_list))
     count = 0
     while count < 3:
         try:
             count += 1
             time.sleep(random.random())
             get_page = retry_get_url(releaserUrl,
                                      headers=headers,
                                      proxies=0)
             page = get_page.json()
             follower_num = page["user"].get("follower_count")
             age = page["user"].get('birthday')
             location = page["user"].get('location')
             province = page["user"].get('province')
             huozan = page["user"].get("total_favorited")  # 获赞: total likes received
             sex = page["user"].get("gender")
             verify = page["user"].get("enterprise_verify_reason")
             signature = page["user"].get('signature').replace("\r",
                                                               "").replace(
                                                                   "\n", "")
             if page["user"].get('is_gov_media_vip'):
                 signature_type = "gov"
             elif page["user"].get('is_star'):
                 signature_type = "star"
             elif page["user"].get('is_effect_artist'):
                 signature_type = "artist"
             elif page["user"].get('is_verified'):
                 signature_type = "已认证"
             else:
                 signature_type = ""
             dic = {
                 "age": age,
                 "location": location,
                 "huozan": huozan,
                 "sex": sex,
                 "verify": verify,
                 "signature": signature,
                 "province": province,
                 "follower_num": follower_num,
                 "signature_type": signature_type,
             }
             print(dic)
             return dic
         except Exception:  # network or parsing failure: retry
             print("can't fetch releaser profile")
             continue
     else:
         return None
Example #22
    def releaser_page(self,
                      releaserUrl,
                      output_to_file=False,
                      filepath=None,
                      output_to_es_raw=False,
                      output_to_es_register=False,
                      push_to_redis=False,
                      releaser_page_num_max=30,
                      es_index=None,
                      doc_type=None):
        videos_per_page = 42
        releaser_page_Lst = []
        releaser_id = self.rebuild_releaserUrl(releaserUrl)
        if releaser_id == '' or releaser_id is None:
            print('Failed to get releaser id: %s' % releaserUrl)
            return None
        real_releaserUrl = 'https://www.iqiyi.com/u/' + releaser_id + '/v'
        get_page = retry_get_url(real_releaserUrl)
        if get_page is None:
            print('Failed to get releaser page: %s' % releaserUrl)
            return None
        get_page.encoding = 'utf-8'
        page = get_page.text
        soup = BeautifulSoup(page, 'html.parser')
        try:
            videonum_str = soup.find('span', {'class': 'icon-num'}).text
            videonum_f = re.findall('[0-9]+', videonum_str)
        except:
            print('Failed to get total video number: %s' % releaserUrl)
            videonum_f = []
        if videonum_f != []:
            videonum = int(videonum_f[0])
            # ceiling division, so an exact multiple of 42 videos does not
            # produce an extra empty page
            totalpage = (videonum + videos_per_page - 1) // videos_per_page
        else:
            videonum = None
            totalpage = 1000  # assign an arbitrary number

        def process_one_line(data_line):
            url = data_line.find('p', {
                'class': 'site-piclist_info_title_twoline'
            }).a['href']
            if not url.startswith('https:') and not url.startswith('http:'):
                url = 'https:' + url
            get_video_dict = self.video_page(url)
            if get_video_dict is None:
                return None
            return get_video_dict

        releaser_url_body_f = re.findall('https://www.iqiyi.com/u/[0-9]+/v',
                                         releaserUrl)
        if releaser_url_body_f == []:
            releaser_url_body_f = re.findall(
                'http://www.iqiyi.com/u/[0-9]+/v', releaserUrl)
        if releaser_url_body_f != []:
            releaser_url_body = releaser_url_body_f[0]
        else:
            return None

        if releaser_page_num_max > totalpage:
            releaser_page_num_max = totalpage
        video_page_url = [
            releaser_url_body + '?page={}&video_type=1'.format(str(i))
            for i in range(1, releaser_page_num_max + 1)
        ]
        for page_url in video_page_url:
            get_page = retry_get_url(page_url)
            if get_page is None:
                continue
            print("get %s successfully" % page_url)
            page = get_page.text
            soup = BeautifulSoup(page, 'html.parser')
            iqiyi = soup.find_all('li', {'j-delegate': 'colitem'})
            for data_line in iqiyi:
                one_video_dic = process_one_line(data_line)
                if one_video_dic is None:
                    continue
                releaser_page_Lst.append(one_video_dic)
                if len(releaser_page_Lst) >= 100:
                    output_result(releaser_page_Lst,
                                  self.platform,
                                  output_to_file=output_to_file,
                                  filepath=filepath,
                                  output_to_es_raw=output_to_es_raw,
                                  output_to_es_register=output_to_es_register,
                                  push_to_redis=push_to_redis,
                                  es_index=es_index,
                                  doc_type=doc_type)
                    releaser_page_Lst.clear()
        if releaser_page_Lst != []:
            output_result(releaser_page_Lst,
                          self.platform,
                          output_to_file=output_to_file,
                          filepath=filepath,
                          output_to_es_raw=output_to_es_raw,
                          output_to_es_register=output_to_es_register,
                          push_to_redis=push_to_redis,
                          es_index=es_index,
                          doc_type=doc_type)
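
The totalpage arithmetic above is plain ceiling division; a quick illustrative check of the formula (the numbers are only examples):

def ceil_pages(total_items, per_page):
    # (n + k - 1) // k equals ceil(n / k) for positive integers
    return (total_items + per_page - 1) // per_page


assert ceil_pages(84, 42) == 2   # exact multiple: no extra page
assert ceil_pages(85, 42) == 3   # one leftover video needs one more page
assert ceil_pages(1, 42) == 1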
Example #23
    def releaser_page(self, releaserUrl,
                      output_to_file=False,
                      filepath=None,
                      releaser_page_num_max=5000,
                      output_to_es_raw=False,
                      es_index=None,
                      doc_type=None,
                      output_to_es_register=False,
                      push_to_redis=False, proxies_num=None, **kwargs):

        """
        get video info from api instead of web page html
        the most scroll page is 1000
        """

        result_list = []
        has_more = True
        count = 1
        count_false = 0
        releaser_id = self.find_releaser_id(releaserUrl)
        offset = "0"
        # vid = "AB5483CA-FCDC-42F1-AFB1-077A1%sDA" % random.randint(100000, 999999)
        # ccid = "F153594D-1310-4984-A4C3-A679D4D%s" % random.randint(10000, 99999)
        # openudid = "5d44f2ea1b74e3731b27e5ed8039ac29f%s" % random.randint(1000000, 9999999)
        # idfa = "E3FC9054-384B-485F-9B4C-936F33D7D%s" % random.randint(100, 999)
        # iid = str(random.randint(40000000000, 70000000000))
        device_id = str(random.randint(56884000000, 96890000000))
        # proxies = get_proxy(proxies_num)
        while has_more and count <= releaser_page_num_max:
            # print(str(releaser_id)+str(max_behot_time))
            # js_head = json.loads(get_js(str(releaser_id)+str(max_behot_time)))
            time.sleep(random.randint(1,2))
            print("get %s video on page %s" % (releaser_id, count))
            url_dic = {
                    "source": "0",
                    "max_cursor": offset,
                    "user_id": releaser_id,
                    "count": "21",
                    "os_api": "23",
                    "device_type": "Huawei P20",
                    "ssmix": "a",
                    "manifest_version_code": "10000%s" % random.randint(1,5),
                    "dpi": "429",
                    # "uuid": "440000000189785",
                    "app_name": "douyin_lite",
                    "version_name": "10.0.0",
                    # "ts": "1585532172",
                    "app_type": "normal",
                    "ac": "wifi",
                    "update_version_code": "10009900",
                    "channel": "baidu",
                    # "_rticket": "1585532172572",
                    "device_platform": "android",
                    # "iid": "1697284012668695",
                    "version_code": "100%s00" %random.randint(1,5),
                    # "cdid": "87cc1c77-cc3c-41a1-8df6-1e060b9c510b",
                    # "openudid": "e44cc0264b92bcbf",
                    "device_id": device_id,
                    # "device_id": 69418894872,
                    "resolution": "1080*2244",
                    "os_version": "9.0.1",
                    "language": "zh",
                    "device_brand": "Huawei",
                    "aid": "2329",
                    "mcc_mnc": "46001",
            }
            # self.headers["Host"] = host
            url = "https://{1}/aweme/v1/aweme/post/?{0}".format(urllib.parse.urlencode(url_dic),random.choice(self.api_list))
            try:
                #proxies = get_proxy(proxies_num)
                if proxies_num:
                    # get_page = requests.get(url, headers=self.headers, proxies=proxies, timeout=10)
                    get_page = retry_get_url(url, headers=self.headers, proxies=proxies_num, timeout=10)
                else:
                    get_page = requests.get(url, headers=self.headers, timeout=10)
            except Exception as e:
                # grab a fresh proxy on failure; retry_get_url above takes
                # proxies_num directly, so this result is not passed on
                proxies = get_proxy(proxies_num)
                print(e)
                continue

            page_dic = {}
            data_list = None  # pre-bind so the except clause below can test it
            # print(get_page.text)
            try:
                page_dic = get_page.json()
                # print(get_page)
                # print(page_dic)
                data_list = page_dic.get('aweme_list')
                if not data_list:
                    get_page = requests.get(url, headers=self.headers, timeout=10)
                    page_dic = get_page.json()
                    data_list = page_dic.get('aweme_list')
                    if not data_list:
                        raise ValueError
                has_more = page_dic.get('has_more')
                offset = str(page_dic.get("max_cursor"))
            except Exception:
                if not data_list:
                    proxies = get_proxy(proxies_num)
                    count_false += 1
                    if count_false >= 2:
                        break
                    else:
                        continue
            # offset = page_dic.get('offset')

            if has_more is None:
                has_more = False
            if data_list == []:
                print("no data in releaser %s page %s" % (releaser_id, count))
                # print(page_dic)
                # print(url)
                proxies = get_proxy(1)
                count_false += 1
                if count_false >= 2:
                    has_more = False
                continue

            else:
                count_false = 0
                count += 1
                for one_video in data_list:
                    # info_str = one_video.get('content')
                    video_dic = copy.deepcopy(self.video_data)
                    video_dic['title'] = one_video.get('desc')
                    video_dic['url'] = one_video.get('share_url')
                    video_dic['releaser'] = one_video.get('author').get("nickname")
                    video_dic['releaserUrl'] = releaserUrl
                    release_time = one_video.get('create_time')
                    video_dic['release_time'] = int(release_time * 1e3)
                    try:
                        video_dic['duration'] = int(one_video.get('duration') / 1000)
                    except:
                        video_dic['duration'] = 0
                    video_dic['play_count'] = 0
                    video_dic['repost_count'] = one_video.get('statistics').get('share_count')
                    video_dic['comment_count'] = one_video.get('statistics').get('comment_count')
                    video_dic['favorite_count'] = one_video.get('statistics').get('digg_count')
                    video_dic['video_id'] = one_video.get('aweme_id')
                    video_dic['fetch_time'] = int(datetime.datetime.now().timestamp() * 1e3)
                    video_dic['releaser_id_str'] = "抖音_%s" % releaser_id  # 抖音 = Douyin
                    try:
                        video_dic['video_img'] = one_video.get('video').get('cover').get('url_list')[0]
                    except:
                        pass
                    yield video_dic
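
Because this releaser_page is a generator that yields one video_dic per post, the caller is responsible for draining and batching it. A hedged usage sketch, assuming crawler is an instance of the class above (the batch size and output flags are illustrative):

batch = []
for video_dic in crawler.releaser_page(releaserUrl,
                                       releaser_page_num_max=10,
                                       proxies_num=1):
    batch.append(video_dic)
    if len(batch) >= 100:
        output_result(batch, crawler.platform, output_to_es_raw=True,
                      es_index='crawler-data-raw', doc_type='doc')
        batch = []
if batch:
    output_result(batch, crawler.platform, output_to_es_raw=True,
                  es_index='crawler-data-raw', doc_type='doc')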
Example #24
 def key_customer(self,
                  releaserUrl,
                  releaser_page_num_max=1000,
                  output_to_es_raw=False,
                  es_index='crawler-data-raw',
                  doc_type='doc'):
     """
     input releaserUrl must be strict as https://id.tudou.com/i/UMjc5MzI5NDA==/videos?
     end with /videos otherwise when scrolling it will make mistakes
     """
     releaser_id = self.get_releaser_id(releaserUrl)
     print("working on releaser: %s" % releaser_id)
     releaserUrl = 'https://id.tudou.com/i/%s/videos' % releaser_id
     result_lst = []
     get_page = retry_get_url(releaserUrl)
     get_page.encoding = 'utf-8'
     page = get_page.text
     soup = BeautifulSoup(page, 'html.parser')
     try:
         releaser = soup.find('div', {'class': 'user-name'}).a.text
     except:
         releaser = None
     try:
         total_video_num_str = soup.find('div', {
             'class': 'title'
         }).span.text
         total_video_num = total_video_num_str.replace('(', '').replace(
             ')', '').replace(',', '')
         total_video_num = trans_play_count(total_video_num)
     except:
         print('Failed to get total video number: %s' % releaserUrl)
         total_video_num = 0
     if total_video_num % 50 == 0:
         total_page_num = int(total_video_num / 50)
     else:
         total_page_num = int(total_video_num / 50) + 1
     if releaser_page_num_max > total_page_num:
         releaser_page_num_max = total_page_num
     print("releaser page num max is %s" % releaser_page_num_max)
     video_lst = soup.find_all('div', {'class': 'v'})
     for line in video_lst:
         video_info = self.process_one_video(line)
         video_info['releaserUrl'] = releaserUrl
         video_info['releaser'] = releaser
         result_lst.append(video_info)
     if releaser_page_num_max >= 2:
         page_num = 2
         try:
             partial_releaserUrl = soup.find('li', {
                 'class': 'next'
             }).a['href']
             new_releaserUrl = 'https://id.tudou.com%s' % partial_releaserUrl
         except:
             # fall back to the constructed page-2 url used below
             new_releaserUrl = (
                 'https://id.tudou.com/i/%s/videos?order=1&page=2'
                 % releaser_id)
         while page_num <= releaser_page_num_max:
             get_page = retry_get_url(new_releaserUrl)
             get_page.encoding = 'utf-8'
             page = get_page.text
             soup = BeautifulSoup(page, 'html.parser')
             if page_num != releaser_page_num_max:
                 try:
                     new_releaserUrl = 'https://id.tudou.com' + soup.find(
                         'li', {
                             'class': 'next'
                         }).a['href']
                 except:
                     new_releaserUrl = (
                         'https://id.tudou.com/i/%s/videos?order=1&page=%s'
                         % (releaser_id, page_num))
             video_lst = soup.find_all('div', {'class': 'v'})
             for line in video_lst:
                 video_info = self.process_one_video(line)
                 video_info['releaserUrl'] = releaserUrl
                 video_info['releaser'] = releaser
                 result_lst.append(video_info)
             print('get page %s list length is %s' %
                   (page_num, len(result_lst)))
             page_num += 1
             output_result(result_Lst=result_lst,
                           platform=self.platform,
                           output_to_es_raw=output_to_es_raw,
                           es_index=es_index,
                           doc_type=doc_type)
             result_lst.clear()
     if result_lst != []:
         output_result(result_Lst=result_lst,
                       platform=self.platform,
                       output_to_es_raw=output_to_es_raw,
                       es_index=es_index,
                       doc_type=doc_type)
         result_lst.clear()
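
A hedged usage sketch for key_customer, reusing the URL shape from its docstring (the crawler instance and page limit are illustrative):

# hypothetical usage; `crawler` is an instance of the Tudou crawler class
crawler.key_customer('https://id.tudou.com/i/UMjc5MzI5NDA==/videos',
                     releaser_page_num_max=5,
                     output_to_es_raw=True,
                     es_index='crawler-data-raw',
                     doc_type='doc')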
Example #25
 def parse_video_page_single_process(self,
                                     output_to_file=False,
                                     filepath=None,
                                     push_to_redis=False,
                                     output_to_es_raw=True,
                                     es_index="crawler-data-raw",
                                     doc_type="doc",
                                     output_to_es_register=False):
     key = 'iqiyi_video_page_html'
     result_list = []
     pid = os.getpid()
     while connect_with_redis.length_of_lst(key) > 0:
         video_page_html = connect_with_redis.retrieve_video_page_html_from_redis(
             platform=self.platform)
         soup = BeautifulSoup(video_page_html, 'html.parser')
         try:
             page_info = soup.find("div", {"is": "i71-play"})[":page-info"]
             page_info = page_info.replace("'", '"')
             page_dic = json.loads(page_info)
         except:
             page_dic = None
         if page_dic is not None:
             title = page_dic["tvName"]
             url = page_dic["pageUrl"]
             dura_str = page_dic["duration"]
             duration = trans_duration(dura_str)
             try:
                 releaser = page_dic["user"]["name"]
                 releaserUrl = page_dic["user"]["profileUrl"]
             except:
                 releaser = None
                 releaserUrl = None
         else:
             title = None
             url = None
             duration = None
             releaser = None
             releaserUrl = None
         try:
             video_info = soup.find("div",
                                    {"is": "i71-play"})[":video-info"]
             video_dic = json.loads(video_info)
         except:
             video_dic = None
         if video_dic is not None:
             if title is None:
                 title = video_dic['name']
             if url is None:
                 url = video_dic['url']
             if releaser is None:
                 try:
                     releaser = video_dic["user"]["name"]
                     releaserUrl = video_dic["user"]["profileUrl"]
                 except:
                     releaser = None
                     releaserUrl = None
             release_time = video_dic["firstPublishTime"]
             tvId = video_dic["tvId"]
             hot_idx_url = "https://pub.m.iqiyi.com/jp/h5/count/hotDisplay/?qipuId=%s" % tvId
             get_hot_idx = retry_get_url(hot_idx_url)
             hot_idx_str = get_hot_idx.text
              hot_idx = int(
                  re.findall(r"\d+", ' '.join(
                      re.findall(r'"count":\d+', hot_idx_str)))[0])
          else:
              # no video-info block: keep the downstream fields defined
              release_time = None
              tvId = None
              hot_idx = None
         fetch_time = int(
             datetime.datetime.timestamp(datetime.datetime.now()) * 1e3)
         if releaser is None:
             try:
                 releaser = soup.find('span', {
                     'class': 'intro-iterm__txt'
                 }).text
             except:
                 releaser = None
         video_page_dict = copy.deepcopy(self.video_data)
         video_page_dict["title"] = title
         video_page_dict["url"] = url
         video_page_dict["duration"] = duration
         video_page_dict["releaser"] = releaser
         video_page_dict["releaserUrl"] = releaserUrl
         video_page_dict["release_time"] = release_time
         video_page_dict["hot_idx"] = hot_idx
         video_page_dict["fetch_time"] = fetch_time
         video_page_dict["tvId"] = tvId
         result_list.append(video_page_dict)
         print(
             "platform: %s, action: parse video page, process_id: %s, has done: %s"
             % (self.platform, pid, len(result_list)))
         if len(result_list) >= 1000:
             output_result(result_Lst=result_list,
                           platform=self.platform,
                           output_to_file=output_to_file,
                           filepath=filepath,
                           push_to_redis=push_to_redis,
                           output_to_es_raw=output_to_es_raw,
                           es_index=es_index,
                           doc_type=doc_type,
                           output_to_es_register=output_to_es_register)
             result_list.clear()
     if result_list != []:
         output_result(result_Lst=result_list,
                       platform=self.platform,
                       output_to_file=output_to_file,
                       filepath=filepath,
                       push_to_redis=push_to_redis,
                       output_to_es_raw=output_to_es_raw,
                       es_index=es_index,
                       doc_type=doc_type,
                       output_to_es_register=output_to_es_register)
         result_list.clear()
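
The loop above drains a Redis list and flushes parsed pages in batches of 1000, with a final flush of the remainder. The same accumulate/flush shape, reduced to a minimal self-contained sketch (flush_in_batches and sink are hypothetical names, not part of the crawler):

def flush_in_batches(items, sink, batch_size=1000):
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) >= batch_size:
            sink(batch)
            batch = []  # rebind rather than clear(), in case sink kept a reference
    if batch:
        sink(batch)


# e.g. flush_in_batches(parsed_pages, lambda b: output_result(b, platform))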