def list_videos_bonus(plugin, item_id, category_url, page):
    """List the "bonus" videos of one paginated category page.

    BeautifulSoup variant. Yields one Listitem per video, then either a
    next-page link (when at least one video was found) or a
    'No videos found' notification followed by ``yield False``.
    """
    replay_episodes_html = urlquick.get(category_url + '/page/%s' % str(page)).text
    replay_episodes_soup = bs(replay_episodes_html, 'html.parser')
    # Page layout: one featured/full episode (col-md-6) plus a grid of
    # smaller tiles (col-md-3).
    video_integral = replay_episodes_soup.find('div', class_='col-md-6')
    all_videos = replay_episodes_soup.find_all('div', class_='col-md-3')
    at_least_one_item = False
    if video_integral is not None:
        at_least_one_item = True
        item = Listitem()
        item.label = video_integral.find('img').get('alt')
        video_url = URL_ROOT(video_integral.find('a').get('href'))
        item.art['thumb'] = video_integral.find('img').get('src')
        # Context-menu entry to download the video instead of playing it
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            video_url=video_url,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_url=video_url,
                          item_dict=cqu.item2dict(item))
        yield item
    for video in all_videos:
        at_least_one_item = True
        item = Listitem()
        item.label = video.find('img').get('alt')
        video_url = URL_ROOT(video.find('a').get('href'))
        item.art['thumb'] = video.find('img').get('src')
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            video_url=video_url,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_url=video_url,
                          item_dict=cqu.item2dict(item))
        yield item
    if page is not None and at_least_one_item:
        # More videos...
        yield Listitem.next_page(item_id=item_id,
                                 category_url=category_url,
                                 page=page + 1)
    else:
        plugin.notify(plugin.localize(LABELS['No videos found']), '')
        yield False
def list_videos_bonus(plugin, item_id, category_url, page):
    """List the "bonus" videos of one paginated category page.

    Yields one Listitem per video (featured tile first, then the grid),
    followed by a next-page link — or, when the page is empty, a
    'No videos found' notification and ``yield False``.
    """
    page_root = urlquick.get(category_url + '/page/%s' % str(page)).parse()

    # Collect the featured episode (big col-md-6 tile) first, then the
    # regular grid of col-md-3 tiles, and handle them uniformly.
    tiles = []
    featured = page_root.find(".//div[@class='col-md-6']")
    if featured is not None:
        tiles.append(featured)
    tiles.extend(page_root.iterfind(".//div[@class='col-md-3']"))

    for tile in tiles:
        item = Listitem()
        item.label = tile.find('.//img').get('alt')
        video_url = URL_ROOT(tile.find('.//a').get('href'))
        item.art['thumb'] = tile.find('.//img').get('src')
        # Context-menu entry to download the video instead of playing it
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            video_url=video_url,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_url=video_url,
                          item_dict=cqu.item2dict(item))
        yield item

    if page is not None and tiles:
        # More videos...
        yield Listitem.next_page(item_id=item_id,
                                 category_url=category_url,
                                 page=page + 1)
    else:
        plugin.notify(plugin.localize(LABELS['No videos found']), '')
        yield False
def list_videos(plugin, item_id, category_url, page):
    """List the videos of one page and chain to the next page."""
    root = urlquick.get(category_url % page).parse()
    article_list = root.find(".//ul[@class='live-article-list']")
    if article_list is not None:
        for block in article_list.findall(".//div[@class='img']"):
            poster = block.find('.//img')
            video_url = URL_ROOT + block.find('.//a').get('href')
            item = Listitem()
            item.label = poster.get('alt')
            item.art['thumb'] = poster.get('data-frz-src')
            # Context-menu download entry
            item.context.script(get_video_url,
                                plugin.localize(LABELS['Download']),
                                item_id=item_id,
                                video_url=video_url,
                                video_label=LABELS[item_id] + ' - ' + item.label,
                                download_mode=True)
            item.set_callback(get_video_url,
                              item_id=item_id,
                              video_url=video_url,
                              item_dict=cqu.item2dict(item))
            yield item
    yield Listitem.next_page(item_id=item_id,
                             category_url=category_url,
                             page=str(int(page) + 1))
def list_videos(plugin, item_id, day_id):
    """List the broadcasts missed on a given day.

    Fix: the optional subtitle was concatenated as the Element object
    itself (``video_title += ": " + subtitle``), which raises TypeError;
    the element's ``.text`` is what must be appended.
    """
    resp = urlquick.get(URL_DAYS)
    root = resp.parse("ul", attrs={"id": "slick-missed-day-%s" % (day_id)})
    for broadcast in root.iterfind(".//li"):
        # "20.30" -> "20:30"
        video_time = broadcast.find(".//time[@class='m-section__scroll__item__bottom__time']").text.replace('.', ':')
        video_title = video_time + " - " + broadcast.find(".//span[@class='m-section__scroll__item__bottom__title']").text
        subtitle = broadcast.find("span[@class='m-section__scroll__item__bottom__title--sub']")
        if subtitle is not None and subtitle.text is not None:
            # BUGFIX: append the subtitle's text, not the Element object
            video_title += ": " + subtitle.text
        video_image = URL_ROOT + broadcast.find('.//img').get('data-src')
        video_url = URL_ROOT + broadcast.find('.//a').get('href')
        item = Listitem()
        item.label = video_title
        item.art['thumb'] = video_image
        item.set_callback(
            get_video_url,
            item_id=item_id,
            video_url=video_url,
            item_dict=cqu.item2dict(item))
        yield item
def list_video_more_infos(plugin, item_id, video_url_info):
    """Yield a single fully-described Listitem from a video's JSON metadata."""
    meta = json.loads(urlquick.get(video_url_info).text)
    stream_url = meta["urlVideo"]

    item = Listitem()
    item.label = meta["seoTitle"]
    item.art['thumb'] = meta["image"]["pathHorizontal"]
    item.info['duration'] = int(meta["duration"])
    item.info['plot'] = meta["seoDescription"]
    # Context-menu download entry
    item.context.script(get_video_url,
                        plugin.localize(LABELS['Download']),
                        item_id=item_id,
                        video_url=stream_url,
                        video_label=LABELS[item_id] + ' - ' + item.label,
                        download_mode=True)
    item.set_callback(get_video_url,
                      item_id=item_id,
                      video_url=stream_url,
                      item_dict=cqu.item2dict(item))
    yield item
def list_videos_seasons(plugin, item_id, next_url):
    """Yield one Listitem per episode of the selected season."""
    data = json.loads(urlquick.get(next_url).text)
    show_name = data['currentPage']['displayName']
    for episode in data['episodes']['contents']:
        full_title = show_name + ' ' + episode['title'] + ' ' + episode['subtitle']
        media_url = episode['URLMedias']
        item = Listitem()
        item.label = full_title
        item.art['thumb'] = episode['URLImage']
        item.info['plot'] = episode['summary']
        # Context-menu download entry
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            next_url=media_url,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          next_url=media_url,
                          item_dict=cqu.item2dict(item))
        yield item
def list_videos_last(plugin, item_id, page=1):
    """List the most recent videos of a channel, 20 per API page."""
    endpoint = "standard/publish/channels/%s/contents/" % item_id
    query = {
        'size': 20,
        'page': page,
        'filter': "with-no-vod,only-visible",
        'sort': "begin_date:desc"
    }
    json_parser = json.loads(urlquick.get(URL_API(endpoint), params=query).text)
    for video in json_parser['result']:
        item = Listitem()
        # populate_item fills label/art/info and returns the broadcast id
        broadcast_id = populate_item(item, video, True)
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            broadcast_id=broadcast_id,
                            video_label=LABELS[item_id] + " - " + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          broadcast_id=broadcast_id,
                          item_dict=cqu.item2dict(item))
        yield item
    # More videos...
    if json_parser['cursor']['next'] is not None:
        yield Listitem.next_page(item_id=item_id,
                                 page=json_parser['cursor']['next'])
def list_videos(plugin, item_id, next_url, page):
    """List the video thumbnails of one page, then yield a next-page link."""
    root = urlquick.get(next_url % page).parse()
    # NB: the trailing space in 'ftve-thumbnail ' matches the site markup.
    for thumbnail in root.iterfind(".//div[@class='ftve-thumbnail ']"):
        data_contenu = thumbnail.get('data-contenu')
        item = Listitem()
        item.label = thumbnail.find('.//h4').find('.//a').get('title')
        item.art['thumb'] = thumbnail.find(
            ".//div[@class='thumbnail-img lazy']").get('data-original')
        # Context-menu download entry
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            video_data_contenu=data_contenu,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_data_contenu=data_contenu,
                          item_dict=cqu.item2dict(item))
        yield item
    yield Listitem.next_page(item_id=item_id,
                             next_url=next_url,
                             page=str(int(page) + 1))
def list_videos(plugin, item_id, program_part_url, page=0):
    """List the videos of a france.tv taxonomy, 20 per page.

    Fix: the 'sort' query parameter value was ``"sort = begin_date:desc"``,
    redundantly embedding the parameter name (with spaces); the sibling
    channel listing sends the plain value ``"begin_date:desc"``.
    """
    # URL example: http://api-front.yatta.francetv.fr/standard/publish/taxonomies/france-2_cash-investigation/contents/?size=20&page=0&sort=begin_date:desc&filter=with-no-vod,only-visible
    url_program = "standard/publish/taxonomies/%s/contents/" % program_part_url
    resp = urlquick.get(URL_API(url_program),
                        params={
                            'size': 20,
                            'page': page,
                            'filter': "with-no-vod,only-visible",
                            'sort': "begin_date:desc"
                        })
    json_parser = json.loads(resp.text)
    for video in json_parser['result']:
        item = Listitem()
        # populate_item fills label/art/info and returns the broadcast id
        broadcast_id = populate_item(item, video)
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            broadcast_id=broadcast_id,
                            video_label=LABELS[item_id] + " - " + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          broadcast_id=broadcast_id,
                          item_dict=cqu.item2dict(item))
        yield item
    # More videos...
    if json_parser['cursor']['next'] is not None:
        yield Listitem.next_page(item_id=item_id,
                                 program_part_url=program_part_url,
                                 page=json_parser['cursor']['next'])
def list_videos(plugin, item_id, category_url, page):
    """List the videos of one page (BeautifulSoup variant) and chain
    to the next page."""
    soup = bs(urlquick.get(category_url % page).text, 'html.parser')
    article_list = soup.find('ul', class_="live-article-list")
    if article_list:
        for block in article_list.find_all('div', class_="img"):
            poster = block.find('img')
            video_url = URL_ROOT + block.find('a').get('href')
            item = Listitem()
            item.label = poster.get('alt')
            item.art['thumb'] = poster.get('data-frz-src')
            # Context-menu download entry
            item.context.script(get_video_url,
                                plugin.localize(LABELS['Download']),
                                item_id=item_id,
                                video_url=video_url,
                                video_label=LABELS[item_id] + ' - ' + item.label,
                                download_mode=True)
            item.set_callback(get_video_url,
                              item_id=item_id,
                              video_url=video_url,
                              item_dict=cqu.item2dict(item))
            yield item
    yield Listitem.next_page(item_id=item_id,
                             category_url=category_url,
                             page=str(int(page) + 1))
def list_videos(plugin, item_id, program_part_url, page=0):
    """List the videos of a france.tv taxonomy (yatta API), 20 per page."""
    # URL example: http://api-front.yatta.francetv.fr/standard/publish/taxonomies/france-2_cash-investigation/contents/?size=20&page=0&sort=begin_date:desc&filter=with-no-vod,only-visible
    url_program = 'standard/publish/taxonomies/%s/contents/' % program_part_url
    resp = urlquick.get(URL_API(url_program),
                        params={
                            'size': 20,
                            'page': page,
                            'filter': 'with-no-vod,only-visible',
                            # NOTE(review): value embeds the key name
                            # ('sort=...'); other call sites send plain
                            # 'begin_date:desc' -- confirm against the API.
                            'sort': 'sort=begin_date:desc'
                        })
    json_parser = json.loads(resp.text)
    for video_datas in json_parser["result"]:
        item = Listitem()
        if video_datas["type"] == 'extrait':
            item.label = 'Extrait - ' + video_datas["title"]
        else:
            item.label = video_datas["title"]
        image = ''
        # NOTE(review): id_diffusion is only bound when a "main" media is
        # present; a result without one would raise NameError below.
        for video_media in video_datas["content_has_medias"]:
            if "main" in video_media["type"]:
                id_diffusion = video_media["media"]["si_id"]
                if video_datas["type"] != 'extrait':
                    item.info['duration'] = int(
                        video_media["media"]["duration"])
            elif "image" in video_media["type"]:
                # Pick the 16x9 thumbnail, 1024px wide
                for image_datas in video_media["media"]["patterns"]:
                    if "vignette_16x9" in image_datas["type"]:
                        image = URL_API(image_datas["urls"]["w:1024"])
        # first_publication_date looks like ISO 'YYYY-MM-DDTHH:MM:SS...'
        date_value = video_datas['first_publication_date'].split('T')[0]
        item.info.date(date_value, '%Y-%m-%d')
        if "text" in video_datas:
            item.info['plot'] = video_datas["text"]
        item.art['fanart'] = image
        item.art['thumb'] = image
        # Context-menu download entry
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            id_diffusion=id_diffusion,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          id_diffusion=id_diffusion,
                          item_dict=cqu.item2dict(item))
        yield item
    # More videos...
    if json_parser["cursor"]["next"] is not None:
        yield Listitem.next_page(item_id=item_id,
                                 program_part_url=program_part_url,
                                 page=json_parser["cursor"]["next"])
def list_videos(plugin, item_id, category_url):
    """List the videos of a category page (BeautifulSoup variant).

    The page has one featured/full episode (col-md-6 tile) plus a grid
    of smaller tiles (col-md-3).
    """
    replay_episodes_html = urlquick.get(category_url).text
    replay_episodes_soup = bs(replay_episodes_html, 'html.parser')
    video_integral = replay_episodes_soup.find('div', class_='col-md-6')
    all_videos = replay_episodes_soup.find_all('div', class_='col-md-3')
    if video_integral is not None:
        item = Listitem()
        item.label = video_integral.find('img').get('alt')
        video_url = URL_ROOT(video_integral.find('a').get('href'))
        item.art['thumb'] = video_integral.find('img').get('src')
        # Context-menu download entry
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            video_url=video_url,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_url=video_url,
                          item_dict=cqu.item2dict(item))
        yield item
    for video in all_videos:
        item = Listitem()
        item.label = video.find('img').get('alt')
        video_url = URL_ROOT(video.find('a').get('href'))
        item.art['thumb'] = video.find('img').get('src')
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            video_url=video_url,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_url=video_url,
                          item_dict=cqu.item2dict(item))
        yield item
def list_videos(plugin, item_id, category_url):
    """List the videos of a category page: the featured episode
    (col-md-6 tile) followed by the regular grid (col-md-3 tiles)."""
    page_root = urlquick.get(category_url).parse()

    # Featured tile first, then the grid, handled uniformly.
    tiles = []
    featured = page_root.find(".//div[@class='col-md-6']")
    if featured is not None:
        tiles.append(featured)
    tiles.extend(page_root.iterfind(".//div[@class='col-md-3']"))

    for tile in tiles:
        item = Listitem()
        item.label = tile.find('.//img').get('alt')
        video_url = URL_ROOT(tile.find('.//a').get('href'))
        item.art['thumb'] = tile.find('.//img').get('src')
        # Context-menu download entry
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            video_url=video_url,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_url=video_url,
                          item_dict=cqu.item2dict(item))
        yield item
def list_videos(plugin, item_id, next_url, page):
    """List the videos of one JSON page ('videos' or 'contents' payload).

    Fix: when the payload contained neither key, ``list_id`` was left
    unbound and the later subscript raised NameError; it now falls
    through to the 'No videos found' notification.
    """
    resp = urlquick.get(next_url + '/page/' + page)
    json_parser = json.loads(resp.text)
    if 'videos' in json_parser:
        list_id = 'videos'
    elif 'contents' in json_parser:
        list_id = 'contents'
    else:
        # BUGFIX: list_id used to be unbound here -> NameError
        list_id = None
    at_least_one_item = False
    for video_datas in (json_parser[list_id] if list_id is not None else []):
        at_least_one_item = True
        video_title = video_datas['title']
        video_plot = video_datas['description']
        # lastPublicationDate is a Unix timestamp
        date_epoch = video_datas['lastPublicationDate']
        date_value = time.strftime('%Y-%m-%d', time.localtime(date_epoch))
        video_url = URL_STREAM_ROOT + video_datas['url']
        video_image = ''
        for media_datas in video_datas['medias']:
            if 'urlThumbnail' in media_datas:
                video_image = URL_STREAM_ROOT + media_datas['urlThumbnail']
                break
        item = Listitem()
        item.label = video_title
        item.info['plot'] = video_plot
        item.art['thumb'] = video_image
        item.info.date(date_value, '%Y-%m-%d')
        # Context-menu download entry
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            video_url=video_url,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_url=video_url,
                          item_dict=cqu.item2dict(item))
        yield item
    if at_least_one_item:
        yield Listitem.next_page(item_id=item_id,
                                 next_url=next_url,
                                 page=str(int(page) + 1))
    else:
        plugin.notify(plugin.localize(LABELS['No videos found']), '')
        yield False
def generic_menu(plugin, menu_id, item_module=None, item_dict=None): """ Build a generic addon menu with all not hidden items """ # TEMPO (waiting for the CodeQuick update) plugin.cache_to_disc = True menu = get_sorted_menu(menu_id) if not menu: # If the selected menu is empty just reload the current menu yield False for index, (item_order, item_id, item_infos) in enumerate(menu): item = Listitem() add_context_menus_to_item(plugin, item, index, item_id, menu_id, len(menu)) label = LABELS[item_id] if isinstance(label, int): label = plugin.localize(label) item.label = label # Get item path of icon and fanart if 'thumb' in item_infos: item.art["thumb"] = common.get_item_media_path(item_infos['thumb']) if 'fanart' in item_infos: item.art["fanart"] = common.get_item_media_path( item_infos['fanart']) # If this item requires a module to work, get # the module path to be loaded item.params['item_module'] = item_infos.get('module') # Get the next action to trigger if this # item will be selected by the user item.set_callback(eval(item_infos['callback']), item_id, item_dict=cqu.item2dict(item)) yield item
def list_videos(plugin, item_id, program_id, program_season_number):
    """List the episodes of one season of a program.

    Fix: the duration was computed as ``int(str(int(ms) / 1000))`` which
    only works under Python 2 integer division; on Python 3 the division
    yields a float and ``int('12.345')`` raises ValueError. Floor
    division gives the identical Python 2 result and works on both.
    """
    resp = urlquick.get(URL_VIDEOS % program_id)
    json_parser = json.loads(resp.text)
    at_least_one_item = False
    # Episodes are grouped by season number (as string keys)
    episodes_by_season = json_parser["videos"].get("episode", {})
    for video_datas in episodes_by_season.get(str(program_season_number), []):
        at_least_one_item = True
        video_title = video_datas["title"]
        # videoDuration is expressed in milliseconds
        video_duration = int(video_datas["videoDuration"]) // 1000
        video_plot = video_datas["description"]
        video_image = video_datas["image"]["src"]
        video_id = video_datas["path"]
        item = Listitem()
        item.label = video_title
        item.art["thumb"] = video_image
        item.art["fanart"] = video_image
        item.info["plot"] = video_plot
        item.info["duration"] = video_duration
        # Context-menu download entry
        item.context.script(
            get_video_url,
            plugin.localize(LABELS['Download']),
            item_id=item_id,
            video_id=video_id,
            video_title=video_title,
            video_plot=video_plot,
            video_image=video_image,
            video_label=LABELS[item_id] + ' - ' + item.label,
            download_mode=True)
        item.set_callback(
            get_video_url,
            item_id=item_id,
            video_id=video_id,
            item_dict=cqu.item2dict(item)
        )
        yield item
    if not at_least_one_item:
        plugin.notify(plugin.localize(LABELS['No videos found']), '')
        yield False
def list_videos(plugin, item_id, program_url):
    """List the videos of a program page (BeautifulSoup variant,
    'video_mosaic' anchors)."""
    resp = urlquick.get(program_url)
    root_soup = bs(resp.text, 'html.parser')
    list_videos_datas = root_soup.find_all('a', class_='video_mosaic')
    for video_datas in list_videos_datas:
        video_title = video_datas.get('title')
        video_plot = video_datas.get('description')
        video_image = video_datas.find('img').get('src')
        # The diffusion id is embedded in the href: .../video/<id>@Regions
        id_diffusion = re.compile(r'video\/(.*?)\@Regions').findall(
            video_datas.get('href'))[0]
        video_duration = 0
        if video_datas.find('p', class_='length').get_text():
            # "... : HH:MM:SS" -> seconds
            duration_values = video_datas.find(
                'p', class_='length').get_text().split(' : ')[1].split(':')
            video_duration = int(duration_values[0]) * 3600 + int(
                duration_values[1]) * 60 + int(duration_values[2])
        item = Listitem()
        item.label = video_title
        item.art['thumb'] = video_image
        item.info['plot'] = video_plot
        item.info['duration'] = video_duration
        date_value = ''
        if video_datas.find('p', class_='date').get_text():
            # "... : DD/MM/YYYY"
            date_value = video_datas.find(
                'p', class_='date').get_text().split(' : ')[1]
            # NOTE(review): date set only when present -- confirm that the
            # original intent was not to call info.date unconditionally.
            item.info.date(date_value, '%d/%m/%Y')
        # Context-menu download entry
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            id_diffusion=id_diffusion,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          id_diffusion=id_diffusion,
                          item_dict=cqu.item2dict(item))
        yield item
def list_videos(plugin, item_id, program_url):
    """List the videos of a program page ('video_mosaic' slider
    anchors, urlquick parser variant)."""
    resp = urlquick.get(program_url)
    root = resp.parse()
    for video_datas in root.iterfind(
            ".//a[@class='slider-inline-style--content video_mosaic']"):
        video_title = video_datas.get('title')
        video_plot = video_datas.get('description')
        # Prefer the lazy-loaded srcset when present
        if video_datas.find('.//img').get('data-srcset'):
            video_image = video_datas.find('.//img').get('data-srcset')
        else:
            video_image = video_datas.find('.//img').get('src')
        # The diffusion id is embedded in the href: .../video/<id>@Regions
        id_diffusion = re.compile(r'video\/(.*?)\@Regions').findall(
            video_datas.get('href'))[0]
        item = Listitem()
        item.label = video_title
        item.art['thumb'] = video_image
        item.info['plot'] = video_plot
        date_value = ''
        if video_datas.find(
                ".//p[@class='slider-inline-style--text text-light m-t-0']"
        ).text is not None:
            # "... du DD/MM/YYYY"
            date_value = video_datas.find(
                ".//p[@class='slider-inline-style--text text-light m-t-0']"
            ).text.split(' du ')[1]
            # NOTE(review): date set only when present -- confirm that the
            # original intent was not to call info.date unconditionally.
            item.info.date(date_value, '%d/%m/%Y')
        # Context-menu download entry
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            id_diffusion=id_diffusion,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          id_diffusion=id_diffusion,
                          item_dict=cqu.item2dict(item))
        yield item
def list_videos(plugin, item_id, program_url):
    """List the episodes of a program page ('push type-episode' items,
    both unseen and already-seen variants)."""
    root = urlquick.get(program_url).parse()
    episodes = root.findall(".//li[@class='push type-episode']")
    episodes += root.findall(".//li[@class='push type-episode active seen']")
    for episode in episodes:
        # Title: "<title> - <description>" when a description exists
        title = episode.find(".//div[@class='title']").text.strip()
        description = episode.find(".//div[@class='description']").text
        if description is not None:
            title = title + ' - ' + description.strip()
        video_url = URL_ROOT_NOUVELLES_ECRITURES % item_id + \
            episode.find('.//a').get('href')
        poster = episode.find('.//img')
        video_image = poster.get('src') if poster is not None else ''
        item = Listitem()
        item.label = title
        item.art['thumb'] = video_image
        # Context-menu download entry
        item.context.script(
            get_video_url,
            plugin.localize(LABELS['Download']),
            item_id=item_id,
            video_url=video_url,
            video_label=LABELS[item_id] + ' - ' + item.label,
            download_mode=True)
        item.set_callback(
            get_video_url,
            item_id=item_id,
            video_url=video_url,
            item_dict=cqu.item2dict(item))
        yield item
def list_videos(plugin, item_id, next_url, page):
    """List the video thumbnails of one grid page (BeautifulSoup
    variant), then yield a next-page link."""
    resp = urlquick.get(next_url % page)
    root_soup = bs(resp.text, 'html.parser')
    list_videos_datas = root_soup.find(
        'div', class_='center-block bloc-thumbnails').find_all(
            'div', class_=re.compile("col-xs-3"))
    for video_data in list_videos_datas:
        video_title = video_data.find('h4').find(
            'a').get('title')
        # Thumbnail URL is kept in the lazy-load attribute
        video_image = video_data.find(
            'div', class_='thumbnail-img lazy').get('data-original')
        # Opaque content id passed back to get_video_url
        video_data_contenu = video_data.find(
            'div', class_='ftve-thumbnail ').get('data-contenu')
        item = Listitem()
        item.label = video_title
        item.art['thumb'] = video_image
        # Context-menu download entry
        item.context.script(
            get_video_url,
            plugin.localize(LABELS['Download']),
            item_id=item_id,
            video_data_contenu=video_data_contenu,
            video_label=LABELS[item_id] + ' - ' + item.label,
            download_mode=True)
        item.set_callback(
            get_video_url,
            item_id=item_id,
            video_data_contenu=video_data_contenu,
            item_dict=cqu.item2dict(item))
        yield item
    yield Listitem.next_page(
        item_id=item_id, next_url=next_url, page=str(int(page) + 1))
def list_videos(plugin, item_id, mode, page):
    """List one feed page of francetv sport videos, then a next-page link."""
    feed = json.loads(
        urlquick.get(URL_FRANCETV_SPORT % mode + '?page=%s' % page).text)
    for entry in feed["page"]["flux"]:
        image = entry["image"]["large_16_9"] if 'image' in entry else ''
        duration = int(entry["duration"]) if 'duration' in entry else 0
        video_url = URL_ROOT_SPORT + entry["url"]
        # 'updated' is a Unix timestamp
        aired = time.strftime('%Y-%m-%d', time.localtime(entry["updated"]))
        item = Listitem()
        item.label = entry["title"]
        item.info['duration'] = duration
        item.art['thumb'] = image
        item.info.date(aired, '%Y-%m-%d')
        # Context-menu download entry
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            video_url=video_url,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_url=video_url,
                          item_dict=cqu.item2dict(item))
        yield item
    yield Listitem.next_page(item_id=item_id,
                             mode=mode,
                             page=str(int(page) + 1))
def list_videos(plugin, item_id, list_jt_name):
    """List the JT (news bulletin) videos for the configured region.

    Fix: the month lookup was wrapped in a broad ``except Exception``
    just to default to '00'; ``dict.get`` with a default expresses the
    same fallback without swallowing unrelated errors.
    """
    region = utils.ensure_unicode(Script.setting['la_1ere.region'])
    region = LIVE_LA1ERE_REGIONS[region]
    resp = urlquick.get(URL_JT_JSON % region)
    json_parser = json.loads(resp.text)
    for video_datas in json_parser[list_jt_name]:
        video_title = video_datas["titre"] + ' - ' + video_datas["date"]
        video_image = video_datas["url_image"]
        id_diffusion = video_datas["id"]
        # "date" looks like "<weekday> <day> <french month> <year> ..."
        date_value = video_datas["date"].split(' ')
        day = date_value[1]
        # Unknown French month names fall back to '00'
        month = CORRECT_MONTH.get(date_value[2], '00')
        year = date_value[3]
        date_value = '-'.join((year, month, day))
        item = Listitem()
        item.label = video_title
        item.art['thumb'] = video_image
        item.info.date(date_value, '%Y-%m-%d')
        # Context-menu download entry
        item.context.script(
            get_video_url,
            plugin.localize(LABELS['Download']),
            item_id=item_id,
            id_diffusion=id_diffusion,
            video_label=LABELS[item_id] + ' - ' + item.label,
            download_mode=True)
        item.set_callback(
            get_video_url,
            item_id=item_id,
            id_diffusion=id_diffusion,
            item_dict=cqu.item2dict(item))
        yield item
def list_videos(plugin, item_id, program_url):
    """List the episodes of a program page (BeautifulSoup variant,
    'push type-episode' items)."""
    resp = urlquick.get(program_url)
    root_soup = bs(resp.text, 'html.parser')
    list_videos_datas = root_soup.find_all(
        "li", class_=re.compile("push type-episode"))
    for video_datas in list_videos_datas:
        # Title: "<title> - <description>" when a description exists
        if video_datas.find('div', class_='description'):
            video_title = video_datas.find(
                'div', class_='title').get_text().strip() + ' - ' + \
                video_datas.find(
                    'div', class_='description').get_text().strip()
        else:
            video_title = video_datas.find('div',
                                           class_='title').get_text().strip()
        video_url = URL_ROOT_NOUVELLES_ECRITURES % item_id + \
            video_datas.find('a').get('href')
        video_image = ''
        if video_datas.find('img'):
            video_image = video_datas.find('img').get('src')
        item = Listitem()
        item.label = video_title
        item.art['thumb'] = video_image
        # Context-menu download entry
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            video_url=video_url,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_url=video_url,
                          item_dict=cqu.item2dict(item))
        yield item
def list_videos(plugin, item_id, serie_id):
    """List the episodes of a series.

    Fix: the title used an accidental implicit string-literal
    concatenation (``' - ' ' S%sE%s'``); the single resulting literal is
    now written explicitly (same output string).
    """
    resp = urlquick.get(URL_VIDEOS % serie_id)
    json_parser = json.loads(resp.text)
    for video_datas in json_parser["episodes"]:
        # "<brand> - S<season>E<episode> - <name>"
        video_title = video_datas["brand_name"] + \
            ' - S%sE%s' % (video_datas["series_number"],
                           str(video_datas["episode_number"])) + \
            ' - ' + video_datas["name"]
        video_image = video_datas["image"]
        video_plot = video_datas["synopsis"]
        # duration is given in minutes
        video_duration = video_datas["duration"] * 60
        video_id = video_datas["video_id"]
        item = Listitem()
        item.label = video_title
        item.art['thumb'] = video_image
        item.info['plot'] = video_plot
        item.info['duration'] = video_duration
        item.set_callback(get_video_url,
                          item_id=item_id,
                          data_video_id=video_id,
                          item_dict=cqu.item2dict(item))
        yield item
def list_videos(plugin, item_id, program_category_url):
    """List MYTF1 videos of a program category.

    Two layouts are handled: the meteo.tf1.fr weather page (a single
    item whose program id is scraped from an inline ``src = '...'``
    snippet) and the regular video grid (with pagination links).
    """
    if 'meteo.tf1.fr/meteo-france' in program_category_url:
        resp = urlquick.get(program_category_url)
        root = resp.parse("td", attrs={"class": "textbase"})
        title = root.find('.//h3').text
        # Program id is embedded in an inline script: "; src = '<id>'"
        program_id = re.compile(r'\; src \= \'(.*?)\'').findall(resp.text)[0]
        item = Listitem()
        item.label = title
        # Context-menu download entry
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            program_id=program_id,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          program_id=program_id,
                          item_dict=cqu.item2dict(item))
        yield item
    else:
        resp = urlquick.get(program_category_url)
        grid = resp.parse("div", attrs={"class": "content"})
        if grid is not None:
            for li in grid.findall('.//li'):
                # The entry type is encoded in the xiti tracking label
                # ("<type> - ..."); playlist entries are skipped.
                video_type_string = li.find(
                    ".//div[@class='description']").find('.//a').get(
                        'data-xiti-libelle')
                video_type_string = video_type_string.split('-')[0]
                if 'Playlist' not in video_type_string:
                    item = Listitem()
                    item.label = li.find(".//p[@class='title']").text
                    # Optional metadata below is best-effort scraping
                    try:
                        stitle = li.find(".//p[@class='stitle']").text
                        item.info['plot'] = stitle
                    except Exception:
                        pass
                    try:
                        duration_soup = li.find(".//p[@class='uptitle']").find(
                            ".//span[@class='momentDate']")
                        duration = int(duration_soup.text)
                        item.info['duration'] = duration
                    except Exception:
                        pass
                    # Thumbnail: prefer the lazy-loaded srcset
                    img = li.find('.//img')
                    try:
                        img = img.get('data-srcset')
                    except Exception:
                        img = img.get('srcset')
                    # Take the last (largest) srcset candidate
                    item.art["thumb"] = 'http:' + \
                        img.split(',')[-1].split(' ')[0]
                    try:
                        date_value = li.find(".//div[@class='text']").find(
                            ".//p[@class='uptitle']").find('.//span')
                        aired = date_value.get('data-date').split('T')[0]
                        item.info.date(aired, '%Y-%m-%d')
                    except Exception:
                        pass
                    program_id = li.find('.//a').get('href')
                    # Context-menu download entry
                    item.context.script(get_video_url,
                                        plugin.localize(LABELS['Download']),
                                        item_id=item_id,
                                        program_id=program_id,
                                        video_label=LABELS[item_id] + ' - ' + item.label,
                                        download_mode=True)
                    item.set_callback(get_video_url,
                                      item_id=item_id,
                                      program_id=program_id,
                                      item_dict=cqu.item2dict(item))
                    yield item
        # Check for any next page
        pagination = resp.parse(
            u"div", attrs={'class': 'clearfix list_pagination'})
        ol = pagination.find('.//ol')
        if ol is not None:
            li_list = ol.findall('.//li')
            for i in range(len(li_list) - 1):
                li = li_list[i]
                # The entry right after the 'selected' one is the
                # next-page target.
                if 'selected' in li.find('.//a').get('class') and \
                        i < len(li_list):
                    next_li = li_list[i + 1]
                    yield Listitem.next_page(
                        item_id=item_id,
                        program_category_url=next_li.find('.//a').get(
                            'href'))
def list_videos(plugin, item_id, program_id, sub_category_id):
    """Build one playable Listitem per video of a program (6play-style API).

    Fixes vs. original: the bare ``except:`` around ``item.info.date`` is
    narrowed to ``Exception`` (no longer swallows SystemExit /
    KeyboardInterrupt); dead locals removed (``year`` and ``date`` were
    computed but never used, plus a no-op ``aired = aired``); the ``img``
    loop variable no longer shadows the ``img = ''`` placeholder.
    """
    if sub_category_id is None:
        url = URL_VIDEOS2 % program_id
    else:
        url = URL_VIDEOS % (program_id, sub_category_id)
    resp = urlquick.get(url)
    json_parser = json.loads(resp.text)

    # TO DO Playlist More one 'clips'
    if not json_parser:
        plugin.notify(plugin.localize(LABELS['No videos found']), '')
        yield False

    for video in json_parser:
        video_id = str(video['id'])
        title = video['title']
        duration = video['clips'][0]['duration']
        description = video.get('description', '')

        # aired: first 10 chars of last_diffusion, i.e. 'YYYY-MM-DD'
        try:
            aired = video['clips'][0]['product']['last_diffusion'][:10]
        except (KeyError, IndexError, TypeError):
            aired = ''

        # Thumbnail: last image with role 'vignette' wins
        program_img = ''
        for image in video['clips'][0]['images']:
            if image['role'] == 'vignette':
                program_img = URL_IMG % (image['external_key'])

        item = Listitem()
        item.label = title
        item.info['plot'] = description
        item.info['duration'] = duration
        item.art["thumb"] = program_img
        item.art["fanart"] = program_img
        try:
            item.info.date(aired, '%Y-%m-%d')
        except Exception:  # empty/malformed date -> just skip the field
            pass

        # Kodi < 18 needs the explicit download context-menu entry
        xbmc_version = int(
            xbmc.getInfoLabel("System.BuildVersion").split('-')[0].split('.')
            [0])
        if xbmc_version < 18:
            item.context.script(get_video_url,
                                plugin.localize(LABELS['Download']),
                                item_id=item_id,
                                video_id=video_id,
                                video_label=LABELS[item_id] + ' - ' + item.label,
                                download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_id=video_id,
                          item_dict=cqu.item2dict(item))
        yield item
def list_videos_search(plugin, item_id, search_query, page=0):
    """Search the Yatta Algolia index and list matching videos of a channel.

    Result pages that contain no hit for `item_id` are skipped until a
    page with at least one match is found or the index is exhausted.

    :param plugin: codequick plugin instance
    :param item_id: channel identifier used to filter hits
    :param search_query: free-text query sent to Algolia
    :param page: zero-based Algolia result page to start from
    """
    at_least_one_item = False
    nb_pages = 0
    while not at_least_one_item:
        url_search = 'https://vwdlashufe-dsn.algolia.net/1/indexes/yatta_prod_contents/'
        resp = urlquick.get(url_search,
                            params={
                                'page': page,
                                'filters': 'class:video',
                                'query': search_query
                            },
                            headers=HEADERS_YATTA)
        json_parser = json.loads(resp.text)
        nb_pages = json_parser['nbPages']

        for hit in json_parser['hits']:
            # Keep only hits published on the requested channel.
            if not any(channel['url'] == item_id
                       for channel in hit['channels']):
                continue
            at_least_one_item = True

            title = hit['title']
            if 'program' in hit:
                title = hit['program']['label'] + ' - ' + title
            headline = hit['headline_title']
            desc = hit['text']
            duration = hit['duration']
            season = hit['season_number']
            episode = hit['episode_number']
            id_yatta = hit['id']
            director = hit['director']
            presenter = hit['presenter']
            casting = hit['casting']

            last_publication_date = hit['dates']['last_publication_date']
            date_value = time.strftime('%Y-%m-%d',
                                       time.localtime(last_publication_date))

            image_400 = ''
            image_1024 = ''
            if 'image' in hit:
                image_400 = URL_API(
                    hit['image']['formats']['vignette_16x9']['urls']['w:400'])
                image_1024 = URL_API(
                    hit['image']['formats']['vignette_16x9']['urls']['w:1024'])

            if headline and headline != '':
                desc = headline + '\n' + desc
            if not director:
                director = presenter

            item = Listitem()
            item.label = title
            item.art['fanart'] = image_1024
            item.art['thumb'] = image_400
            item.info['plot'] = desc
            item.info['duration'] = duration
            item.info['season'] = season
            item.info['episode'] = episode
            item.info['cast'] = casting.split(', ')
            item.info['director'] = director
            item.info.date(date_value, '%Y-%m-%d')
            item.context.script(get_video_url,
                                plugin.localize(LABELS['Download']),
                                item_id=item_id,
                                id_yatta=id_yatta,
                                video_label=LABELS[item_id] + ' - ' +
                                item.label,
                                download_mode=True)
            item.set_callback(get_video_url,
                              item_id=item_id,
                              id_yatta=id_yatta,
                              item_dict=cqu.item2dict(item))
            yield item

        page = page + 1
        if page >= nb_pages:
            # BUGFIX: without this guard the loop kept requesting empty
            # pages forever when no page contained a hit for item_id.
            break

    # More videos...
    if at_least_one_item and page < nb_pages:
        # BUGFIX: 'page' already points at the next unread page here;
        # the previous code passed 'page + 1' and skipped one page.
        yield Listitem.next_page(search_query=search_query,
                                 item_id=item_id,
                                 page=page)
def _yield_strate_videos(plugin, item_id, contents):
    """Yield one playable Listitem per playable entry of a strate.

    Shared by both branches of list_videos below, whose item-building
    code was previously duplicated byte for byte.
    """
    for video_datas in contents:
        # Only these content types are playable.
        if video_datas["type"] not in ('quicktime', 'pfv', 'VoD',
                                       'detailPage'):
            continue

        # Label: prefer 'subtitle - title', then 'title', then the
        # onClick display name.
        if 'title' in video_datas:
            if 'subtitle' in video_datas:
                video_title = video_datas['subtitle'] + ' - ' + \
                    video_datas['title']
            else:
                video_title = video_datas['title']
        else:
            video_title = video_datas["onClick"]["displayName"]

        video_image = video_datas['URLImage']

        # 'quicktime' entries carry the media URL directly; other types
        # need one extra request on their detail page.
        if video_datas["type"] == 'quicktime':
            video_url = video_datas["onClick"]["URLMedias"]
        else:
            resp2 = urlquick.get(video_datas["onClick"]["URLPage"])
            json_parser2 = json.loads(resp2.text)
            video_url = json_parser2['detail']['informations']['URLMedias']

        item = Listitem()
        item.label = video_title
        item.art['thumb'] = video_image
        item.context.script(get_video_url,
                            plugin.localize(LABELS['Download']),
                            item_id=item_id,
                            next_url=video_url,
                            video_label=LABELS[item_id] + ' - ' + item.label,
                            download_mode=True)
        item.set_callback(get_video_url,
                          item_id=item_id,
                          next_url=video_url,
                          item_dict=cqu.item2dict(item))
        yield item


def list_videos(plugin, item_id, next_url, sub_program_title):
    """List the videos of the strate matching sub_program_title.

    A strate matches either through its own 'title' key or, when it has
    none, through the page display name.

    :param plugin: codequick plugin instance
    :param item_id: channel identifier
    :param next_url: JSON page URL listing the strates
    :param sub_program_title: title of the strate to expand
    """
    resp = urlquick.get(next_url)
    json_parser = json.loads(resp.text)

    for sub_program_datas in json_parser["strates"]:
        if 'title' in sub_program_datas:
            matches = sub_program_title == sub_program_datas["title"]
        else:
            matches = sub_program_title == \
                json_parser["currentPage"]["displayName"]
        if matches and 'contents' in sub_program_datas:
            for item in _yield_strate_videos(plugin, item_id,
                                             sub_program_datas["contents"]):
                yield item
def tv_guide_menu(plugin, menu_id, item_module=None, item_dict=None):
    """Build the TV-guide channel list for a country menu.

    Yields one Listitem per channel of `menu_id`, enriched with the
    program currently on air according to the country's TV-guide grabber
    module (resources.lib.channels.tv_guides.<menu_id>).

    :param plugin: codequick plugin instance
    :param menu_id: country menu identifier (also the grabber module name)
    :param item_module: unused here, kept for callback-signature parity
    :param item_dict: unused here, kept for callback-signature parity
    """
    # TEMPO (waiting for the CodeQuick update)
    plugin.cache_to_disc = True

    # Move up and move down actions only work with this sort method.
    plugin.add_sort_methods(xbmcplugin.SORT_METHOD_UNSORTED)

    menu = get_sorted_menu(menu_id)
    channels_id = [channel_id for _, channel_id, _ in menu]

    # Load the grabber module according to the country
    # (e.g. resources.lib.channels.tv_guides.fr_live).
    tv_guide_module_path = 'resources.lib.channels.tv_guides.' + menu_id
    tv_guide_module = importlib.import_module(tv_guide_module_path)

    # For each channel, grab the current program according to the current time.
    tv_guide = tv_guide_module.grab_tv_guide(channels_id)

    for index, (channel_order, channel_id, channel_infos) in enumerate(menu):
        item = Listitem()
        add_context_menus_to_item(plugin, item, index, channel_id, menu_id,
                                  len(menu))

        label = LABELS[channel_id]
        if isinstance(label, int):
            label = plugin.localize(label)
        item.label = label

        # Channel icon and fanart when declared in the channel table.
        if 'thumb' in channel_infos:
            item.art["thumb"] = common.get_item_media_path(
                channel_infos['thumb'])
        if 'fanart' in channel_infos:
            item.art["fanart"] = common.get_item_media_path(
                channel_infos['fanart'])

        # If this item requires a module to work, get
        # the module path to be loaded.
        item.params['item_module'] = channel_infos.get('module')

        # Enrich the item with program infos from the grabber, when any.
        if channel_id in tv_guide:
            guide_infos = tv_guide[channel_id]

            if 'title' in guide_infos:
                item.label = item.label + ' — ' + guide_infos['title']
            item.info['originaltitle'] = guide_infos.get('originaltitle')

            # e.g. Divertissement, Documentaire, Film, ...
            item.info['genre'] = guide_infos.get('genre')

            plot = []
            if 'specific_genre' in guide_infos:
                if 'genre' not in guide_infos:
                    item.info['genre'] = guide_infos['specific_genre']
                elif guide_infos.get('genre') in guide_infos['specific_genre']:
                    item.info['genre'] = guide_infos['specific_genre']
                else:
                    plot.append(guide_infos['specific_genre'])

            # start_time and stop_time must be strings.
            if 'start_time' in guide_infos and 'stop_time' in guide_infos:
                plot.append(guide_infos['start_time'] + ' - ' +
                            guide_infos['stop_time'])
            elif 'start_time' in guide_infos:
                plot.append(guide_infos['start_time'])

            if 'subtitle' in guide_infos:
                plot.append(guide_infos['subtitle'])
            if 'plot' in guide_infos:
                plot.append(guide_infos['plot'])
            item.info['plot'] = '\n'.join(plot)

            item.info['episode'] = guide_infos.get('episode')
            item.info['season'] = guide_infos.get('season')
            item.info["rating"] = guide_infos.get('rating')
            item.info["duration"] = guide_infos.get('duration')

            if 'fanart' in guide_infos:
                item.art["fanart"] = guide_infos['fanart']
            if 'thumb' in guide_infos:
                item.art["thumb"] = guide_infos['thumb']

        # Next action to trigger when the user selects this channel.
        # NOTE(review): eval() on a string from the channel table is
        # dangerous if that table can ever hold untrusted data; a dict
        # mapping callback names to functions would be safer.
        item.set_callback(eval(channel_infos['callback']),
                          channel_id,
                          item_dict=cqu.item2dict(item))
        yield item
def list_sub_programs(plugin, item_id, next_url):
    """List the sub-sections of a program page.

    Depending on the JSON layout, sections come from 'strates', from
    'seasons' (under 'detail') or from a flat 'episodes' list; one
    Listitem is yielded per section or episode.
    """
    resp = urlquick.get(next_url)
    json_parser = json.loads(resp.text)

    if 'strates' in json_parser:
        for strate in json_parser["strates"]:
            # Skip purely decorative strates.
            if strate['type'] in ('plainTextHTML', 'carrousel'):
                continue

            if 'title' in strate:
                sub_program_title = strate["title"]
            else:
                sub_program_title = json_parser["currentPage"]["displayName"]

            item = Listitem()
            item.label = sub_program_title
            item.set_callback(
                list_videos,
                item_id=item_id,
                next_url=next_url,
                sub_program_title=sub_program_title)
            yield item

    elif 'seasons' in json_parser['detail']:
        for season in json_parser['detail']['seasons']:
            item = Listitem()
            item.label = season['onClick']['displayName']
            item.set_callback(
                list_videos_seasons,
                item_id=item_id,
                next_url=season['onClick']['URLPage'])
            yield item

    elif 'episodes' in json_parser:
        program_title = json_parser['currentPage']['displayName']
        for episode in json_parser['episodes']['contents']:
            label_parts = [program_title, episode['title']]
            if 'subtitle' in episode:
                label_parts.append(episode['subtitle'])

            item = Listitem()
            item.label = ' '.join(label_parts)
            item.art['thumb'] = episode['URLImage']
            item.info['plot'] = episode['summary']
            item.context.script(
                get_video_url,
                plugin.localize(LABELS['Download']),
                item_id=item_id,
                next_url=episode['URLMedias'],
                video_label=LABELS[item_id] + ' - ' + item.label,
                download_mode=True)
            item.set_callback(
                get_video_url,
                item_id=item_id,
                next_url=episode['URLMedias'],
                item_dict=cqu.item2dict(item))
            yield item