def load_videos(lang, url):
    """Scrape the episode listing page at *url* and return a list of
    episode dicts with keys: id, title, description, date, link,
    thumbnail.  *lang* is the site language segment used to build links.
    """
    src = download_page(url)
    regex = '<div class=\'txt_emission\'[^>]*><a href=\'([^\']*)\'[^>]*>[^<]*<b>([^<]*)</b>[^<]*</a>[^<]*<div><a [^>]*>([^<]*)</a></div><div id=\'txt_emission_date\'>([^>]*)</div></div></td><td[^>]*><a[^>]*><img src="([^"]*)"[^>]*>'
    matches = re.findall(regex, src, flags=re.DOTALL | re.IGNORECASE | re.UNICODE)
    episodes = []
    for match in matches:
        # The episode id is the second-to-last token of the href when
        # split on '-' (capturing split keeps the tokens themselves).
        tokens = re.split('([^-]*)-', match[0])
        episodes.append({
            'id': tokens[len(tokens) - 2],
            'title': Medi1Utils.unescape_page(match[1]),
            'description': match[2],
            'date': match[3],
            'link': Medi1Utils.url_fix("http://www.medi1tv.com/" + lang + "/" + match[0]),
            'thumbnail': Medi1Utils.url_fix(Medi1Utils.direct_thumb_link_large(match[4])),
        })
    return episodes
def get_videos(query, start_index=1): '''Returns a tuple of (videos, total_videos) where videos is a list of dicts containing video information and total_videos is the toal number of videos available for the given query. The number of videos returned is specified by the given count.''' url_ptn = 'http://gdata.youtube.com/feeds/api/videos/?%s' params = { 'q': query, 'author': 'AlJazeeraEnglish', 'alt': 'json', # Ask YT to return JSON 'max-results': '12', 'start-index': str(start_index), 'orderby': 'published', 'prettyprint': 'true', # Makes debugging easier } url = url_ptn % urlencode(params) print url src = download_page(url) resp = json.loads(src) try: videos = resp['feed']['entry'] except: video_infos = [] total_results = 0 return videos_info, total_results video_infos = map(parse_video, videos) total_results = int(resp['feed']['openSearch$totalResults']['$t']) return video_infos, total_results
def Categories():
    """List the API's top-level child categories as directory entries."""
    src = download_page(API_BASE + 'categories?lang=eng')
    data = json.loads(src)
    for category in data['items'][0]['child_categories']:
        target = 'search?lang=eng&category=' + category['id']
        addDir(category['title'], target, 7, icon, fanart, '')
def Language():
    """List available audio languages as directory entries."""
    src = download_page(API_BASE + 'languages?lang=eng')
    for language in json.loads(src):
        target = 'search?lang=eng&audio_language=' + language['iso_639_3']
        addDir(language['name'], target, 7, icon, fanart, '')
def get_videos(count, list_id, start_index):
    '''Return a tuple (video_infos, total_results) for the given list_id.

    video_infos is a list of dicts produced by parse_video; total_results
    is the total number of videos available for list_id.  count limits
    how many entries come back, starting at start_index.

    This queries the gdata YouTube API — the same API the AlJazeera
    website uses client-side via javascript.
    '''
    query = urlencode({
        'v': '2',
        'author': 'AlJazeeraEnglish',
        'alt': 'json',
        'max-results': count,
        'start-index': start_index,
        'prettyprint': 'true',
        'orderby': 'updated',
    })
    url = 'http://gdata.youtube.com/feeds/api/videos/-/%s?%s' % (list_id, query)
    resp = json.loads(download_page(url))
    entries = resp['feed']['entry']
    total_results = resp['feed']['openSearch$totalResults']['$t']
    return map(parse_video, entries), total_results
def Sections():
    """List available sections as directory entries."""
    url = API_BASE + 'sections?lang=eng'
    src = download_page(url)
    sections = json.loads(src)
    for sect in sections['items']:
        # BUG FIX: the query string previously contained the literal
        # character '§' ('&sect' accidentally decoded as an HTML entity),
        # producing 'lang=eng§ion[]='.  Restore the intended
        # '&section[]=' parameter.
        addDir(sect['name'], 'search?lang=eng&section[]=' + sect['slug'], 7, icon, fanart, '')
def get_videos(query, start_index = 1): '''Returns a tuple of (videos, total_videos) where videos is a list of dicts containing video information and total_videos is the toal number of videos available for the given query. The number of videos returned is specified by the given count.''' url_ptn = 'http://gdata.youtube.com/feeds/api/videos/?%s' params = { 'q': query, 'author': 'AlJazeeraEnglish', 'alt': 'json', # Ask YT to return JSON 'max-results': '12', 'start-index': str(start_index), 'orderby': 'published', 'prettyprint': 'true', # Makes debugging easier } url = url_ptn % urlencode(params) print url src = download_page(url) resp = json.loads(src) try: videos = resp['feed']['entry'] except: video_infos = [] total_results = 0 return videos_info, total_results video_infos = map(parse_video, videos) total_results = int(resp['feed']['openSearch$totalResults']['$t']) return video_infos, total_results
def show_programs():
    '''Shows categories available for either Clips or Programs on the
    aljazeera video page.
    '''
    src = download_page(full_url('video'))
    # Repair a malformed attribute so BeautifulSoup can parse the page.
    src = src.replace('id"adSpacer"', 'id="adSpacer"')
    soup = BS(src)
    cells = soup.findAll('td', {
        'id': re.compile('^mItem_'),
        # Programs only — news clips use SelectProgInfo('Selected').
        'onclick': re.compile(r"""SelectProgInfo(?!\('Selected'\))"""),
        # 'onclick': re.compile(r"""SelectProgInfo\('Selected'\)""")  # news clips
    })
    items = [{
        'label': cell.string,
        'url': plugin.url_for('show_videos', query=cell.string, start_index='1'),
    } for cell in cells]
    # TODO: Add images
    return plugin.add_items(items)
def show_programs():
    '''Shows categories available for either Clips or Programs on the
    aljazeera video page.
    '''
    page = download_page(full_url('video'))
    # The page ships a broken attribute ('id"adSpacer"'); fix it so
    # BeautifulSoup does not choke.
    page = page.replace('id"adSpacer"', 'id="adSpacer"')
    selector = {
        'id': re.compile('^mItem_'),
        # Programs only — news clips would instead match
        # re.compile(r"""SelectProgInfo\('Selected'\)""")
        'onclick': re.compile(r"""SelectProgInfo(?!\('Selected'\))"""),
    }
    items = []
    for cell in BS(page).findAll('td', selector):
        label = cell.string
        items.append({
            'label': label,
            'url': plugin.url_for('show_videos', query=label, start_index='1'),
        })
    # TODO: Add images
    return plugin.add_items(items)
def load_shows(lang):
    """Scrape the shows listing for *lang* and return a list of show
    dicts with keys: id, jsname, thumbnail, title, resume, link."""
    src = download_page('http://www.medi1tv.com/' + lang + '/emission.aspx')
    regex = '<div id="imgcurrentinfos_corps"[^>]*><a [^\(]*\([^\(]*\(([\d]*),[\d]*,\'([^\']*)\'[^>]*><img src="([^"]*?)"[^>]*?></a></div>[^<]*<div id="currentinfos_corps_bloc"[^<]*<div id="currentinfos_corps_titre"><b>([^<]*)</b></div><div id="currentinfos_corps_resume"[^>]*>(.*?)</div>'
    shows = []
    for m in re.findall(regex, src, flags=re.DOTALL | re.IGNORECASE | re.UNICODE):
        shows.append({
            # m[0]: numeric id captured from the JS onclick call
            'id': m[0],
            # m[1]: JS name used when generating the program url
            'jsname': m[1],
            'thumbnail': Medi1Utils.url_fix(Medi1Utils.direct_thumb_link(m[2])),
            'title': Medi1Utils.unescape_page(m[3]),
            'resume': Medi1Utils.clear_html_tags(m[4]),
            'link': Medi1Utils.url_fix(Medi1Shows.generate_link_show(m[0], m[1], lang)),
        })
    return shows
def doAuth(username, password):
    """Authenticate against bhtelecom and return the 'auth' field of the
    JSON response."""
    query = urllib.urlencode({"u": username, "p": password, "x": "1"})
    src = download_page('https://www.bhtelecom.ba/index.php?id=6905&' + query)
    # xbmc.log(resp)
    return json.loads(src)['auth']
def get_recordings(id):
    """Fetch the EPG feed for channel *id* and return a tuple of
    (video_infos, raw_response)."""
    #xbmcgui.Dialog().ok("EPG URL", url)
    src = download_page('http://195.222.33.193/epg/' + id)
    resp = json.loads(src)
    # NOTE(review): the second element is the whole decoded response,
    # not a count — callers appear to rely on that.
    return map(parse_recs, resp['feed']), resp
def get_recordings(id):
    """Fetch the EPG feed for channel *id* from webtv.bhtelecom.ba and
    return a tuple of (video_infos, raw_response)."""
    epg_url = 'http://webtv.bhtelecom.ba/epg/' + id
    #xbmcgui.Dialog().ok("EPG URL", url)
    resp = json.loads(download_page(epg_url))
    infos = map(parse_recs, resp['feed'])
    # NOTE(review): second element is the whole decoded response, not a
    # count — preserved for caller compatibility.
    return infos, resp
def doAuth(username, password):
    """Authenticate against bhtelecom; return the JSON 'auth' value."""
    credentials = {"u": username, "p": password, "x": "1"}
    url = 'https://www.bhtelecom.ba/index.php?id=6905&' + urllib.urlencode(credentials)
    resp = json.loads(download_page(url))
    # xbmc.log(resp)
    auth = resp['auth']
    return auth
def Series(url):
    """List series items from the API endpoint at *url* as directories."""
    data = json.loads(download_page(API_BASE + url))
    for item in data['items']:
        images = item['images']
        # Looked up but not passed on; keep the lookup so a missing key
        # still raises as before.
        description = item['description']
        addDir3(item['title'], item['link'], 3,
                images[2]['link'],   # icon
                images[0]['link'],   # fanart
                images[1]['link'],   # thumb
                images[3]['link'],   # banner
                data['count'])
def Movies(url):
    """List movie items from the API endpoint at *url* as directories."""
    data = json.loads(download_page(API_BASE + url))
    for movie in data['items']:
        images = movie['images']
        # Looked up but unused by addDir; kept so missing keys raise as before.
        thumb = images[1]['link']
        banner = images[3]['link']
        addDir(movie['title'], '', 3,
               images[2]['link'],   # icon
               images[0]['link'],   # fanart
               movie['description'])
def Videos(url):
    """List video items from the API endpoint at *url*, then switch the
    skin view to WideList."""
    data = json.loads(download_page(API_BASE + url))
    for item in data['items']:
        images = item['images']
        # Looked up but unused by addDir; kept so missing keys raise as before.
        icon = images[2]['link']
        banner = images[3]['link']
        addDir(item['title'], item['link'], 3,
               images[1]['link'],   # thumb
               images[0]['link'],   # fanart
               item['description'])
    setView('videos', 'WideList')
def show_categories3(onclick_func, clips=False):
    '''Shows categories available for either Clips or Programs on the
    aljazeera video page.

    onclick_func selects the relevant <td> cells by their onclick
    attribute; when clips is True, the first entry links directly to a
    playable video and is emitted specially.
    '''
    src = download_page(full_url('video'))
    # Repair a malformed attribute so BeautifulSoup can parse the page.
    src = src.replace('id"adSpacer"', 'id="adSpacer"')
    cells = BS(src).findAll('td', {
        'id': re.compile('^mItem_'),
        'onclick': onclick_func,
    })
    items = []
    if clips:
        # The first 'Clips' link points straight at a video, not a list.
        videos, _total = get_videos('1', 'vod', '1')
        first = videos[0]
        items.append({
            'label': first['title'],
            'thumbnail': first['thumbnail'],
            'info': {
                'plot': first['summary'],
            },
            'url': youtube_url(first['videoid']),
            'is_folder': False,
            'is_playable': True,
        })
        cells = cells[1:]
    for cell in cells:
        count, list_id, start_index, _method = parse_queryvideo_args(cell['onclick'])
        items.append({
            'label': cell.string,
            'url': plugin.url_for('show_videos', count=count,
                                  list_id=list_id, start_index=start_index),
        })
    return plugin.add_items(items)
def play(url):
    """Resolve *url* to a playable stream; if resolution fails, log the
    plugin url and offer to report it to the developer."""
    stream = resolve(download_page(url))
    if stream:
        return plugin.set_resolved_url(stream)
    # Uh oh, things aren't working. Print the broken url to the log and
    # ask if we can submit the url to a google form.
    current_plugin_url = '?'.join([plugin._argv0, plugin._argv2])
    xbmc.log('REPORT THIS URL: %s' % current_plugin_url)
    wants_report = xbmcgui.Dialog().yesno(
        'Documentary Heaven Playback Problem.',
        'There was an issue playing this video.',
        ('Would you like to report the URL to the'
         ' developer?'))
    if wants_report:
        report_broken_url(current_plugin_url)
def show_categories3(onclick_func, clips=False):
    '''Shows categories available for either Clips or Programs on the
    aljazeera video page.

    onclick_func selects the relevant <td> cells by their onclick
    attribute; when clips is True the first entry links directly to a
    playable video and is handled specially.
    '''
    page = download_page(full_url('video'))
    # The page ships a broken attribute; fix it so BeautifulSoup parses.
    page = page.replace('id"adSpacer"', 'id="adSpacer"')
    matched = BS(page).findAll('td', {
        'id': re.compile('^mItem_'),
        'onclick': onclick_func,
    })
    entries = []
    # The first link for the 'Clips' section links directly to a video
    # so we must handle it differently.
    if clips:
        found, _count = get_videos('1', 'vod', '1')
        clip = found[0]
        entries.append({
            'label': clip['title'],
            'thumbnail': clip['thumbnail'],
            'info': {'plot': clip['summary'], },
            'url': youtube_url(clip['videoid']),
            'is_folder': False,
            'is_playable': True,
        })
        matched = matched[1:]
    for td in matched:
        count, list_id, start_index, _method = parse_queryvideo_args(td['onclick'])
        entries.append({
            'label': td.string,
            'url': plugin.url_for('show_videos', count=count,
                                  list_id=list_id, start_index=start_index),
        })
    return plugin.add_items(entries)
def get_videos(list_id):
    '''Return a tuple (video_infos, raw_response) for the channel list
    *list_id*.

    list_id selects which feed to download: 'sd' (TV channels), 'cam'
    (cameras, category 4), 'radio' (radio, category 11) or 'rec'
    (recordable channels).

    Raises ValueError for an unknown list_id.  (BUG FIX: the original
    if-chain left 'url' unbound for unknown ids, producing an opaque
    NameError at the download_page call.)
    '''
    endpoints = {
        'sd': 'http://195.222.33.193/channels',
        'cam': 'http://195.222.33.193/channels_cat_4',
        'radio': 'http://195.222.33.193/channels_cat_11',
        'rec': 'http://195.222.33.193/channels_rec',
    }
    try:
        url = endpoints[list_id]
    except KeyError:
        raise ValueError('unknown list_id: %r' % (list_id,))
    resp = json.loads(download_page(url))
    video_infos = map(parse_video, resp['feed'])
    # NOTE(review): the second element is the whole decoded response,
    # not a count — preserved for caller compatibility.
    return video_infos, resp
def get_videos(list_id):
    '''Return a tuple (video_infos, raw_response) for the channel list
    *list_id* from webtv.bhtelecom.ba.

    list_id selects which feed to download: 'sd' (TV channels), 'cam'
    (cameras, category 4), 'radio' (radio, category 11) or 'rec'
    (recordable channels).

    Raises ValueError for an unknown list_id.  (BUG FIX: the original
    if-chain left 'url' unbound for unknown ids, producing an opaque
    NameError at the download_page call.)
    '''
    endpoints = {
        'sd': 'http://webtv.bhtelecom.ba/channels',
        'cam': 'http://webtv.bhtelecom.ba/channels_cat_4',
        'radio': 'http://webtv.bhtelecom.ba/channels_cat_11',
        'rec': 'http://webtv.bhtelecom.ba/channels_rec',
    }
    try:
        url = endpoints[list_id]
    except KeyError:
        raise ValueError('unknown list_id: %r' % (list_id,))
    resp = json.loads(download_page(url))
    video_infos = map(parse_video, resp['feed'])
    # NOTE(review): the second element is the whole decoded response,
    # not a count — preserved for caller compatibility.
    return video_infos, resp
def get_streams():
    """Download and return the decoded JSON list of radio streams."""
    url = 'http://radioma.ma/radiomaxbmc/radios.json'
    return json.loads(download_page(url))
def htmlify(url):
    """Download *url* and return the page parsed as a BeautifulSoup tree."""
    page = download_page(url)
    return BS(page)
def episode_stream(idShow, idEpisode, lang='ar'):
    """Build the rtmp stream url for an episode by scraping the '&file='
    parameter out of its player page."""
    page_url = ShowEpisodes.generate_link_episode_randtitle(idShow, idEpisode, lang)
    src = download_page(page_url)
    playpath = re.findall('&file=([^&]*)', src,
                          flags=re.DOTALL | re.IGNORECASE | re.UNICODE)[0]
    return ('rtmp://41.248.240.209:80/vod swfUrl=http://www.medi1tv.com/ar/player.swf playpath='
            + playpath)