# Module-level imports assumed by the functions below; `h` (the addon's helper
# module), `args`, `addon_handle`, `base_url`, `mode`, `param1`, `param2`,
# `content`, `cookie_file`, `cookie_jar`, `MusicLanguages`, `MoviesLanguages`,
# and the *_URL / *_REFERRER constants come from the addon's surrounding
# scaffolding.
import json
import re
import urllib2
import urlparse

import demjson
import urlresolver  # Kodi script.module.urlresolver dependency
import xbmc
from BeautifulSoup import BeautifulSoup


def links():
    links_info = json.loads(h.extract_var(args, 'elem_id'))
    for link in links_info:
        url = link['url']
        print url
        resp = None
        try:
            resp = h.make_request(url, cookie_file, cookie_jar)
            soup = BeautifulSoup(resp)
            if len(soup.findChildren()) == 1:
                # Bare <meta http-equiv="refresh"> stub; follow the redirect.
                meta = soup.find('meta', attrs={'http-equiv': 'refresh'})
                if meta:
                    c = dict(meta.attrs)['content']
                    idx4 = c.find('URL=')
                    if idx4 != -1:
                        _url = c[idx4 + 4:]
                        soup = BeautifulSoup(h.make_request(_url, cookie_file, cookie_jar))
                        div = soup.find('div', {'id': 'content'})
                        url = dict(div.find('table').find('iframe').attrs)['src']
            else:
                div = h.bs_find_with_class(soup, 'div', 'entry-content')
                if div:
                    divs = div.findAll('div', recursive=False)
                    url = dict(divs[1].find('iframe').attrs)['src']
        except urllib2.HTTPError as e:
            # Hack: Avast blocks the first URL. Only for WatchVideo currently.
            # Rewrite the blocked *.php URL to the host's direct embed page.
            if e.code == 403 and e.msg == 'Malicious content':
                up = urlparse.urlparse(url)
                id = urlparse.parse_qs(up.query)['id'][0]
                f = 0
                if up.path == '/idowatch.php':
                    url = 'http://vidfile.xyz/embed-%s-1280x720.html' % id
                    f = 1
                elif up.path == '/watchvideo.php':
                    url = 'http://watchvideo2.us/embed-%s-1280x720.html' % id
                    f = 1
                elif up.path == '/playu.php':
                    url = 'http://playu.me/embed-%s-1280x720.html' % id
                    f = 1
                if f:
                    resp = h.make_request(url, cookie_file, cookie_jar)
        if resp:
            video_url = urlresolver.resolve(url)
            if video_url:
                h.add_dir_video(addon_handle, link['name'], video_url, '', '')
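# A minimal sketch of a helper that could factor out the meta-refresh hop that
# links() above and topic1() below both repeat. The name follow_meta_refresh
# and the fetch-callable parameter are illustrative, not part of the original
# `h` module.
def follow_meta_refresh(soup, fetch):
    # A bare redirect stub parses to a single tag: <meta http-equiv="refresh"
    # content="0; URL=...">. Fetch the target and re-parse; otherwise return
    # the soup unchanged.
    if len(soup.findChildren()) != 1:
        return soup
    meta = soup.find('meta', attrs={'http-equiv': 'refresh'})
    if not meta:
        return soup
    c = dict(meta.attrs)['content']
    idx = c.find('URL=')
    if idx == -1:
        return soup
    return BeautifulSoup(fetch(c[idx + 4:]))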
def show():
    url = h.extract_var(args, 'url')
    url = '%s%s' % (ZEEMARATHI_REFERRER, url)
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    ul = soup.find('ul', {'class': lambda x: x and 'show-videos-list' in x.split()})
    # Iterate only <li> tags; iterating the <ul> directly also yields bare
    # text nodes, which have no find().
    for li in ul.findAll('li'):
        div = li.find('div', {'class': lambda x: x and 'video-watch' in x.split()})
        episode_url = div.find('a')['href']
        name = li.find('div', {'class': 'video-episode'}).text
        img_src = 'DefaultFolder.png'
        img = li.find('img')
        if img:
            img_src = img['src']
        h.add_dir(addon_handle, base_url, name, episode_url, 'episode', img_src, img_src)
    pager = soup.find('ul', {'class': lambda x: x and 'pager' in x.split()})
    if pager:
        next_link = pager.find('li', {'class': lambda x: x and 'pager-next' in x.split()})
        if next_link:
            next_url = next_link.find('a')['href']
            if next_url:
                h.add_dir(addon_handle, base_url, 'Next >>', next_url, 'show')
def episode():
    xbmc.log("Episode")
    url = h.extract_var(args, 'url')
    xbmc.log("Episode URL 1 : " + url)
    currentDisplayCounter = int(param2)
    if param1 == "old":
        url = CHANNEL_EPISODE_URL + url + "/" + str(currentDisplayCounter) + "/50/oldest/"
    else:
        url = CHANNEL_EPISODE_URL + url + "/" + str(currentDisplayCounter) + "/50/newest/"
    xbmc.log("Episode URL 2 : " + url)
    JSONObjs = json.loads(h.make_request(url, cookie_file, cookie_jar))
    for JSONObj in JSONObjs:
        title = JSONObj["video_title"]
        img_src = JSONObj["video_image"]
        h.add_dir(addon_handle, base_url, title, JSONObj["slug"], 'show', img_src, img_src)
    # Page through results 50 at a time; fewer than 50 items means last page.
    if len(JSONObjs) >= 50:
        currentDisplayCounter += 50
        h.add_dir(addon_handle, base_url, 'Next >>', h.extract_var(args, 'url'),
                  'episode~' + param1 + '~' + str(currentDisplayCounter), img_src, img_src)
    else:
        currentDisplayCounter = -1
def current_shows():
    url = h.extract_var(args, 'url')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    # XXX: If sorted output is wanted:
    #   import operator
    #   shows = {}
    #   shows[a_attrs['href']] = a_attrs['title']
    #   shows = sorted(shows.items(), key=operator.itemgetter(1))
    # XXX: View mode thumbnail is supported in xbmcswift2
    for h2 in soup.findAll('h2'):
        if h2.text == 'Shows':
            for li in h2.nextSibling.find('ul').findAll('li'):
                a = li.find('a')
                a_attrs = dict(a.attrs)
                title = '%s (%s)' % (
                    h.bs_find_with_class(a, 'div', 'zc-show-title').text,
                    h.bs_find_with_class(a, 'div', 'zc-air-time').text)
                img_src = dict(a.find('img').attrs)['src']
                h.add_dir(addon_handle, base_url, title,
                          '%s/video/' % a_attrs['href'], 'show', img_src, img_src)
            break
def todays_show():
    url = h.extract_var(args, 'url')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    ul = soup.find('ul', {'class': lambda x: x and 'videos-list' in x.split()})
    for li in ul.nextSibling.findAll('li'):
        episode_url = li.find('a')['href']
        title = li.find('a')['title']
        img_src = li.find('a').find('img')['src']
        h.add_dir(addon_handle, base_url, title, episode_url,
                  'todays_episode', img_src, img_src)
def show_movies():
    xbmc.log("Function : Show_Movies")
    url = h.extract_var(args, 'url')
    name = h.extract_var(args, 'name')
    JSONObj = json.loads(h.make_request(MOVIE_SHOW_URL + url, cookie_file, cookie_jar))
    thumbnail = JSONObj["details"]["listing_image_small"]
    plot = JSONObj["details"]["seo_description"]
    h.add_dir_video(addon_handle, JSONObj["details"]["title"],
                    JSONObj["playback_url"], thumbnail, plot)
def movie():
    url = h.extract_var(args, 'url')
    title = h.extract_var(args, 'name')
    thumb = ''
    resp = h.make_request(url, cookie_file, cookie_jar)
    soup = BeautifulSoup(resp)
    div = h.bs_find_with_class(soup, 'div', 'entry-content')
    video_url = urlresolver.resolve(dict(div.find('iframe').attrs)['src'])
    if video_url:
        h.add_dir_video(addon_handle, title, video_url, thumb, '')
def show():
    xbmc.log("Function : Show")
    url = h.extract_var(args, 'url')
    xbmc.log("URL : " + CHANNEL_SHOW_URL + url)
    name = h.extract_var(args, 'name')
    JSONObj = json.loads(h.make_request(CHANNEL_SHOW_URL + url, cookie_file, cookie_jar))
    thumbnail = JSONObj["listing_image_small"]
    plot = JSONObj["description"]
    h.add_dir_video(addon_handle, JSONObj["title"], JSONObj["playback_url"], thumbnail, plot)
def archive_shows():
    url = h.extract_var(args, 'url')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    for h2 in soup.findAll('h2'):
        if h2.text == 'Archive Shows':
            for div in h.bs_find_all_with_class(h2.nextSibling, 'div', 'archive-show'):
                a = div.find('a')
                a_attrs = dict(a.attrs)
                h.add_dir(addon_handle, base_url, a_attrs['title'],
                          '%s/video/' % a_attrs['href'], 'show')
            break
def shows_movies():
    xbmc.log("Shows_Movies")
    url = h.extract_var(args, 'url')
    xbmc.log("URL : " + url)
    JSONObjs = json.loads(h.make_request(url, cookie_file, cookie_jar))
    for JSONObj in JSONObjs:
        title = JSONObj["title"]
        if mode == "Movies":
            img_src = JSONObj["image_medium"]
            h.add_dir(addon_handle, base_url, title, JSONObj["slug"],
                      "Show_Movies", img_src, img_src)
        else:
            img_src = JSONObj["listing_image_medium"]
            h.add_dir(addon_handle, base_url, title, JSONObj["slug"],
                      "Show_Music", img_src, img_src)
    currentDisplayCounter = int(param1)
    # Page through results 50 at a time; fewer than 50 items means last page.
    if len(JSONObjs) >= 50:
        currentDisplayCounter += 50
        h.add_dir(addon_handle, base_url, 'Next >>', h.extract_var(args, 'url'),
                  mode + '~' + param1 + '~' + str(currentDisplayCounter), img_src, img_src)
    else:
        currentDisplayCounter = -1
def shows_serials():
    xbmc.log("Show_Serials")
    url = h.extract_var(args, 'url')
    xbmc.log("URL : " + url)
    if param1 == "shows":
        JSONObjs = json.loads(h.make_request(url, cookie_file, cookie_jar))
        for JSONObj in JSONObjs:
            title = JSONObj["title"]
            img_src = JSONObj["listing_image_small"]
            h.add_dir(addon_handle, base_url, title, JSONObj["slug"],
                      "episodemenu", img_src, img_src)
    elif param1 == "music":
        for Music in MusicLanguages:
            xbmc.log(Music["URL"])
            h.add_dir(addon_handle, base_url, Music["Language"], Music["URL"], "Music~0")
    else:
        for Movie in MoviesLanguages:
            xbmc.log(Movie["URL"])
            h.add_dir(addon_handle, base_url, Movie["Language"], Movie["URL"], "Movies~0")
def todays_episode():
    url = h.extract_var(args, 'url')
    name = h.extract_var(args, 'name')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    div = soup.find('div', {'id': 'block-gec-videos-videopage-videos'})
    script = None
    scripts = div.findAll('script')
    for s in scripts:
        if 'babyenjoying' in s.text:
            script = s
            break
    # Pull the quoted master m3u8 URL assigned to the `babyenjoying` JS variable.
    master_m3u8 = script.text.split('babyenjoying = ', 2)[2].split(';')[0][1:-1]
    plot = soup.find('p', {'itemprop': 'description'}).text
    thumbnail = soup.find('meta', {'itemprop': 'thumbnailUrl'})['content']
    h.add_dir_video(addon_handle, name, master_m3u8, thumbnail, plot)
def episode():
    url = h.extract_var(args, 'url')
    name = h.extract_var(args, 'name')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    div = soup.find('div', {'id': 'block-gec-videos-videopage-videos'})
    script = None
    scripts = div.findAll('script')
    for s in scripts:
        if 'babyenjoying' in s.text:
            script = s
            break
    # Pull the quoted master m3u8 URL assigned to the `babyenjoying` JS variable.
    master_m3u8 = script.text.split('babyenjoying = ', 2)[2].split(';')[0][1:-1]
    plot = soup.find('p', {'itemprop': 'description'}).text
    thumbnail = soup.find('meta', {'itemprop': 'thumbnailUrl'})['content']
    h.add_dir_video(addon_handle, name, master_m3u8, thumbnail, plot)
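# A hedged, more robust alternative to the split() chain used in the two
# functions above: extract the quoted URL assigned to `babyenjoying` with a
# regex. The helper name is illustrative, and this assumes the page embeds
# something like: babyenjoying = "https://.../master.m3u8";
def extract_babyenjoying_url(script_text):
    m = re.search(r'babyenjoying\s*=\s*["\']([^"\';]+)["\']', script_text)
    return m.group(1) if m else None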
def movies_list():
    url = h.extract_var(args, 'url')
    resp = h.make_request(url, cookie_file, cookie_jar)
    soup = BeautifulSoup(resp)
    div = h.bs_find_with_class(soup, 'div', 'loop-content')
    post_divs = h.bs_find_all_with_class(div, 'div', 'item-post')
    for post_div in post_divs:
        thumb = dict(post_div.find('img').attrs)['src']
        a = h.bs_find_with_class(post_div, 'h2', 'entry-title').find('a')
        title = a.text
        u = dict(a.attrs)['href']
        h.add_dir(addon_handle, base_url, title, u, 'movie', thumbnail=thumb)
    div_nav = h.bs_find_with_class(soup, 'div', 'loop-nav')
    if div_nav:
        a_next = h.bs_find_with_class(div_nav, 'a', 'nextpostslink')
        if a_next:
            u = dict(a_next.attrs)['href']
            title = 'Next >> (%s)' % h.bs_find_with_class(div_nav, 'span', 'pages').text
            h.add_dir(addon_handle, base_url, title, u, 'movies_list')
def topic1():
    videos = []
    # Try each known host section in turn until one yields playable videos.
    # content.index() raises ValueError when a marker is absent, which the
    # except clause turns into a fall-through to the next host.
    try:
        idx = content.index('Flash Player 720p HD Quality Online Links')
        if idx != -1:
            links_content = content[idx + 1].split('\n')
            print links_content
            for link_content in links_content:
                # Parse the BBCode-style "[URL=<url>]<name>[" link markup.
                idx1 = link_content.find('[URL=')
                idx2 = link_content.find(']')
                url = link_content[idx1 + 5:idx2]
                idx3 = link_content.find('[', idx2)
                name = link_content[idx2 + 1:idx3]
                print name, url
                soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                json_url = dict(soup.find('script', {'src': '//cdn.playwire.com/bolt/js/embed.min.js'}).attrs)['data-config']
                json_data = json.loads(h.make_request(json_url, cookie_file, cookie_jar))
                poster = json_data['poster']
                src = json_data['src']
                soup = BeautifulSoup(h.make_request(src, cookie_file, cookie_jar))
                base_url = soup.find('baseurl').text
                media_node = soup.find('media')
                media_url = dict(media_node.attrs)['url']
                video_url = '%s/%s' % (base_url, media_url)
                videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                h.add_dir_video(addon_handle, name, video_url, poster, '')
    except Exception as e:
        videos = []
        links_content = ''
        print 'Flash Player', e
    print videos

    if not videos:
        try:
            idx = content.index('Letwatch 720p HD Quality Online Links')
            if idx != -1:
                links_content = content[idx + 1].split('\n')
                print links_content
                for link_content in links_content:
                    idx1 = link_content.find('[URL=')
                    idx2 = link_content.find(']')
                    url = link_content[idx1 + 5:idx2]
                    idx3 = link_content.find('[', idx2)
                    name = link_content[idx2 + 1:idx3]
                    print name, url
                    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    div = h.bs_find_with_class(soup, 'div', 'entry-content')
                    divs = div.findAll('div', recursive=False)
                    src = dict(divs[1].find('iframe').attrs)['src']
                    print src
                    soup = BeautifulSoup(h.make_request(src, cookie_file, cookie_jar))
                    scripts = soup.findAll('script')
                    rgx = re.compile(ur'.*}\(\'(.*)\',([0-9]+),([0-9]+),\'(.*)\'\.split.*')
                    for script in scripts:
                        if script.text.startswith('eval'):
                            # Unpack the eval()-packed player script.
                            groups = re.search(rgx, script.text).groups()
                            p = groups[0]
                            base = int(groups[1])
                            c = int(groups[2])
                            k = groups[3].split('|')
                            for x in reversed(xrange(0, c)):
                                if k[x]:
                                    p = re.sub(r'\b%s\b' % h.int2base(x, base), k[x], p)
                            arr = p.split(';')
                            data_str = arr[0][26:-1]
                            data = demjson.decode(data_str.replace("\\", ""))
                            video_url = ''
                            video_type = ''
                            # Prefer the HD source when present.
                            for source in data['sources']:
                                if not video_url:
                                    video_url = source['file']
                                    video_type = source['label']
                                elif source['label'] == 'HD':
                                    video_url = source['file']
                                    video_type = source['label']
                            print video_type, video_url
                            poster = ''
                            videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                            h.add_dir_video(addon_handle, name, video_url, poster, '')
        except Exception as e:
            videos = []
            print 'LetWatch 720', e
        print videos

    if not videos:
        try:
            idx = content.index('Vidto Link')
            if idx != -1:
                links_content = content[idx + 1].split('\n')
                print links_content
                for link_content in links_content:
                    idx1 = link_content.find('[URL=')
                    idx2 = link_content.find(']')
                    url = link_content[idx1 + 5:idx2]
                    idx3 = link_content.find('[', idx2)
                    name = link_content[idx2 + 1:idx3]
                    print name, url
                    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    if len(soup.findChildren()) == 1:
                        # Bare <meta http-equiv="refresh"> stub; follow it.
                        meta = soup.find('meta', attrs={'http-equiv': 'refresh'})
                        if meta:
                            c = dict(meta.attrs)['content']
                            idx4 = c.find('URL=')
                            if idx4 != -1:
                                url = c[idx4 + 4:]
                                print url
                                soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    div = soup.find('div', {'id': 'content'})
                    src = dict(div.find('table').find('iframe').attrs)['src']
                    print src
                    soup = BeautifulSoup(h.make_request(src, cookie_file, cookie_jar))
                    div = soup.find('body').find('div', {'id': 'player_code'})
                    script = None
                    scripts = div.findAll('script')
                    rgx = re.compile(ur'.*}\(\'(.*)\',([0-9]+),([0-9]+),\'(.*)\'\.split.*')
                    for s in scripts:
                        if s.text.startswith('eval'):
                            script = s
                            break
                    # Unpack the eval()-packed player script.
                    groups = re.search(rgx, script.text).groups()
                    p = groups[0]
                    base = int(groups[1])
                    c = int(groups[2])
                    k = groups[3].split('|')
                    for x in reversed(xrange(0, c)):
                        if k[x]:
                            p = re.sub(r'\b%s\b' % h.int2base(x, base), k[x], p)
                    idx5 = p.find('hd:[')
                    idx6 = p.find(']')
                    q = p[idx5 + 3:idx6 + 1]
                    j = demjson.decode(q)
                    print j
                    video_url = ''
                    size = 0
                    for _j in j:
                        # e.g. '720p' -> 720, so resolutions compare numerically.
                        s = int(_j['label'][:-1])
                        if s > size:
                            size = s
                            video_url = _j['file']
                    print video_url
                    if video_url:
                        poster = ''
                        videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                        h.add_dir_video(addon_handle, name, video_url, poster, '')
                return
        except Exception as e:
            videos = []
            print 'Vidto', e
        print videos

    if not videos:
        try:
            idx = content.index('Watchvideo Link')
            if idx != -1:
                links_content = content[idx + 1].split('\n')
                print links_content
                for link_content in links_content:
                    idx1 = link_content.find('[URL=')
                    idx2 = link_content.find(']')
                    url = link_content[idx1 + 5:idx2]
                    idx3 = link_content.find('[', idx2)
                    name = link_content[idx2 + 1:idx3]
                    print name, url
                    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    if len(soup.findChildren()) == 1:
                        meta = soup.find('meta', attrs={'http-equiv': 'refresh'})
                        if meta:
                            c = dict(meta.attrs)['content']
                            idx4 = c.find('URL=')
                            if idx4 != -1:
                                url = c[idx4 + 4:]
                                print url
                                soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    div = soup.find('div', {'id': 'content'})
                    src = dict(div.find('table').find('iframe').attrs)['src']
                    print src
                    soup = BeautifulSoup(h.make_request(src, cookie_file, cookie_jar))
                    script = soup.find('body').find('script', recursive=False)
                    idx5 = script.text.find('sources: ')
                    idx6 = script.text.find(']')
                    j = demjson.decode(script.text[idx5 + 9:idx6 + 1])
                    video_url = ''
                    for _j in j:
                        if 'label' not in _j:
                            video_url = _j['file']
                    if video_url:
                        poster = ''
                        videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                        h.add_dir_video(addon_handle, name, video_url, poster, '')
        except Exception as e:
            videos = []
            print 'Watchvideo', e
        print videos

    if not videos:
        try:
            idx = content.index('Letwatch Link')
            if idx != -1:
                links_content = content[idx + 1].split('\n')
                print links_content
                for link_content in links_content:
                    idx1 = link_content.find('[URL=')
                    idx2 = link_content.find(']')
                    url = link_content[idx1 + 5:idx2]
                    idx3 = link_content.find('[', idx2)
                    name = link_content[idx2 + 1:idx3]
                    print name, url
                    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    if len(soup.findChildren()) == 1:
                        meta = soup.find('meta', attrs={'http-equiv': 'refresh'})
                        if meta:
                            c = dict(meta.attrs)['content']
                            idx4 = c.find('URL=')
                            if idx4 != -1:
                                url = c[idx4 + 4:]
                                print url
                                soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    div = soup.find('div', {'id': 'content'})
                    src = dict(div.find('table').find('iframe').attrs)['src']
                    print src
                    soup = BeautifulSoup(h.make_request(src, cookie_file, cookie_jar))
                    scripts = soup.findAll('script')
                    rgx = re.compile(ur'.*}\(\'(.*)\',([0-9]+),([0-9]+),\'(.*)\'\.split.*')
                    for script in scripts:
                        if script.text.startswith('eval'):
                            groups = re.search(rgx, script.text).groups()
                            p = groups[0]
                            base = int(groups[1])
                            c = int(groups[2])
                            k = groups[3].split('|')
                            for x in reversed(xrange(0, c)):
                                if k[x]:
                                    p = re.sub(r'\b%s\b' % h.int2base(x, base), k[x], p)
                            arr = p.split(';')
                            data_str = arr[0][26:-1]
                            data = demjson.decode(data_str.replace("\\", ""))
                            video_url = ''
                            video_type = ''
                            for source in data['sources']:
                                if not video_url:
                                    video_url = source['file']
                                    video_type = source['label']
                                elif source['label'] == 'HD':
                                    video_url = source['file']
                                    video_type = source['label']
                            print video_type, video_url
                            poster = ''
                            videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                            h.add_dir_video(addon_handle, name, video_url, poster, '')
        except Exception as e:
            videos = []
            print 'LetWatch', e
        print videos

    if not videos:
        try:
            idx = content.index('Watch Online - Flash')
            if idx != -1:
                links_content = content[idx + 1].split('\n')
                print links_content
                for link_content in links_content:
                    idx1 = link_content.find('[URL=')
                    idx2 = link_content.find(']')
                    url = link_content[idx1 + 5:idx2]
                    idx3 = link_content.find('[', idx2)
                    name = link_content[idx2 + 1:idx3]
                    print name, url
                    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
                    script = soup.find('script', {'src': '//cdn.playwire.com/bolt/js/zeus/embed.js'})
                    if script:
                        data_config = dict(script.attrs)['data-config']
                        json_info = json.loads(h.make_request(data_config, cookie_file, cookie_jar))
                        poster = json_info['content']['poster']
                        f4m = json_info['content']['media']['f4m']
                        soup = BeautifulSoup(h.make_request(f4m, cookie_file, cookie_jar))
                        base_url = soup.find('baseurl').text
                        # Pick the <media> entry with the highest bitrate.
                        media_url = None
                        bitrate = 0
                        medias = soup.findAll('media')
                        for m in medias:
                            attrs = dict(m.attrs)
                            br = int(attrs['bitrate'])
                            if br > bitrate:
                                media_url = attrs['url']
                                bitrate = br
                        video_url = '%s/%s' % (base_url, media_url)
                        videos.append({'url': video_url, 'thumbnail': poster, 'name': name})
                        h.add_dir_video(addon_handle, name, video_url, poster, '')
        except Exception as e:
            videos = []
            print 'Watch Online - Flash', e
        print videos

    if not videos:
        import pprint
        pprint.pprint(content)
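# The eval()-packed scripts handled above use P.A.C.K.E.R.-style packing: the
# regex captures (p, base, count, keywords) and each packed token is replaced
# by its keyword. A minimal sketch of a shared helper for the three fallbacks
# that repeat this, assuming h.int2base renders an int in the given base as
# the original call sites imply; the helper name itself is illustrative, not
# part of the original module:
def unpack_packed_js(script_text):
    rgx = re.compile(ur".*}\('(.*)',([0-9]+),([0-9]+),'(.*)'\.split.*")
    match = re.search(rgx, script_text)
    if match is None:
        return None
    p, base, count, keywords = match.groups()
    base, count = int(base), int(count)
    k = keywords.split('|')
    for x in reversed(xrange(0, count)):
        if k[x]:
            # Replace each packed token with its original keyword.
            p = re.sub(r'\b%s\b' % h.int2base(x, base), k[x], p)
    return p
# Usage mirrors the inline versions: p = unpack_packed_js(script.text), then
# slice the sources JSON out of p as each fallback already does.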