def episode():
    """List a channel show's episodes, 50 per page, with a 'Next >>' entry.

    Reads the show slug from ``args``; ``param1`` selects oldest/newest
    ordering and ``param2`` is the current paging offset.
    """
    xbmc.log("Episode")
    url = h.extract_var(args, 'url')
    xbmc.log("Episode URL 1 : " + url)
    currentDisplayCounter = int(param2)
    # "old" lists from the oldest episode; anything else lists newest first.
    if param1 == "old":
        url = CHANNEL_EPISODE_URL + url + "/" + str(currentDisplayCounter) + "/50/oldest/"
    else:
        url = CHANNEL_EPISODE_URL + url + "/" + str(currentDisplayCounter) + "/50/newest/"
    xbmc.log("Episode URL 2 : " + url)
    JSONObjs = json.loads(h.make_request(url, cookie_file, cookie_jar))
    for JSONObj in JSONObjs:
        title = JSONObj["video_title"]
        img_src = JSONObj["video_image"]
        h.add_dir(addon_handle, base_url, title, JSONObj["slug"], 'show', img_src, img_src)
    # A full page (50 items) implies more episodes may follow; the redundant
    # `elif len(...) < 50` branch only made a dead local assignment and is gone.
    if len(JSONObjs) >= 50:
        currentDisplayCounter += 50
        h.add_dir(addon_handle, base_url, 'Next >>', h.extract_var(args, 'url'),
                  'episode~' + param1 + '~' + str(currentDisplayCounter),
                  img_src, img_src)
def movie():
    """Resolve the movie embedded on the given page and queue it for playback."""
    page_url = h.extract_var(args, 'url')
    title = h.extract_var(args, 'name')
    thumb = ''
    page = h.make_request(page_url, cookie_file, cookie_jar)
    soup = BeautifulSoup(page)
    content_div = h.bs_find_with_class(soup, 'div', 'entry-content')
    iframe_attrs = dict(content_div.find('iframe').attrs)
    video_url = urlresolver.resolve(iframe_attrs['src'])
    if video_url:
        h.add_dir_video(addon_handle, title, video_url, thumb, '')
def show_movies():
    """Fetch a movie's details JSON and queue its playback URL.

    The unused local ``name`` (read from args but never referenced) has been
    removed.
    """
    xbmc.log("Function : Show_Movies")
    url = h.extract_var(args, 'url')
    JSONObj = json.loads(h.make_request(MOVIE_SHOW_URL + url, cookie_file, cookie_jar))
    thumbnail = JSONObj["details"]["listing_image_small"]
    plot = JSONObj["details"]["seo_description"]
    h.add_dir_video(addon_handle, JSONObj["details"]["title"],
                    JSONObj["playback_url"], thumbnail, plot)
def show():
    """Fetch a channel show's details JSON and queue its playback URL.

    The unused local ``name`` (read from args but never referenced) has been
    removed.
    """
    xbmc.log("Function : Show")
    url = h.extract_var(args, 'url')
    xbmc.log("URL : " + CHANNEL_SHOW_URL + url)
    JSONObj = json.loads(h.make_request(CHANNEL_SHOW_URL + url, cookie_file, cookie_jar))
    thumbnail = JSONObj["listing_image_small"]
    plot = JSONObj["description"]
    h.add_dir_video(addon_handle, JSONObj["title"], JSONObj["playback_url"],
                    thumbnail, plot)
def show():
    """List the episodes of a Zee Marathi show page, following the pager."""
    show_url = '%s%s' % (ZEEMARATHI_REFERRER, h.extract_var(args, 'url'))
    soup = BeautifulSoup(h.make_request(show_url, cookie_file, cookie_jar))

    def with_class(cls):
        # Attribute matcher: elements whose class list contains `cls`.
        return {'class': lambda x: x and cls in x.split()}

    episode_list = soup.find('ul', with_class('show-videos-list'))
    for item in episode_list:
        watch_div = item.find('div', with_class('video-watch'))
        episode_url = watch_div.find('a')['href']
        episode_name = item.find('div', {'class': 'video-episode'}).text
        thumb = 'DefaultFolder.png'
        thumb_img = item.find('img')
        if thumb_img:
            thumb = thumb_img['src']
        h.add_dir(addon_handle, base_url, episode_name, episode_url,
                  'episode', thumb, thumb)

    pager = soup.find('ul', with_class('pager'))
    if pager:
        next_item = pager.find('li', with_class('pager-next'))
        if next_item:
            next_url = next_item.find('a')['href']
            if next_url:
                h.add_dir(addon_handle, base_url, 'Next >>', next_url, 'show')
def current_shows():
    """List current shows from the section under the first 'Shows' heading.

    The dead ``h2 = soup.findAll('h2')`` assignment (immediately shadowed by
    the loop variable) has been removed, along with commented-out code.
    """
    url = h.extract_var(args, 'url')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    # XXX: listing could be sorted by title; view-mode thumbnails are
    # supported by xbmcswift2 if that is ever adopted.
    for h2 in soup.findAll('h2'):
        if h2.text == 'Shows':
            for li in h2.nextSibling.find('ul').findAll('li'):
                a = li.find('a')
                a_attrs = dict(a.attrs)
                title = '%s (%s)' % (
                    h.bs_find_with_class(a, 'div', 'zc-show-title').text,
                    h.bs_find_with_class(a, 'div', 'zc-air-time').text)
                img_src = dict(a.find('img').attrs)['src']
                h.add_dir(addon_handle, base_url, title,
                          '%s/video/' % a_attrs['href'], 'show', img_src, img_src)
            break
def show():
    """Build the episode listing for a Zee Marathi show page (with pager)."""
    rel_url = h.extract_var(args, 'url')
    soup = BeautifulSoup(
        h.make_request('%s%s' % (ZEEMARATHI_REFERRER, rel_url), cookie_file, cookie_jar))

    cls_match = lambda c: {'class': lambda x: x and c in x.split()}

    listing = soup.find('ul', cls_match('show-videos-list'))
    for entry in listing:
        link = entry.find('div', cls_match('video-watch')).find('a')
        title = entry.find('div', {'class': 'video-episode'}).text
        img = entry.find('img')
        icon = img['src'] if img else 'DefaultFolder.png'
        h.add_dir(addon_handle, base_url, title, link['href'], 'episode', icon, icon)

    pager = soup.find('ul', cls_match('pager'))
    next_li = pager.find('li', cls_match('pager-next')) if pager else None
    next_href = next_li.find('a')['href'] if next_li else None
    if next_href:
        h.add_dir(addon_handle, base_url, 'Next >>', next_href, 'show')
def todays_show():
    """List today's shows from the element following the videos-list <ul>."""
    url = h.extract_var(args, 'url')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    videos_ul = soup.find('ul', {'class': lambda x: x and 'videos-list' in x.split()})
    # NOTE(review): items are read from the sibling that follows the <ul>,
    # not from the <ul> itself — presumably matches this page's markup; confirm.
    for entry in videos_ul.nextSibling.findAll('li'):
        anchor = entry.find('a')
        icon = anchor.find('img')['src']
        h.add_dir(addon_handle, base_url, anchor['title'], anchor['href'],
                  'todays_episode', icon, icon)
def todays_episode():
    # Resolve and play a single "today's episode" page: the stream URL is
    # embedded in an inline <script> inside the video block.
    url = h.extract_var(args, 'url')
    name = h.extract_var(args, 'name')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    div = soup.find('div', {'id': 'block-gec-videos-videopage-videos'})
    # Find the script that assigns the obfuscated 'babyenjoying' variable.
    script = None
    scripts = div.findAll('script')
    for s in scripts:
        if 'babyenjoying' in s.text:
            script = s
            break
    # Take the text after the second 'babyenjoying = ' marker, up to the next
    # ';', and strip the surrounding quote characters to get the m3u8 URL.
    # NOTE(review): raises if no matching script was found (script is None) or
    # if the marker occurs fewer than two times — confirm against page markup.
    master_m3u8 = script.text.split('babyenjoying = ', 2)[2].split(';')[0][1:-1]
    plot = soup.find('p', {'itemprop': 'description'}).text
    thumbnail = soup.find('meta', {'itemprop': 'thumbnailUrl'})['content']
    h.add_dir_video(addon_handle, name, master_m3u8, thumbnail, plot)
def episode():
    # Resolve and play an episode page. Body is identical to todays_episode
    # in this file; it also shadows the earlier episode() definition — the
    # later binding wins at import time.
    url = h.extract_var(args, 'url')
    name = h.extract_var(args, 'name')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    div = soup.find('div', {'id': 'block-gec-videos-videopage-videos'})
    # Find the script that assigns the obfuscated 'babyenjoying' variable.
    script = None
    scripts = div.findAll('script')
    for s in scripts:
        if 'babyenjoying' in s.text:
            script = s
            break
    # Text after the second 'babyenjoying = ' marker up to the next ';',
    # with surrounding quotes stripped, is the m3u8 URL.
    # NOTE(review): raises if script is None or the marker occurs fewer than
    # two times — confirm against page markup.
    master_m3u8 = script.text.split('babyenjoying = ', 2)[2].split(';')[0][1:-1]
    plot = soup.find('p', {'itemprop': 'description'}).text
    thumbnail = soup.find('meta', {'itemprop': 'thumbnailUrl'})['content']
    h.add_dir_video(addon_handle, name, master_m3u8, thumbnail, plot)
def archive_shows():
    """List archive shows from the section under the 'Archive Shows' heading.

    The dead ``h2 = soup.findAll('h2')`` assignment (immediately shadowed by
    the loop variable) has been removed.
    """
    url = h.extract_var(args, 'url')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    for h2 in soup.findAll('h2'):
        if h2.text == 'Archive Shows':
            for div in h.bs_find_all_with_class(h2.nextSibling, 'div', 'archive-show'):
                a_attrs = dict(div.find('a').attrs)
                h.add_dir(addon_handle, base_url, a_attrs['title'],
                          '%s/video/' % a_attrs['href'], 'show')
            break
def current_shows():
    """List current shows from the section under the first 'Shows' heading.

    The dead ``h2 = soup.findAll('h2')`` assignment (immediately shadowed by
    the loop variable) has been removed.
    """
    url = h.extract_var(args, 'url')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    for h2 in soup.findAll('h2'):
        if h2.text == 'Shows':
            for li in h2.nextSibling.find('ul').findAll('li'):
                a = li.find('a')
                a_attrs = dict(a.attrs)
                # Title is "<show name> (<air time>)".
                title = '%s (%s)' % (
                    h.bs_find_with_class(a, 'div', 'zc-show-title').text,
                    h.bs_find_with_class(a, 'div', 'zc-air-time').text)
                img_src = dict(a.find('img').attrs)['src']
                h.add_dir(addon_handle, base_url, title,
                          '%s/video/' % a_attrs['href'], 'show', img_src, img_src)
            break
def shows_movies():
    """List movies or music shows and add a 'Next >>' entry when a full page
    (50 items) was returned.

    ``mode`` selects between the Movies and Music listings; ``param1`` is the
    current paging offset. The original ``if mode == "Movies"`` / ``else``
    around the request issued the identical call in both branches and has
    been collapsed; the redundant trailing ``elif`` made only a dead local
    assignment and has been removed.
    """
    xbmc.log("Shows_Movies")
    url = h.extract_var(args, 'url')
    xbmc.log("URL : " + url)
    # Both modes fetch the same URL; only the per-item fields differ.
    JSONObjs = json.loads(h.make_request(url, cookie_file, cookie_jar))
    img_src = ''  # guards against an empty result set leaving it unbound
    for JSONObj in JSONObjs:
        title = JSONObj["title"]
        if mode == "Movies":
            img_src = JSONObj["image_medium"]
            h.add_dir(addon_handle, base_url, title, JSONObj["slug"],
                      "Show_Movies", img_src, img_src)
        else:
            img_src = JSONObj["listing_image_medium"]
            h.add_dir(addon_handle, base_url, title, JSONObj["slug"],
                      "Show_Music", img_src, img_src)
    if len(JSONObjs) >= 50:
        currentDisplayCounter = int(param1) + 50
        h.add_dir(addon_handle, base_url, 'Next >>', h.extract_var(args, 'url'),
                  mode + '~' + param1 + '~' + str(currentDisplayCounter),
                  img_src, img_src)
def links():
    # Resolve a JSON list of {'name', 'url'} link entries (passed via
    # 'elem_id') and add a playable item for each link that resolves.
    import urlresolver
    links_info = json.loads(h.extract_var(args, 'elem_id'))
    for link in links_info:
        url = link['url']
        print url
        resp = None
        try:
            resp = h.make_request(url, cookie_file, cookie_jar)
            soup = BeautifulSoup(resp)
            if len(soup.findChildren()) == 1:
                # Page with a single child is treated as a meta-refresh
                # redirect stub: follow the URL= target, then pull the
                # iframe src out of the #content table.
                meta = soup.find('meta', attrs={'http-equiv': 'refresh'})
                if meta:
                    c = dict(meta.attrs)['content']
                    idx4 = c.find('URL=')
                    if idx4 != -1:
                        _url = c[idx4 + 4:]
                        soup = BeautifulSoup(h.make_request(_url, cookie_file, cookie_jar))
                        div = soup.find('div', {'id': 'content'})
                        url = dict(div.find('table').find('iframe').attrs)['src']
            else:
                # Normal article page: the iframe lives in the second
                # direct child <div> of the entry-content block.
                div = h.bs_find_with_class(soup, 'div', 'entry-content')
                if div:
                    divs = div.findAll('div', recursive=False)
                    url = dict(divs[1].find('iframe').attrs)['src']
        except urllib2.HTTPError as e:
            # Hack. Avast blocks first url. Only for WatchVideo currently
            # On a 403 'Malicious content' response, rewrite the blocked
            # player URL into the host's direct embed form using the video
            # id from the query string, then retry the request.
            if e.code == 403 and e.msg == 'Malicious content':
                up = urlparse.urlparse(url)
                id = urlparse.parse_qs(up.query)['id'][0]
                f = 0
                if up.path == '/idowatch.php':
                    url = 'http://vidfile.xyz/embed-%s-1280x720.html' % id
                    f = 1
                elif up.path == '/watchvideo.php':
                    url = 'http://watchvideo2.us/embed-%s-1280x720.html' % id
                    f = 1
                elif up.path == '/playu.php':
                    url = 'http://playu.me/embed-%s-1280x720.html' % id
                    f = 1
                if f:
                    resp = h.make_request(url, cookie_file, cookie_jar)
        # Only attempt resolution when some request succeeded; `url` may
        # have been rewritten above before resolving.
        if resp:
            video_url = urlresolver.resolve(url)
            if video_url:
                h.add_dir_video(addon_handle, link['name'], video_url, '', '')
def forum():
    """List a forum's sub-forums; if it has none, list its topics instead
    (TOP-sorted topics first, then regular ones filling the remainder)."""
    forum_id = h.extract_var(args, 'elem_id')

    def add_topic(t):
        # Shared rendering for both TOP and regular topics.
        h.add_dir(addon_handle, base_url, t['topic_title'].data, t['topic_id'],
                  'topic', thumbnail=t['icon_url'])

    subforums = proxy.get_forum(False, forum_id)
    for sub in subforums:
        h.add_dir(addon_handle, base_url, sub['forum_name'].data, sub['forum_id'],
                  'forum', thumbnail=sub['logo_url'])
    if subforums:
        return

    remaining = 19
    for t in proxy.get_topic(forum_id, 0, remaining, 'TOP')['topics']:
        add_topic(t)
        remaining -= 1
    if remaining > 0:
        for t in proxy.get_topic(forum_id, 0, remaining)['topics']:
            add_topic(t)
def shows_serials():
    """Top-level listing: TV shows from a JSON feed, or the music/movie
    language menus, selected by ``param1``."""
    xbmc.log("Show_Serials")
    url = h.extract_var(args, 'url')
    xbmc.log("URL : " + url)
    if param1 == "shows":
        for entry in json.loads(h.make_request(url, cookie_file, cookie_jar)):
            thumb = entry["listing_image_small"]
            h.add_dir(addon_handle, base_url, entry["title"], entry["slug"],
                      "episodemenu", thumb, thumb)
    elif param1 == "music":
        for lang in MusicLanguages:
            xbmc.log(lang["URL"])
            h.add_dir(addon_handle, base_url, lang["Language"], lang["URL"], "Music~0")
    else:
        for lang in MoviesLanguages:
            xbmc.log(lang["URL"])
            h.add_dir(addon_handle, base_url, lang["Language"], lang["URL"], "Movies~0")
def current_shows():
    """List current shows from the section under the first 'Shows' heading.

    Third identical copy of this function in the file; the dead
    ``h2 = soup.findAll('h2')`` assignment has been removed here too.
    """
    url = h.extract_var(args, 'url')
    soup = BeautifulSoup(h.make_request(url, cookie_file, cookie_jar))
    for h2 in soup.findAll('h2'):
        if h2.text == 'Shows':
            for li in h2.nextSibling.find('ul').findAll('li'):
                a = li.find('a')
                a_attrs = dict(a.attrs)
                title = '%s (%s)' % (
                    h.bs_find_with_class(a, 'div', 'zc-show-title').text,
                    h.bs_find_with_class(a, 'div', 'zc-air-time').text)
                img_src = dict(a.find('img').attrs)['src']
                h.add_dir(addon_handle, base_url, title,
                          '%s/video/' % a_attrs['href'], 'show', img_src, img_src)
            break
def show_episodes():
    """List up to 50 episodes/videos of a show and add a 'Next >>' entry
    when a full page was returned.

    ``param1`` is the current page number. The redundant trailing ``elif``
    (dead local assignment) has been removed. The request payload is kept as
    a literal string to preserve the exact field order the endpoint accepts.
    """
    xbmc.log("Show Episodes Menu")
    showname = h.extract_var(args, 'url')
    v_data = "{\"detailsType\":\"basic\",\"searchSet\":[{\"pageSize\":50,\"pageNumber\":" + param1 + ",\"sortOrder\":\"START_DATE:DESC\",\"type\":\"search\",\"id\":\"Episodes\",\"data\":\"exact=true&all=type:Episodes&all=showname:" + showname + "\"}, {\"pageSize\":50,\"pageNumber\":" + param1 + ",\"sortOrder\":\"START_DATE:DESC\",\"type\":\"search\",\"id\":\"video\",\"data\":\"exact=true&all=type:video&all=showname:" + showname + "\"}],\"deviceDetails\":{\"mfg\":\"Google Chrome\",\"os\":\"others\",\"osVer\":\"XXX\",\"model\":\"Google Chrome\"}}"
    JSONObjs = json.loads(h.make_request_post(SHOW_EPISODE_URL, v_data, cookie_file, cookie_jar, TOKEN))
    for searchSet in JSONObjs:
        for rows in searchSet["assets"]:
            title = rows["title"]
            if rows["releaseDate"] != "":
                title = title + " (" + str(rows["releaseDate"]) + ")"
            # NOTE(review): img_src is not defined in this function — it is
            # presumably a module-level global; confirm, otherwise this line
            # raises NameError at runtime.
            h.add_dir_video(addon_handle, title, rows["hlsUrl"], img_src,
                            rows["shortDesc"], int(rows["duration"]) / 1000)
    # A full first result set implies another page may exist.
    if len(JSONObjs[0]["assets"]) >= 50:
        h.add_dir(addon_handle, base_url, 'Next >>', showname,
                  "episodemenu~" + str(int(param1) + 1))
def topic():
    # Parse the first post of a topic into named groups of [URL=...]...[...]
    # links and add one directory entry per group.
    topic_id = h.extract_var(args, 'elem_id')
    posts = proxy.get_thread(topic_id)['posts']
    post_content = posts[0]['post_content'].data
    # Split into paragraphs using whichever blank-line convention the post uses.
    if '\r\n' in post_content:
        content = post_content.split('\r\n\r\n')
    else:
        content = post_content.split('\n\n')
    links = []
    current_links = []
    for c in content:
        c = c.strip()
        if c.startswith('['):
            # Paragraph of [URL=...] lines: each line contributes one link
            # to the most recently started group.
            if c.upper().startswith('[URL'):
                links_content = c.split('\n')
                for link_content in links_content:
                    # url sits between '[URL=' and the first ']'; the link
                    # name sits between that ']' and the next '['.
                    idx1 = link_content.upper().find('[URL=')
                    idx2 = link_content.find(']')
                    url = link_content[idx1 + 5: idx2]
                    if url:
                        # Strip optional surrounding double quotes.
                        if url[0] == '"':
                            url = url[1:]
                        if url[-1] == '"':
                            url = url[:-1]
                        idx3 = link_content.find('[', idx2)
                        name = link_content[idx2 + 1: idx3]
                        current_links.append({'name': name, 'url': url})
        else:
            # Plain-text paragraph: starts a new named group; subsequent
            # [URL...] paragraphs append into it via the shared list object.
            current_links = []
            links.append({'name': c, 'links': current_links})
    for link in links:
        title = link['name'] + ' (links: ' + str(len(link['links'])) + ')'
        h.add_dir(addon_handle, base_url, title, json.dumps(link['links']), 'links')
def movies_list():
    """List the movies on an index page; add 'Next >>' when a pager exists."""
    page_url = h.extract_var(args, 'url')
    soup = BeautifulSoup(h.make_request(page_url, cookie_file, cookie_jar))

    loop_div = h.bs_find_with_class(soup, 'div', 'loop-content')
    for post in h.bs_find_all_with_class(loop_div, 'div', 'item-post'):
        thumb = dict(post.find('img').attrs)['src']
        anchor = h.bs_find_with_class(post, 'h2', 'entry-title').find('a')
        h.add_dir(addon_handle, base_url, anchor.text, dict(anchor.attrs)['href'],
                  'movie', thumbnail=thumb)

    nav = h.bs_find_with_class(soup, 'div', 'loop-nav')
    if nav:
        next_anchor = h.bs_find_with_class(nav, 'a', 'nextpostslink')
        if next_anchor:
            pages_text = h.bs_find_with_class(nav, 'span', 'pages').text
            h.add_dir(addon_handle, base_url, 'Next >> (%s)' % pages_text,
                      dict(next_anchor.attrs)['href'], 'movies_list')
def shows_serials_menu():
    """Offer Newest/Oldest ordering entries for a serial's episode list.

    Reads the module-level ``currentDisplayCounter`` as the paging offset.
    """
    xbmc.log("Show_Serials_Menu")
    serial_url = h.extract_var(args, 'url')
    xbmc.log("Serial URL : " + serial_url)
    suffix = '~' + str(currentDisplayCounter)
    h.add_dir(addon_handle, base_url, "Newest", serial_url, 'episode~new' + suffix)
    h.add_dir(addon_handle, base_url, "Oldest", serial_url, 'episode~old' + suffix)