def trakt_season(slug):
    # Render the episode list for one season of a Trakt show, caching the
    # generated XML in the local db keyed by the Trakt API url.
    # slug layout: "<trakt_id>,<season>,<year>,<tvtitle...>,<tmdb>,<imdb>"
    # (the title may itself contain commas, hence the slice/join below).
    __builtin__.content_type = "episodes"
    splitted = slug.replace("trakt_id", "").split(",")
    trakt_id = splitted[0]
    season = splitted[1]
    year = splitted[2]
    tvtitle = ",".join(splitted[3:-2])  # re-join title parts split on commas
    tmdb = splitted[-2]
    imdb = splitted[-1]
    url = "https://api.trakt.tv/shows/%s/seasons/%s?extended=full"
    url = url % (trakt_id, season)
    headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': '2',
        'trakt-api-key': TRAKT_API_KEY
    }
    # Cache hit yields (xml, content_type); a miss yields (None, None) so the
    # fetch-and-build branch below runs.
    xml, __builtin__.content_type = fetch_from_db(url) or (None, None)
    if not xml:
        __builtin__.content_type = "episodes"
        xml = ""
        response = requests.get(url, headers=headers).json()
        # Trakt returns a list of episode dicts on success; anything else
        # (e.g. an error object) produces an empty listing.
        if type(response) == list:
            for item in response:
                xml += get_episode_xml(item, trakt_id, year, tvtitle, tmdb, imdb)
        xml = remove_non_ascii(xml)
        save_to_db((xml, __builtin__.content_type), url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), __builtin__.content_type)
def open_table():
    # Build a Jen listing of NHL streams from an Airtable base; each row
    # becomes one <item> with up to three sublinks.
    xml = ""
    at = Airtable('appJ1nGNe5G1za9fg', 'NHL', api_key='keyikW1exArRfNAWj')
    match = at.get_all(maxRecords=700, view='Grid view')
    for field in match:
        try:
            res = field['fields']
            name = res['Name']
            name = remove_non_ascii(name)
            thumbnail = res['thumbnail']
            fanart = res['fanart']
            link1 = res['link1']
            link2 = res['link2']
            link3 = res['link3']
            # NOTE(review): `time` here shadows the stdlib time module for the
            # remainder of this function.
            time = res['Time']
            dsp = time + " - " + name
            xml += "<item>"\
                "<title>[COLOR darkmagenta]%s[/COLOR]</title>"\
                "<thumbnail>%s</thumbnail>"\
                "<fanart>%s</fanart>"\
                "<link>"\
                "<sublink>%s</sublink>"\
                "<sublink>%s</sublink>"\
                "<sublink>%s</sublink>"\
                "</link>"\
                "</item>" % (dsp, thumbnail, fanart, link1, link2, link3)
        except:
            # Best effort: rows missing any expected column are skipped.
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def showing(url):
    # Password gate for an adult section.  `url` is "<xml_url> get/<b64 code>";
    # the user is prompted and the remote XML is fetched only when the typed
    # answer matches the base64-decoded code.
    xml = ''
    string = url.split()
    TheXml, TheCode = string[0], string[1]
    TheCode = TheCode.replace("get/", "")
    TheCode = base64.b64decode(TheCode)
    # NOTE(review): `input` shadows the builtin of the same name (py2).
    input = ''
    keyboard = xbmc.Keyboard(input, '[COLOR red]So Your Wanting The Naughty Bits Are You ?? Get The Tissues At The Ready[/COLOR]')
    keyboard.doModal()
    if keyboard.isConfirmed():
        input = keyboard.getText()
    if input == TheCode:
        listhtml = getHtml(TheXml)
        # The remote document is treated as raw text; this regex simply grabs
        # everything outside double quotes and concatenates it.
        match = re.compile(
            '([^"]+)', re.IGNORECASE | re.DOTALL).findall(listhtml)
        for xmlContent in match:
            xml += xmlContent
    else:
        xml += "<dir>"\
            "<title>[COLOR yellow]Wrong Answer, Are you sure your old enough ??[/COLOR]</title>"\
            "<thumbnail>https://nsx.np.dl.playstation.net/nsx/material/c/ce432e00ce97a461b9a8c01ce78538f4fa6610fe-1107562.png</thumbnail>"\
            "</dir>"
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdbactorspage(url):
    # List an actor's filmography (scraped from an IMDb name-page fragment)
    # as playable movie items.
    # NOTE(review): newline-sensitive screen-scraping regex — breaks whenever
    # IMDb changes its markup.
    xml = ""
    link = 'http://www.imdb.com/' + url
    listhtml = getHtml(link)
    match = re.compile(
        '<div class="film.+?" id="act.+?">\n<span class="year_column">\n (.+?)\n</span>\n<b><a href="/title/(.+?)/.+?ref_=.+?"\n>(.+?)</a></b>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for year, imdb, title in match:
        name = title + " (" + year + ")"
        xml += "<item>"\
            "<title>%s</title>"\
            "<meta>"\
            "<content>movie</content>"\
            "<imdb>%s</imdb>"\
            "<title>%s</title>"\
            "<year>%s</year>"\
            "</meta>"\
            "<link>"\
            "<sublink>search</sublink>"\
            "<sublink>searchsd</sublink>"\
            "</link>"\
            "<thumbnail></thumbnail>"\
            "<fanart></fanart>"\
            "</item>" % (name, imdb, title, year)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdbactors(url):
    # Browse an IMDb actors list: one <dir> per actor plus a Next Page entry.
    xml = ""
    # Rewrite the incoming path into the /list/ form the site expects.
    url = url.replace("http://www.imdb.com", "").replace("actors", "list").replace("actor", "")
    link = 'http://www.imdb.com/' + url
    listhtml = getHtml(link)
    # NOTE(review): newline-sensitive scrape regex; fragile against markup changes.
    match = re.compile(
        '<img alt=".+?"\nheight="209"\nsrc="(.+?)"\nwidth="140" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n.+?<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n<a href="/name/(.+?)"\n>(.+?)\n</a>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for thumbnail, imdb, name in match:
        # Swap the small list thumbnails for higher-resolution variants.
        thumbnail = thumbnail.replace("@._V1_UY209_CR10,0,140,209_AL_.jpg", "@._V1_SY1000_SX800_AL_.jpg")
        thumbnail = thumbnail.replace("._V1_UY209_CR5,0,140,209_AL_.jpg", "._V1_UX520_CR0,0,520,700_AL_.jpg")
        xml += "<dir>"\
            "<title>%s</title>"\
            "<imdburl>name/%s</imdburl>"\
            "<thumbnail>%s</thumbnail>"\
            "</dir>" % (name, imdb, thumbnail)
    next_page = re.compile(
        '<a class="flat-button lister-page-next next-page" href="(.+?)">\n.+?Next\n.+?</a>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for url in next_page:
        try:
            xml += "<dir>"\
                "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
                "<imdburl>actor%s</imdburl>"\
                "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
                "</dir>" % (url)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_MRUPlayMedia(url):
    # Resolve a mail.ru video metadata url into playable stream items.
    # The video CDN requires the video_key cookie issued with the metadata
    # response, so it is appended to each link in Kodi's |Cookie= header form.
    xml = ""
    url = url.replace('mru_play/', '')
    try:
        # Python 2 only: urllib2/cookielib were renamed in Python 3.
        import cookielib, urllib2
        cookieJar = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar), urllib2.HTTPHandler())
        conn = urllib2.Request(url)
        connection = opener.open(conn)
        f = connection.read()
        connection.close()
        js = json.loads(f)
        # Only one cookie is expected; the loop keeps the last value seen.
        for cookie in cookieJar:
            token = cookie.value
        js = js['videos']
        for el in js:
            link = 'http:' + el['url'] + '|Cookie=video_key=' + token
            xml += "<item>"\
                " <title>%s</title>"\
                " <link>%s</link>"\
                " <thumbnail>%s</thumbnail>"\
                "</item>" % (el['key'], link, addon_icon)
    except:
        # Best effort: any failure produces an empty listing.
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdbNextPage(url):
    # Render one page of IMDb advanced-title-search results as movie items,
    # followed by a Next Page directory entry.
    xml = ""
    listhtml = getHtml(url)
    # NOTE(review): newline-sensitive scrape regex; fragile against markup changes.
    match = re.compile(
        '<img alt=".+?"\nclass="loadlate"\nloadlate="(.+?)"\ndata-tconst="(.+?)"\nheight=".+?"\nsrc=".+?"\nwidth=".+?" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n.+?\n.+?<a href=".+?"\n>(.+?)</a>\n.+?<span class="lister-item-year text-muted unbold">(.+?)</span>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for thumbnail, imdb, title, year in match:
        name = title + " " + year
        year = year.replace("(", "").replace(")", "")
        # Upgrade the small grid thumbnail to a high-resolution variant.
        thumbnail = thumbnail.replace("@._V1_UX67_CR0,0,67,98_AL_.jpg", "@._V1_SY1000_SX800_AL_.jpg")
        xml += "<item>"\
            "<title>%s</title>"\
            "<meta>"\
            "<content>movie</content>"\
            "<imdb>%s</imdb>"\
            "<title>%s</title>"\
            "<year>%s</year>"\
            "</meta>"\
            "<link>"\
            "<sublink>search</sublink>"\
            "<sublink>searchsd</sublink>"\
            "</link>"\
            "<thumbnail>%s</thumbnail>"\
            "<fanart></fanart>"\
            "</item>" % (name, imdb, title, year, thumbnail)
    # NOTE(review): [0] raises IndexError when there is no next page, which
    # aborts this function before display_list runs (unlike imdbseries, which
    # wraps the same lookup in try/except).
    next_page = re.compile(
        '<a href="([^"]+)"\nclass="lister-page-next next-page" ref-marker=adv_nxt>Next &#187;</a>\n.+?</div>\n.+?<br class="clear" />',
        re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
    xml += "<dir>"\
        "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
        "<imdburl>http://www.imdb.com/search/title%s</imdburl>"\
        "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
        "</dir>" % (next_page)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdbcharttv(url):
    # Render an IMDb TV chart (e.g. top TV) as tvshow directories, using TMDb
    # (via IMDb id lookup) for poster and fanart images.
    xml = ""
    url = url.replace("charttv/", "chart/")
    url = 'http://www.imdb.com/' + url
    listhtml = getHtml(url)
    match = re.compile(
        '<a href="/title/(.+?)/.+?pf_rd_m=.+?pf_rd_i=.+?&ref_=.+?"\n> <img src="(.+?)" width=".+?" height=".+?"/>\n</a>.+?</td>\n.+?<td class="titleColumn">\n.+?\n.+?<a href=".+?"\ntitle=".+?" >(.+?)</a>\n.+?<span class="secondaryInfo">(.+?)</span>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for imdb, thumbnail, title, year in match:
        # One TMDb "find by external id" request per chart row.
        tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
        tmdbhtml = requests.get(tmdb_url).content
        Poster_path = re.compile(
            '"poster_path":"(.+?)".+?"backdrop_path":"(.+?)"',
            re.DOTALL).findall(tmdbhtml)
        # Shows without both art paths in the TMDb response are skipped.
        for poster_path, backdrop_path in Poster_path:
            name = title + " " + year
            year = year.replace("(", "").replace(")", "")
            xml += "<dir>"\
                "<title>%s</title>"\
                "<meta>"\
                "<content>tvshow</content>"\
                "<imdb>%s</imdb>"\
                "<imdburl>season/%s</imdburl>"\
                "<tvdb></tvdb>"\
                "<tvshowtitle>%s</tvshowtitle>"\
                "<year>%s</year>"\
                "</meta>"\
                "<link></link>"\
                "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
                "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
                "</dir>" % (name, imdb, imdb, title, year, poster_path, backdrop_path)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdbseason(url):
    # List the seasons of a show by scraping its IMDb title page; each season
    # links on to the episode listing via the theepisode/ handler.
    xml = ""
    url = url.replace("season/", "")
    imdb = url
    url = 'http://www.imdb.com/title/' + imdb
    listhtml = getHtml(url)
    # The imdb id is interpolated into the pattern to anchor the season links.
    match = re.compile(
        'href="/title/' + imdb + '/episodes.+?season=.+?&ref_=tt_eps_sn_.+?"\n>(.+?)</a>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for season in match:
        episodeURL = 'http://www.imdb.com/title/' + imdb + "/episodes?season=" + season
        name = "Season: [COLOR dodgerblue]" + season + "[/COLOR]"
        xml += "<dir>"\
            "<title>%s</title>"\
            "<meta>"\
            "<content>season</content>"\
            "<imdb>%s</imdb>"\
            "<imdburl>theepisode/%s</imdburl>"\
            "<tvdb></tvdb>"\
            "<tvshowtitle></tvshowtitle>"\
            "<year></year>"\
            "<season>%s</season>"\
            "</meta>"\
            "<link></link>"\
            "<thumbnail></thumbnail>"\
            "<fanart></fanart>"\
            "</dir>" % (name, imdb, episodeURL, season)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_network(url):
    # List one page of TVMaze shows for a network.
    # url format: ".../<network_id>/<page>".
    xml = ""
    last = url.split("/")[-2]
    num = url.split("/")[-1]
    html = "https://www.tvmaze.com/shows?Show%5Bnetwork_id%5D=" + last + "&page=" + num
    html2 = requests.get(html).content
    match = re.compile('<div class="card primary grid-x">.+?<a href="(.+?)".+?<img src="(.+?)".+?<a href=".+?">(.+?)</a>', re.DOTALL).findall(html2)
    for link, image, name in match:
        link = link.split("/")[-2]  # numeric show id from the href path
        thumb = "http:" + image    # image src is protocol-relative
        xml += "<dir>"\
            "<title>%s</title>"\
            "<thumbnail>%s</thumbnail>"\
            "<tvmaze>show/%s/%s</tvmaze>"\
            "</dir>" % (name, thumb, name, link)
    try:
        # Derive the next page number from the "current" pagination link;
        # missing pagination (single page / markup change) is ignored.
        match2 = re.compile('<ul class="pagination">.+?<li class="current"><a href="(.+?)"', re.DOTALL).findall(html2)[0]
        page = match2.split(";")[-1]
        page = page.replace("page=", "")
        page = int(page)
        next_page = page + 1
        xml += "<dir>"\
            "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
            "<tvmaze>network/%s/%s</tvmaze>"\
            "<thumbnail>http://www.clker.com/cliparts/a/f/2/d/1298026466992020846arrow-hi.png</thumbnail>"\
            "</dir>" % (last, next_page)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def FullMatch_WWE_Replays(url):
    # Page through the full-match site's WP-JSON posts for the WWE category;
    # each post's embedded iframes become sublinks of one item.
    url = url.replace('wwe_replay/', '')
    page_id = url
    url = base_full_match % ((json_cat_url % (wwe_info['per_page'], wwe_info['category'], page_id)))
    try:
        xml = ""
        # NOTE(review): `headers` is passed positionally, so requests treats it
        # as the `params` argument (query string), not as HTTP headers —
        # probably intended as headers=headers; confirm before changing.
        response = requests.get(url, headers).json()
        try:
            # The API signals an out-of-range page with an error object.
            if 'invalid' in response['code']:
                return
        except:
            pass
        for post in response:
            title = clean_titles(post['title']['rendered'])
            if not 'wwe' in title.lower():
                continue
            content = post['content']['rendered']
            description = decodeEntities(re.compile('<h2>(.+?)</h2>').findall(content)[0])
            try:
                # Resolve the featured-media link for the post thumbnail.
                icon_js = requests.get(post['_links']['wp:featuredmedia'][0]['href'].replace('\\', ''))
                icon_js = json.loads(icon_js.text)
                icon = str(icon_js['guid']['rendered'])
            except:
                icon = addon_icon
            sources = dom_parser.parseDOM(str(content), 'iframe', ret='src')
            if len(sources) > 0:
                xml += "<item>"\
                    " <title>%s</title>"\
                    " <meta>"\
                    " <summary>%s</summary>"\
                    " </meta>"\
                    " <link>" % (title, description)
                for source in sources:
                    if not 'http' in source:
                        source = 'http:%s' % source  # protocol-relative embed
                    host = urlparse.urlparse(source).netloc.capitalize()
                    xml += " <sublink>%s(%s)</sublink>" % (source, host)
                xml += " </link>"\
                    " <thumbnail>%s</thumbnail>"\
                    "</item>" % (icon)
    except:
        pass
    try:
        xml += "<dir>"\
            " <title>Next Page >></title>"\
            " <fullmatch>wwe_replay/%s</fullmatch>"\
            "</dir>" % (str(int(page_id) + 1))
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def password_handler(url):
    """Gate an adult XML listing behind the 'adult_stuff' setting and a password.

    `url` is base64 of "<password>|<xml location>".  The location is either an
    http(s) URL or a "file://" name inside the addon's xml directory.  A correct
    password opens a session of SESSION_HOURS hours, persisted in the
    PASS_EXIRES_AT setting as str(float).
    """

    def _read_adult_xml(xml_loc):
        # Fetch the gated list from the web or from the addon's xml folder.
        if 'http' in xml_loc:
            return requests.get(xml_loc).content
        import xbmcvfs
        xml_loc = xml_loc.replace('file://', '')
        xml_file = xbmcvfs.File(os.path.join(addon_path, "xml", xml_loc))
        data = xml_file.read()
        xml_file.close()
        return data

    adult_xml = ''
    try:
        the_setting = this_addon.getSetting('adult_stuff')
        if the_setting == None or the_setting == '':
            the_setting = 'false'
            xbmcaddon.Addon().setSetting('adult_stuff', str(the_setting))
        if the_setting == 'false':
            # Feature disabled: show a single informational item and stop.
            adult_xml += "<item>"\
                " <title>[COLOR yellow]This menu is not enabled[/COLOR]</title>"\
                " <heading></heading>"\
                " <thumbnail>https://nsx.np.dl.playstation.net/nsx/material/c/ce432e00ce97a461b9a8c01ce78538f4fa6610fe-1107562.png</thumbnail>"\
                "</item>"
            jenlist = JenList(adult_xml)
            display_list(jenlist.get_list(), jenlist.get_content_type())
            return
    except:
        return
    sep_list = url.decode('base64').split('|')
    dec_pass = sep_list[0]
    xml_loc = sep_list[1]
    expires_at = this_addon.getSetting('PASS_EXIRES_AT')
    # BUG FIX: the setting is stored as a string.  In Python 2
    # `time.time() > "<str>"` is always False (numbers sort before strings),
    # so the session never expired.  Convert to float before comparing, and
    # test the empty case first so float('') is never attempted.
    if expires_at == '' or time.time() > float(expires_at):
        input = ''
        keyboard = xbmc.Keyboard(input, '[COLOR red]Are you worthy?[/COLOR]')
        keyboard.doModal()
        if keyboard.isConfirmed():
            input = keyboard.getText()
        if input == dec_pass:
            # Correct password: start a new session and load the listing.
            expires_at = time.time() + 60 * 60 * int(SESSION_HOURS)
            this_addon.setSetting("PASS_EXIRES_AT", str(expires_at))
            adult_xml = _read_adult_xml(xml_loc)
        else:
            adult_xml += "<dir>"\
                " <title>[COLOR yellow]Wrong Answer! You are not worthy[/COLOR]</title>"\
                " <thumbnail>https://nsx.np.dl.playstation.net/nsx/material/c/ce432e00ce97a461b9a8c01ce78538f4fa6610fe-1107562.png</thumbnail>"\
                "</dir>"
    else:
        # Session still valid: load without prompting.
        adult_xml = _read_adult_xml(xml_loc)
    jenlist = JenList(adult_xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def testings(file_name="testings.xml"):
    """
    parses local xml file as a bob list

    :param str file_name: local file name to parse
    :return: list of bob items
    :rtype: list[dict[str,str]]
    """
    addon_profile = xbmcaddon.Addon().getAddonInfo('profile')
    profile_dir = xbmc.translatePath(addon_profile).decode('utf-8')
    handle = xbmcvfs.File(os.path.join(profile_dir, file_name))
    markup = handle.read()
    handle.close()
    display_list(BobList(markup).get_list(), "videos")
def get_list(url):
    """display bob list"""
    global content_type
    listing = BobList(url)
    entries = listing.get_list()
    detected = listing.get_content_type()
    if entries == []:
        return False
    if detected:
        content_type = detected
    display_list(entries, content_type)
    return True
def get_shows(url):
    # Scrape arconaitv's "shows" and "movies" panels and emit each entry as a
    # SportsDevil plugin link.
    xml = ""
    try:
        url = "https://www.arconaitv.us/"
        headers = {'User_Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        # --- shows panel ---
        block2 = re.compile('<div class="content">(.+?)<div class="stream-nav shows" id="shows">', re.DOTALL).findall(html)
        match2 = re.compile('href=(.+?) title=(.+?)<img src=(.+?) alt=(.+?) />', re.DOTALL).findall(str(block2))
        for link2, title2, image2, name2 in match2:
            # str(block2) wraps the HTML in a list repr, so quotes come out as
            # \' and have to be stripped back off each captured field.
            name2 = name2.replace("\\'", "")
            link2 = link2.replace("\\'", "")
            image2 = image2.replace("\\'", "")
            title2 = title2.replace("\\'", "")
            title2 = title2.replace(" class=poster-link>", "")
            image2 = "https://www.arconaitv.us" + image2
            link2 = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=https://www.arconaitv.us/" + link2
            # Prefer the img alt text; fall back to the anchor title when absent.
            if not name2:
                xml += "<plugin>"\
                    "<title>%s</title>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "</link>"\
                    "<thumbnail>%s</thumbnail>"\
                    "</plugin>" % (title2, link2, image2)
            else:
                xml += "<plugin>"\
                    "<title>%s</title>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "</link>"\
                    "<thumbnail>%s</thumbnail>"\
                    "</plugin>" % (name2, link2, image2)
        # --- movies panel (no per-entry artwork; a stock film icon is used) ---
        block3 = re.compile('<div class="stream-nav movies" id="movies">(.+?)<div class="donation-form" id="donate">', re.DOTALL).findall(html)
        match3 = re.compile('href=(.+?) title=(.+?)>', re.DOTALL).findall(str(block3))
        for link3, name3 in match3:
            name3 = name3.replace("\\'", "")
            link3 = link3.replace("\\'", "")
            link3 = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=https://www.arconaitv.us/" + link3
            image3 = "http://www.userlogos.org/files/logos/nickbyalongshot/film.png"
            xml += "<plugin>"\
                "<title>%s</title>"\
                "<link>"\
                "<sublink>%s</sublink>"\
                "</link>"\
                "<thumbnail>%s</thumbnail>"\
                "</plugin>" % (name3, link3, image3)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_list(url):
    """display jen list"""
    global content_type
    listing = JenList(url)
    if not listing:
        koding.dolog(_("returned empty for ") + url)
    entries = listing.get_list()
    detected = listing.get_content_type()
    if entries == []:
        return False
    if detected:
        content_type = detected
    display_list(entries, content_type)
    return True
def get_country(url):
    # Two modes:
    #  - url contains "all": list every country from TVMaze's network filter.
    #  - otherwise url is ".../<country name>/<page>": list that country's
    #    networks, one page at a time, with a Next Page entry.
    xml = ""
    if "all" in url:
        html = "https://www.tvmaze.com/networks"
        html2 = requests.get(html).content
        block = re.compile('<option value=""></option>(.+?)</select>', re.DOTALL).findall(html2)
        match = re.compile('<option value="(.+?)">(.+?)</option>', re.DOTALL).findall(str(block))
        for number, country in match:
            xml += "<dir>"\
                "<title>%s</title>"\
                "<tvmaze>country/%s/1</tvmaze>"\
                "</dir>" % (country, country)
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
    else:
        last = url.split("/")[-2]
        num = url.split("/")[-1]
        # Re-fetch the country <select> to translate the country name back
        # into TVMaze's numeric enum value.
        html = "https://www.tvmaze.com/networks"
        html2 = requests.get(html).content
        block = re.compile('<option value=""></option>(.+?)</select>', re.DOTALL).findall(html2)
        match = re.compile('<option value="(.+?)">(.+?)</option>', re.DOTALL).findall(str(block))
        for number, country in match:
            if country == last:
                html3 = "https://www.tvmaze.com/networks?Network%5Bcountry_enum%5D=" + number + "&Network%5Bsort%5D=1&page=" + num
                html4 = requests.get(html3).content
                match = re.compile('<div class="card primary grid-x">.+?<a href="(.+?)".+?<img src="(.+?)".+?<a href=".+?">(.+?)</a>', re.DOTALL).findall(html4)
                for link, image, name in match:
                    link = link.split("/")[-2]  # numeric network id
                    thumb = "http:" + image     # protocol-relative src
                    xml += "<dir>"\
                        "<title>%s</title>"\
                        "<thumbnail>%s</thumbnail>"\
                        "<tvmaze>network/%s/1</tvmaze>"\
                        "</dir>" % (name, thumb, link)
                try:
                    # Next page number from the "current" pagination link.
                    match2 = re.compile('<ul class="pagination">.+?<li class="current"><a href="(.+?)"', re.DOTALL).findall(html4)[0]
                    page = match2.split(";")[-1]
                    page = page.replace("page=", "")
                    page = int(page)
                    next_page = page + 1
                    xml += "<dir>"\
                        "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
                        "<tvmaze>country/%s/%s</tvmaze>"\
                        "<thumbnail>http://www.clker.com/cliparts/a/f/2/d/1298026466992020846arrow-hi.png</thumbnail>"\
                        "</dir>" % (last, next_page)
                except:
                    pass
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
def get_NHLCupArchives(url):
    # List NHL Stanley Cup archive videos from a mail.ru account feed.
    # url format: "sh_nhl_sc/<offset>/<base64 account>".
    xml = ""
    url = url.replace('sh_nhl_sc/', '')
    offset = url.split('/')[0]
    account = url.split('/')[1].decode('base64')  # Python 2 str codec
    url = base_mail_url % (account, offset, per_page['nhl'])
    # First page is addressed as offset 1 externally but 0 in the feed.
    if offset == '1':
        offset = '0'
    try:
        response = requests.get(url).content
        results = json.loads(response)
        results = results[2]['items']
        for item in results:
            try:
                title = item['Title']
                # Skip "NHL Tonight" items unless that setting enables them.
                if 'true' in nhl_tonight:
                    pass
                else:
                    if 'nhl tonight' in title.lower():
                        continue
                meta_url = item['MetaUrl']
                icon = item['ImageUrlP']
                title = clean_mru_title(title)
                xml += "<item>"\
                    " <title>%s</title>"\
                    " <sportshub>mru_play/%s</sportshub>"\
                    " <thumbnail>%s</thumbnail>"\
                    "</item>" % (title, meta_url, icon)
            except:
                # Debug aid: surface the traceback in a Kodi text viewer.
                failure = traceback.format_exc()
                xbmcgui.Dialog().textviewer('Item Exception', str(failure))
                pass
    except:
        failure = traceback.format_exc()
        xbmcgui.Dialog().textviewer('a', str(failure))
        pass
    try:
        # NOTE(review): py2 str.encode('base64') appends a trailing newline to
        # the account token embedded in the next-page url.
        xml += "<dir>"\
            " <title>Next Page >></title>"\
            " <sportshub>sh_nhl_sc/%s/%s</sportshub>"\
            "</dir>" % (str(int(offset) + int(per_page['nhl'])), account.encode('base64'))
    except:
        failure = traceback.format_exc()
        xbmcgui.Dialog().textviewer('a', str(failure))
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdbKeywords(url):
    """Render IMDb keyword search results as browseable <dir> entries."""
    page = getHtml(url)
    pattern = re.compile(
        '<a href="/keyword/(.+?)/.+?ref_=fn_kw_kw_.+?" >.+?</a>(.+?)</td>',
        re.IGNORECASE | re.DOTALL)
    parts = []
    for keyword, count in pattern.findall(page):
        parts.append(
            "<dir>"
            "<title>%s</title>"
            "<imdburl>keyword/%s</imdburl>"
            "<thumbnail></thumbnail>"
            "</dir>" % (keyword + count, keyword))
    jenlist = JenList("".join(parts))
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdbuser(url):
    """Show an IMDb user's public lists as directory entries."""
    page = getHtml('http://www.imdb.com/' + url)
    rows = re.compile(
        '<a class="list-name" href="(.+?)">(.+?)</a>',
        re.IGNORECASE | re.DOTALL).findall(page)
    parts = []
    for list_url, list_name in rows:
        parts.append(
            "<dir>"
            "<title>%s</title>"
            "<imdburl>%s</imdburl>"
            "<thumbnail>https://image.ibb.co/fR6AOm/download.jpg</thumbnail>"
            "</dir>" % (list_name, list_url))
    jenlist = JenList("".join(parts))
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_NBAReplayHD(url):
    """List NBA replay posts for one category page via the site's WP-JSON API.

    url format: "nbareplayhd/<category>/<page>".  Emits one <item> per post
    that contains an embeddable iframe, plus a Next Page entry when the page
    was full.
    """
    xml = ""
    url = url.replace('nbareplayhd/', '')  # Strip our category tag off.
    cat_item = url.split('/')
    if cat_item[1] == None or cat_item[1] == '':
        cat_item[1] = '1'
    orig_cat = cat_item[0]
    orig_page = cat_item[1]
    url = urlparse.urljoin(archives['nbareplayhd'], (json_cat_url % (per_page['nba'], cat_item[0], cat_item[1])))
    # BUG FIX: count was previously first assigned inside the try block, so a
    # failed category request left it undefined and the `if count > 0:` check
    # below raised NameError.  Initialize it before any network I/O.
    count = 0
    try:
        response = requests.get(url).content
        results = re.compile('"id":(.+?),', re.DOTALL).findall(response)
        for post_id in results:
            count += 1
            try:
                # One extra request per post to pull the embed src and title.
                url = urlparse.urljoin(archives['nbareplayhd'], ('/wp-json/wp/v2/posts/%s' % (post_id)))
                page = requests.get(url).content
                page = page.replace('\\', '')
                try:
                    src = 'http:' + re.compile('src="(.+?)"', re.DOTALL).findall(page)[0]
                except:
                    continue  # no playable embed in this post
                title = re.compile('"title".+?"rendered":"(.+?)"', re.DOTALL).findall(page)[0]
                title = remove_non_ascii(title)
                xml += "<item>"\
                    " <title>%s</title>"\
                    " <link>%s</link>"\
                    " <thumbnail>%s</thumbnail>"\
                    "</item>" % (title, src, addon_icon)
            except:
                pass
    except:
        pass
    try:
        # A full page implies more results exist.
        if count == int(per_page['nba']):
            xml += "<dir>"\
                " <title>Next Page >></title>"\
                " <sportshub>nbareplayhd/%s/%s</sportshub>"\
                "</dir>" % (orig_cat, str((int(orig_page) + 1)))
    except:
        pass
    if count > 0:
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
def tmdb_tv_show(url):
    """Display the season list for a TMDb show, caching the generated XML.

    url format: "tmdb_id<id>,<year>,<tv title...>" (the title may itself
    contain commas, hence the join below).
    """
    xml = fetch_from_db(url)
    if not xml:
        # Cache miss: build the listing from the TMDb API.
        # (Removed a redundant `xml = ""` that was immediately overwritten by
        # the initialization just before the loop.)
        splitted = url.replace("tmdb_id", "").split(",")
        tmdb_id = splitted[0]
        year = splitted[1]
        tvtitle = ",".join(splitted[2:])
        response = tmdbsimple.TV(tmdb_id).info()
        seasons = response["seasons"]
        xml = ""
        for season in seasons:
            xml += get_season_xml(season, tmdb_id, year, tvtitle)
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def tmdb_season(url):
    """Display the episode list for one TMDb season, caching the generated XML.

    url format: "tmdb_id<id>,<season>,<year>,<tv title...>" (the title may
    itself contain commas, hence the join below).
    """
    xml = fetch_from_db(url)
    if not xml:
        # Cache miss: build the listing from the TMDb API.
        # (Removed a redundant `xml = ""` that was immediately overwritten by
        # the initialization just before the loop.)
        splitted = url.replace("tmdb_id", "").split(",")
        tmdb_id = splitted[0]
        season = splitted[1]
        year = splitted[2]
        tvtitle = ",".join(splitted[3:])
        response = tmdbsimple.TV_Seasons(tmdb_id, season).info()
        episodes = response["episodes"]
        xml = ""
        for episode in episodes:
            xml += get_episode_xml(episode, tmdb_id, year, tvtitle)
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdbBothSearch(url):
    # Render IMDb title-search results (movies and series mixed) with TMDb
    # artwork; "(TV Series ...)" rows become tvshow <dir>s, the rest movie items.
    xml = ""
    listhtml = getHtml(url)
    match = re.compile(
        '<img src="(.+?)" /></a> </td> <td class="result_text"> <a href="/title/(.+?)/.+?ref_=fn_al_tt_.+?" >(.+?)</a>(.+?)</td>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for thumbnail, imdb, title, year in match:
        tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
        tmdbhtml = requests.get(tmdb_url).content
        Poster_path = re.compile(
            '"poster_path":"(.+?)"',
            re.DOTALL).findall(tmdbhtml)
        Backdrop_path = re.compile(
            '"backdrop_path":"(.+?)"',
            re.DOTALL).findall(tmdbhtml)
        # NOTE(review): the nested loops emit one entry per poster/backdrop
        # combination, and the movie branch mutates `year` on each pass, so
        # names degrade after the first combination when multiple art paths
        # are returned.
        for poster_path in Poster_path:
            for backdrop_path in Backdrop_path:
                if not 'Series' in year:
                    year = year.split(')', 1)[0]
                    name = title + " " + year + ')'
                    year = year.replace("(", "").replace(")", "")
                    xml += "<item>"\
                        "<title>%s</title>"\
                        "<meta>"\
                        "<content>movie</content>"\
                        "<imdb>%s</imdb>"\
                        "<title>%s</title>"\
                        "<year>%s</year>"\
                        "</meta>"\
                        "<link>"\
                        "<sublink>search</sublink>"\
                        "<sublink>searchsd</sublink>"\
                        "</link>"\
                        "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
                        "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
                        "</item>" % (name, imdb, title, year, poster_path, backdrop_path)
                else:
                    name = title + " " + year
                    xml += "<dir>"\
                        "<title>%s</title>"\
                        "<imdburl>season/%s</imdburl>"\
                        "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
                        "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
                        "</dir>" % (name, imdb, poster_path, backdrop_path)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def m3u(url):
    """Parse an .m3u playlist url into Jen <item> entries (title + stream link).

    Non-.m3u urls produce an empty listing.  Cleanups vs the original: removed
    the dead `if not xml:` branch and the no-op `name = name` / `url = url`
    assignments, and renamed the loop variable so it no longer shadows the
    `url` parameter.
    """
    xml = ""
    if '.m3u' in url:
        listhtml = getHtml(url)
        # #EXTINF:<dur>,<title>\n<stream url>\n
        match = re.compile('#EXTINF:.+?,(.+?)\n([^"]+)\n',
                           re.IGNORECASE | re.DOTALL).findall(listhtml)
        for name, link in match:
            xml += "<item>"\
                "<title>%s</title>"\
                "<link>%s</link>"\
                "<thumbnail></thumbnail>"\
                "</item>" % (name, link)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdbyears(url):
    # Render IMDb feature films for a given year ("years/<year>") as movie
    # items with TMDb artwork, plus a Next Page entry.
    xml = ""
    url = url.replace("years/", "")
    url = 'http://www.imdb.com/search/title?year=' + url + '&title_type=feature'
    listhtml = getHtml(url)
    # NOTE(review): newline-sensitive scrape regex; fragile against markup changes.
    match = re.compile(
        '<img alt=".+?"\nclass="loadlate"\nloadlate="(.+?)"\ndata-tconst="(.+?)"\nheight="98"\nsrc=".+?"\nwidth="67" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n.+?\n.+?<a href=".+?"\n>(.+?)</a>\n.+?<span class="lister-item-year text-muted unbold">(.+?)</span>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for thumbnail, imdb, title, year in match:
        # One TMDb "find by external id" request per result row for artwork.
        tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
        tmdbhtml = requests.get(tmdb_url).content
        Poster_path = re.compile(
            '"backdrop_path":"(.+?)".+?"overview":".+?","poster_path":"(.+?)"}',
            re.DOTALL).findall(tmdbhtml)
        for backdrop_path, poster_path in Poster_path:
            name = title + " " + year
            year = year.replace("(", "").replace(")", "")
            thumbnail = thumbnail.replace("@._V1_UX67_CR0,0,67,98_AL_.jpg", "@._V1_SY1000_SX800_AL_.jpg")
            xml += "<item>"\
                "<title>%s</title>"\
                "<meta>"\
                "<content>movie</content>"\
                "<imdb>%s</imdb>"\
                "<title>%s</title>"\
                "<year>%s</year>"\
                "</meta>"\
                "<link>"\
                "<sublink>search</sublink>"\
                "<sublink>searchsd</sublink>"\
                "</link>"\
                "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
                "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
                "</item>" % (name, imdb, title, year, poster_path, backdrop_path)
    # NOTE(review): [0] raises IndexError when there is no next page, which
    # aborts this function before display_list runs.
    next_page = re.compile(
        '<a href="([^"]+)"\nclass="lister-page-next next-page" ref-marker=adv_nxt>Next &#187;</a>\n.+?</div>\n.+?<br class="clear" />',
        re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
    xml += "<dir>"\
        "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
        "<imdburl>http://www.imdb.com/search/title%s</imdburl>"\
        "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
        "</dir>" % (next_page)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_season(url):
    # List a season's episodes via the TMDb API.
    # The incoming url packs fields positionally, split on "/":
    #   .../<tv_title>/<fanart>/<imdb>/<tvdb>/<tmdb_id>/<season_number>
    xml = ""
    sea_num = url.split("/")[-1]
    if len(sea_num) == 1:
        sea_num = "0" + sea_num  # zero-pad single-digit seasons
    tmdb_id = url.split("/")[-2]
    tvdb = url.split("/")[-3]
    imdb = url.split("/")[-4]
    tv_title = url.split("/")[-6]
    fanart = url.split("/")[-5]
    tmdb_fanart = "https://image.tmdb.org/t/p/original/" + str(fanart)
    html = "https://api.themoviedb.org/3/tv/%s/season/%s?api_key=%s&language=en-US" % (tmdb_id, sea_num, TMDB_api_key)
    html = requests.get(html).json()
    eps = html['episodes']
    for episodes in eps:
        thumb = episodes['still_path']
        thumb = "https://image.tmdb.org/t/p/original" + str(thumb)
        title = episodes['name']
        air_date = episodes['air_date']  # NOTE(review): None for unaired episodes would raise here
        year = air_date.split("-")[0]
        episode_num = episodes['episode_number']
        xml += "<item>"\
            "<title>%s</title>"\
            "<meta>"\
            "<imdb>%s</imdb>"\
            "<tvdb>%s</tvdb>"\
            "<content>episode</content>"\
            "<tvshowtitle>%s</tvshowtitle>"\
            "<year>%s</year>"\
            "<premiered></premiered>"\
            "<season>%s</season>"\
            "<episode>%s</episode>"\
            "</meta>"\
            "<link>"\
            "<sublink>search</sublink>"\
            "<sublink>searchsd</sublink>"\
            "</link>"\
            "<thumbnail>%s</thumbnail>"\
            "<fanart>%s</fanart>"\
            "</item>" % (title, imdb, tvdb, tv_title, year, sea_num, episode_num, thumb, tmdb_fanart)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdbseries(url):
    # Render one page of IMDb series search results as tvshow directories
    # with TMDb artwork, plus a Next Page entry when one exists.
    xml = ""
    listhtml = getHtml(url)
    # NOTE(review): newline-sensitive scrape regex; fragile against markup changes.
    match = re.compile(
        '<img alt=".+?"\nclass="loadlate"\nloadlate="(.+?)"\ndata-tconst="(.+?)"\nheight=".+?"\nsrc=".+?"\nwidth=".+?" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n.+?\n.+?<a href=".+?"\n>(.+?)</a>\n.+?<span class="lister-item-year text-muted unbold">(.+?)</span>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for thumbnail, imdb, title, year in match:
        # One TMDb "find by external id" request per result row for artwork.
        tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
        tmdbhtml = requests.get(tmdb_url).content
        Poster_path = re.compile(
            '"poster_path":"(.+?)".+?"backdrop_path":"(.+?)"',
            re.DOTALL).findall(tmdbhtml)
        for poster_path, backdrop_path in Poster_path:
            name = title + " " + year
            year = year.replace("(", "").replace(")", "")
            xml += "<dir>"\
                "<title>%s</title>"\
                "<meta>"\
                "<content>tvshow</content>"\
                "<imdb>%s</imdb>"\
                "<imdburl>season/%s</imdburl>"\
                "<tvdb></tvdb>"\
                "<tvshowtitle>%s</tvshowtitle>"\
                "<year>%s</year>"\
                "</meta>"\
                "<link></link>"\
                "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
                "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
                "</dir>" % (name, imdb, imdb, title, year, poster_path, backdrop_path)
    try:
        # Missing pagination (last page) simply omits the Next Page entry.
        next_page = re.compile(
            '<a href="([^"]+)"\nclass="lister-page-next next-page" ref-marker=adv_nxt>Next &#187;</a>\n.+?</div>\n.+?<br class="clear" />',
            re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
        xml += "<dir>"\
            "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
            "<imdburl>http://www.imdb.com/search/title%s</imdburl>"\
            "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
            "</dir>" % (next_page)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def new_releases():
    # List sports channels from an Airtable base; rows whose link is a Kodi
    # plugin:// url become <plugin> entries, the rest plain <item>s.
    xml = ""
    at = Airtable('apppx7NENxSaqMkM5', 'Sports_channels', api_key='keyOHaxsTGzHU9EEh')
    match = at.get_all(maxRecords=700, sort=['channel'])
    # NOTE(review): this regexes over str(match) — the Python repr of the
    # records (hence the "u'" prefixes) — rather than reading fields directly;
    # rows with missing fields or embedded quotes will be silently dropped.
    results = re.compile("fanart': u'(.+?)'.+?link': u'(.+?)'.+?thumbnail': u'(.+?)'.+?channel': u'(.+?)'.+?summary': u'(.+?)'", re.DOTALL).findall(str(match))
    for fanart, link, thumbnail, channel, summary in results:
        if "plugin" in link:
            xml += "<plugin>"\
                "<title>%s</title>"\
                "<meta>"\
                "<content>movie</content>"\
                "<imdb></imdb>"\
                "<title>%s</title>"\
                "<year></year>"\
                "<thumbnail>%s</thumbnail>"\
                "<fanart>%s</fanart>"\
                "<summary>%s</summary>"\
                "</meta>"\
                "<link>"\
                "<sublink>%s</sublink>"\
                "</link>"\
                "</plugin>" % (channel, channel, thumbnail, fanart, summary, link)
        else:
            xml += "<item>"\
                "<title>%s</title>"\
                "<meta>"\
                "<content>movie</content>"\
                "<imdb></imdb>"\
                "<title>%s</title>"\
                "<year></year>"\
                "<thumbnail>%s</thumbnail>"\
                "<fanart>%s</fanart>"\
                "<summary>%s</summary>"\
                "</meta>"\
                "<link>"\
                "<sublink>%s</sublink>"\
                "</link>"\
                "</item>" % (channel, channel, thumbnail, fanart, summary, link)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def all_episodes(url):
    # Flatten a pickled list of season urls into one combined episode listing,
    # showing a cancellable progress dialog while each season loads.
    global content_type
    import pickle
    import xbmcgui
    # SECURITY NOTE(review): pickle.loads executes arbitrary code if the
    # payload is attacker-controlled; only safe if `url` always originates
    # from this addon itself — confirm the call sites.
    season_urls = pickle.loads(url)
    result_items = []
    dialog = xbmcgui.DialogProgress()
    dialog.create(addon_name, "Loading items")
    num_urls = len(season_urls)
    for index, season_url in enumerate(season_urls):
        if dialog.iscanceled():
            break
        # Python 2 integer division keeps percent an int for the dialog.
        percent = ((index + 1) * 100) / num_urls
        dialog.update(percent, "processing lists", "%s of %s" % (index + 1, num_urls))
        bob_list = BobList(season_url)
        # skip_dialog: each BobList fetch runs quietly under our own dialog.
        result_items.extend(bob_list.get_list(skip_dialog=True))
    content_type = "episodes"
    display_list(result_items, "episodes")
def get_list(url, icon, fan):
    """display jen list"""
    # Cache-aware variant: a cached display payload short-circuits everything;
    # otherwise the JenList is built, displayed and (via display_list) pinned.
    pins = url
    Pins = clean_url(url)
    Items = fetch_from_db(Pins)
    if Items:
        display_data(Items)
        return True
    else:
        global content_type
        jen_list = JenList(url)
        if not jen_list:
            koding.dolog(_("returned empty for ") + url)
        try:
            items = jen_list.get_list()
        except:
            # Fallback: hand the url to the category checker instead.
            logging.warning('Destiny JEN')
            from jen import check_jen_categroys
            check_jen_categroys(url, icon, fan)
            return '0'
        content = jen_list.get_content_type()
        if items == []:
            return False
        if content:
            content_type = content
        #logging.warning(items)
        display_list(items, content_type, pins)
        return True
        # NOTE(review): everything from here down is unreachable (it follows
        # `return True`).  It looks like an older manual addDir3/addLink
        # rendering path; confirm intent before deleting or reviving it.
        logging.warning(content_type)
        for it in items:
            if it['folder'] == True:
                plot = it.get('plot', ' ')
                if plot == None:
                    plot = ' '
                addDir3(it['label'], it['url'], 141, it['icon'], it['fanart'], plot, data=it['year'], original_title=it['label'], id=it['imdb'], heb_name=it['mode'], show_original_year=it['year'])
            else:
                plot = it.get('plot', ' ')
                if plot == None:
                    plot = ' '
                info = (PTN.parse(it['label']))
                video_data = {}
                video_data['title'] = info['title'].replace('=', ' ').replace(
                    '[B]', '').replace('[/B]', '').replace('silver', '').replace(
                    'deepskyblue', '').replace('[', '').replace(']', '').replace(
                    '/COLOR', '').replace('COLOR', '').replace('4k', '').replace(
                    '4K', '').strip().replace('(', '.').replace(
                    ')', '.').replace(' ', '.').replace('..', '.')
                year = ''
                if 'year' in info:
                    year = info['year']
                    video_data['year'] = info['year']
                else:
                    year = it['year']
                    video_data['year'] = year
                video_data['plot'] = plot
                logging.warning(it['label'])
                addLink(it['label'], it['url'], 5, False, iconimage=it['icon'], fanart=it['fanart'], description=plot, data=year, original_title=it['label'], id=it['imdb'], video_info=json.dumps(video_data))
        return True
# (base_id, table_name) for every Airtable base searched by open_bml_search.
_BML_TABLES = [
    ('appJh8Kyj5UkERsUT', 'Radio Stations'),
    ('appkEDsIy1skg0rBH', 'Radio Stations 2'),
    ('appNcFWTkprAJiizT', 'Radio Stations 3'),
    ('appKUY6MYlvQQO51W', 'Radio Stations 4'),
    ('appfWHupyJXhgvaum', 'Radio Stations 5'),
    ('appODokGNYAShltUj', 'Radio Stations 6'),
    ('appFvuCrqLynvzDup', 'Radio Stations 7'),
]


def open_bml_search():
    """Fuzzy-search all seven movie Airtable bases by user-entered name.

    Prompts for a movie name, collects every 'name' across the bases,
    fuzzy-matches, then renders the matching records via display_xml().
    Shows a "not found" entry when nothing matches.
    """
    xml = ""
    show = koding.Keyboard(heading='Movie Name')
    airtables = [Airtable(base_id, table, api_key='keyikW1exArRfNAWj')
                 for base_id, table in _BML_TABLES]
    # Gather every known name so the fuzzy matcher sees the full catalog.
    movie_list = []
    for at in airtables:
        for field in at.get_all(maxRecords=1200, sort=['name']):
            movie_list.append(field['fields']['name'])
    search_result = koding.Fuzzy_Search(show, movie_list)
    if not search_result:
        xbmc.log("--------no results--------", level=xbmc.LOGNOTICE)
        xml += "<item>"\
               "<title>[COLOR=orange][B]Movie was not found[/B][/COLOR]</title>"\
               "</item>"
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
        # Bug fix: the original fell through after the "not found" entry
        # and displayed the (empty) list a second time.
        return
    for item in search_result:
        item2 = remove_non_ascii(str(item))
        # Each matched title may live in any of the bases; best-effort per
        # base so one missing field doesn't abort the whole search.
        for at in airtables:
            try:
                for record in at.search('name', item2):
                    res = record['fields']
                    name = remove_non_ascii(res['name'])
                    fanart = res['fanart']
                    thumbnail = res['thumbnail']
                    summary = remove_non_ascii(res['summary'])
                    xml += display_xml(name, summary, thumbnail, fanart,
                                       res['link1'], res['link2'],
                                       res['link3'], res['link4'],
                                       res['link5'])
            except:
                pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def open_action_movies(url):
    """List every record of a given genre across the seven movie bases.

    The genre is the last path segment of *url*; records whose 'type'
    column matches are rendered via display_xml().
    """
    xml = ""
    genre = url.split("/")[-1]
    # Same seven bases for every genre listing.
    tables = [
        ('appJh8Kyj5UkERsUT', 'Radio Stations'),
        ('appkEDsIy1skg0rBH', 'Radio Stations 2'),
        ('appNcFWTkprAJiizT', 'Radio Stations 3'),
        ('appKUY6MYlvQQO51W', 'Radio Stations 4'),
        ('appfWHupyJXhgvaum', 'Radio Stations 5'),
        ('appODokGNYAShltUj', 'Radio Stations 6'),
        ('appFvuCrqLynvzDup', 'Radio Stations 7'),
    ]
    for base_id, table in tables:
        at = Airtable(base_id, table, api_key='keyikW1exArRfNAWj')
        # Consistency fix: the original wrapped only the first two bases'
        # searches in try/except, so a failing search on bases 3-7 crashed
        # the whole menu.  All bases are now uniformly best-effort, with a
        # per-record guard so one bad record only skips itself.
        try:
            for field in at.search('type', genre, sort=['name']):
                try:
                    res = field['fields']
                    name = remove_non_ascii(res['name'])
                    summary = remove_non_ascii(res['summary'])
                    xml += display_xml(name, summary, res['thumbnail'],
                                       res['fanart'], res['link1'],
                                       res['link2'], res['link3'],
                                       res['link4'], res['link5'])
                except:
                    pass
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def _trek_sublinks(links):
    # Join playable links into consecutive <sublink> elements, in order.
    return "".join("<sublink>%s</sublink>" % link for link in links)


def _trek_item(name, thumbnail, fanart, summary, link_body, imdb=""):
    # Shared <item> wrapper used by every open_items branch; *link_body*
    # is the pre-built payload of the <link> element.
    return ("<item>"
            "<title>%s</title>"
            "<meta>"
            "<content>movie</content>"
            "<imdb>%s</imdb>"
            "<title></title>"
            "<year></year>"
            "<thumbnail>%s</thumbnail>"
            "<fanart>%s</fanart>"
            "<summary>%s</summary>"
            "</meta>"
            "<link>%s</link>"
            "</item>" % (name, imdb, thumbnail, fanart, summary, link_body))


def open_items(url):
    """Render one Star Trek Airtable table selected by *url*.

    *url* is "...|<table name>|<base key>".  Each known table name maps to
    a slightly different record layout (with/without imdb and trailer, or
    a show link); unknown tables display nothing, as before.  Records
    missing any expected field are skipped silently.
    """
    xml = ""
    title = url.split("|")[-2]
    key = url.split("|")[-1]
    at = Airtable(key, title, api_key='keyikW1exArRfNAWj')
    match = at.get_all(maxRecords=700, view='Grid view')
    if title == "Star_Trek_Movies":
        # Four mirrors + trailer, with an IMDB id.
        for field in match:
            try:
                res = field['fields']
                name = remove_non_ascii(res['Name'])
                summary = remove_non_ascii(res['summary'])
                links = [res['link1'], res['link2'], res['link3'],
                         res['link4'], res['trailer'] + "(Trailer)"]
                xml += _trek_item(name, res['thumbnail'], res['fanart'],
                                  summary, _trek_sublinks(links),
                                  imdb=res['imdb'])
            except:
                pass
    elif title == "Star_Trek_Extras":
        # Four mirrors, no trailer, no IMDB id.
        for field in match:
            try:
                res = field['fields']
                name = remove_non_ascii(res['Name'])
                summary = remove_non_ascii(res['summary'])
                links = [res['link1'], res['link2'], res['link3'],
                         res['link4']]
                xml += _trek_item(name, res['thumbnail'], res['fanart'],
                                  summary, _trek_sublinks(links))
            except:
                pass
    elif title == "Unofficial_Movies":
        # Four mirrors + trailer, no IMDB id.
        for field in match:
            try:
                res = field['fields']
                name = remove_non_ascii(res['Name'])
                summary = remove_non_ascii(res['summary'])
                links = [res['link1'], res['link2'], res['link3'],
                         res['link4'], res['trailer'] + "(Trailer)"]
                xml += _trek_item(name, res['thumbnail'], res['fanart'],
                                  summary, _trek_sublinks(links))
            except:
                pass
    elif title in ("TV_Shows", "Unofficial_Series"):
        # Show entries link onward through the <trekkie> handler.
        for field in match:
            try:
                res = field['fields']
                name = remove_non_ascii(res['Name'])
                summary = remove_non_ascii(res['summary'])
                xml += _trek_item(name, res['thumbnail'], res['fanart'],
                                  summary,
                                  "<trekkie>shows|%s</trekkie>" % res['link1'])
            except:
                pass
    else:
        # Unknown table: original displayed nothing at all.
        return
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def movies(html):
    """Build the ArconaiTV movies menu from the site's homepage *html*.

    Results are cached under a fixed pin; on a cache hit the stored data
    is displayed directly and the page is not re-parsed.
    """
    pins = "PLuginarconaitvmovies"
    Items = fetch_from_db2(pins)
    if Items:
        display_data(Items)
    else:
        xml = ""
        try:
            # Isolate the movies navigation block, then pull every
            # href/title pair out of its repr().
            block5 = re.compile('<div class="stream-nav movies" id="movies">(.+?)<div class="acontainer">', re.DOTALL).findall(html)
            match5 = re.compile('href=(.+?) title=(.+?)>', re.DOTALL).findall(str(block5))
            # Static section header entry.
            xml += "<item>"\
                   "<title>[COLOR blue][B]----MOVIES----[/B][/COLOR]</title>"\
                   "<thumbnail>https://archive.org/services/img/movies-icon_201707</thumbnail>"\
                   "<fanart>http://listtoday.org/wallpaper/2015/12/movies-in-theaters-1-desktop-background.jpg</fanart>"\
                   "<link></link>"\
                   "</item>"
            for link, name in match5:
                # Matching ran over str(list), so strip the escaped
                # quotes that repr() introduced.
                name = name.replace("\\'", "")
                name = remove_non_ascii(name)
                link = link.replace("\\'", "")
                link = "https://www.arconaitv.us/" + link
                # Try to find channel-specific artwork on the page.
                image3 = get_other(name, html)
                if image3:
                    xml += "<plugin>"\
                           "<title>%s</title>"\
                           "<thumbnail>%s</thumbnail>"\
                           "<fanart>http://listtoday.org/wallpaper/2015/12/movies-in-theaters-1-desktop-background.jpg</fanart>"\
                           "<summary>Random Movies</summary>"\
                           "<arconaitv>ArcLink**%s**%s**%s</arconaitv>"\
                           "</plugin>" % (name,image3,link,name,image3)
                elif not image3:
                    # No per-channel artwork found -- fall back to a
                    # generic film icon.
                    image3 = "https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/0d920358-1c79-4669-b107-2b22e0dd7dcd/d8nntky-04e9b7c7-1d09-44d8-8c24-855a19988294.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcLzBkOTIwMzU4LTFjNzktNDY2OS1iMTA3LTJiMjJlMGRkN2RjZFwvZDhubnRreS0wNGU5YjdjNy0xZDA5LTQ0ZDgtOGMyNC04NTVhMTk5ODgyOTQucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.To4Xk896HVjziIt-LjTSotZR0x7NVCbroAIkiSpik84"
                    xml += "<plugin>"\
                           "<title>%s</title>"\
                           "<thumbnail>%s</thumbnail>"\
                           "<fanart>http://listtoday.org/wallpaper/2015/12/movies-in-theaters-1-desktop-background.jpg</fanart>"\
                           "<summary>Random Movies</summary>"\
                           "<arconaitv>ArcLink**%s**%s**%s</arconaitv>"\
                           "</plugin>" % (name,image3,link,name,image3)
        except:
            pass
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), "movies", pins)
def open_selected_show(url):
    """Render one show's episode table from Airtable.

    *url* is "...|<table name>|<base key>".  Results are cached under a
    per-show pin; each record contributes an <item> with one to three
    <sublink> mirrors ("-" marks an unused link slot).
    """
    xml = ""
    end = url.split("|")[-2].replace(" ", "")
    pins = "PLugincccinema" + end
    Items = fetch_from_db2(pins)
    if Items:
        display_data(Items)
    else:
        title = url.split("|")[-2]
        key = url.split("|")[-1]
        at = Airtable(key, title, api_key=workspace_api_key)
        match = at.get_all(maxRecords=1200, sort=['name'])
        for field in match:
            try:
                res = field['fields']
                name = remove_non_ascii(res['name'])
                summary = remove_non_ascii(res['summary'])
                fanart = res['fanart']
                thumbnail = res['thumbnail']
                # Read all three up front so a record missing any link
                # column is skipped whole, exactly as before.
                link1 = res['link1']
                link2 = res['link2']
                link3 = res['link3']
                # Keep only the populated link slots, in order.
                if link2 == "-":
                    links = [link1]
                elif link3 == "-":
                    links = [link1, link2]
                else:
                    links = [link1, link2, link3]
                sublinks = "".join("<sublink>%s</sublink>" % l for l in links)
                # Single template (the original triplicated it per link
                # count); output bytes are identical.
                xml += ("<item>"
                        "<title>%s</title>"
                        "<meta>"
                        "<content>movie</content>"
                        "<imdb></imdb>"
                        "<title></title>"
                        "<year></year>"
                        "<thumbnail>%s</thumbnail>"
                        "<fanart>%s</fanart>"
                        "<summary>%s</summary>"
                        "</meta>"
                        "<link>%s</link>"
                        "</item>" % (name, thumbnail, fanart, summary,
                                     sublinks))
            except:
                pass
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
def get_stream(url):
    """Scrape sports-stream.net's schedule page into a Jen list.

    Best-effort throughout: every scrape step is wrapped in try/except so
    a layout change degrades to a partial (or empty) list rather than an
    error.  NOTE(review): the incoming *url* parameter is immediately
    overwritten with the fixed schedule URL.
    """
    xml = ""
    try:
        url = "http://www.sports-stream.net/schedule.html"
        headers = {'User_Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        # Header entry showing the viewer's local clock time.
        local_time = datetime.datetime.now().strftime('%H:%M')
        xml += "<item>"\
               "<title>[COLOR blue]Local Time %s[/COLOR]</title>"\
               "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
               "<fanart>http://luxurywizard.space/icons/fanart.jpg</fanart>"\
               "<link></link>"\
               "</item>" % local_time
        try:
            # First schedule heading (text up to "GMT").  NOTE(review):
            # this branch displays the list immediately and the function
            # later displays again -- confirm the double display is
            # intentional.
            match = re.compile('<h3>(.+?)<input onclick=', re.DOTALL).findall(html)[0]
            head1 = match.split("GMT")[0]
            xml += "<item>"\
                   "<title>[COLOR blue]%s[/COLOR]</title>"\
                   "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                   "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                   "<link></link>"\
                   "</item>" % head1
            jenlist = JenList(xml)
            display_list(jenlist.get_list(), jenlist.get_content_type())
        except:
            pass
        try:
            # First block of events.  Note: xml is reset here, discarding
            # the header entries built above.
            xml = ""
            block3 = re.compile(
                '<br><font color="red"><h3>(.+?)<br><font color="red"><h3>',
                re.DOTALL).findall(html)
            match5 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span>\s*(.+?)\s*\-\s*<a.+?href="(.+?)"',
                re.DOTALL).findall(str(block3))
            # NOTE: loop variable `time` shadows the time module locally.
            for time, name, link in match5:
                # Convert the listed time into the viewer's timezone.
                (display_time) = convDateUtil(time, 'default', 'Europe/Athens')
                link = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>%s - %s</title>"\
                       "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (display_time, name, link)
            if xml == "":
                # Fallback layout: only one heading on the page.
                block3 = re.compile('<br><font color="red"><h3>(.+?)</html>',
                                    re.DOTALL).findall(html)
                match5 = re.compile(
                    '<span style="color:#FF0000;">(.+?)</span>\s*(.+?)\s*\-\s*<a.+?href="(.+?)"',
                    re.DOTALL).findall(str(block3))
                for time, name, link in match5:
                    (display_time) = convDateUtil(time, 'default', 'Europe/Athens')
                    link = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=" + link
                    xml += "<plugin>"\
                           "<title>%s - %s</title>"\
                           "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                           "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                           "<link>%s</link>"\
                           "</plugin>" % (display_time, name, link)
        except:
            pass
        try:
            # Second day's heading, if present.
            match3 = re.compile(
                '<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<input onclick=',
                re.DOTALL).findall(html)[0]
            head2 = match3.split("GMT")[0]
            xml += "<item>"\
                   "<title>[COLOR blue]%s[/COLOR]</title>"\
                   "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                   "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                   "<link></link>"\
                   "</item>" % head2
        except:
            pass
        try:
            # Second day's events.
            block2 = re.compile(
                '<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<script data-cfasync',
                re.DOTALL).findall(html)
            match4 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span>\s*(.+?)\s*\-\s*<a.+?href="(.+?)"',
                re.DOTALL).findall(str(block2))
            for time, name, link in match4:
                (display_time) = convDateUtil(time, 'default', 'Europe/Athens')
                link = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>%s - %s</title>"\
                       "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (display_time, name, link)
        except:
            pass
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def open_movies(url):
    """Build the movies list from the 'OTB BNW' Airtable base.

    Results are cached under a fixed pin.  Before building, membership is
    checked against an authorisation table; unauthorised users abort.
    Each record contributes one to four <sublink> mirrors plus a trailer
    link ("-" marks an unused link slot).
    """
    pins = "PLugin_bnw_movies"
    Items = fetch_from_db2(pins)
    if Items:
        display_data(Items)
    else:
        # Authorisation gate: collect the allowed ids ('au1' column) and
        # bail out unless the current id is present.
        # NOTE(review): tid/tnm/atk/yai are module globals defined
        # elsewhere -- presumably table id/name, api key and user id.
        allowed = []
        auth_table = Airtable(tid, tnm, api_key=atk)
        for record in auth_table.get_all(maxRecords=1200, view='Grid view'):
            allowed.append(record['fields']['au1'])
        if yai not in allowed:
            exit()
        xml = ""
        at = Airtable('appChKwhoXApFfXik', 'OTB BNW',
                      api_key='keyikW1exArRfNAWj')
        # (Removed an unused start_time = time.time() stopwatch.)
        match = at.get_all(maxRecords=1200, sort=['name'])
        for field in match:
            try:
                res = field['fields']
                thumbnail = res['thumbnail']
                fanart = res['fanart']
                # Read all four up front so a record missing any link
                # column is skipped whole, exactly as before.
                link1 = res['link1']
                link2 = res['link2']
                link3 = res['link3']
                link4 = res['link4']
                name = remove_non_ascii(res['name'])
                trailer = res['trailer']
                summary = remove_non_ascii(res['summary'])
                # Keep only the populated link slots, in order.
                if link2 == "-":
                    links = [link1]
                elif link3 == "-":
                    links = [link1, link2]
                elif link4 == "-":
                    links = [link1, link2, link3]
                else:
                    links = [link1, link2, link3, link4]
                sublinks = "".join("<sublink>%s</sublink>" % l for l in links)
                sublinks += "<sublink>%s(Trailer)</sublink>" % trailer
                # Single template (the original had four copies differing
                # only in sublink count); output bytes are identical.
                xml += ("<item>"
                        "<title>%s</title>"
                        "<meta>"
                        "<content>movie</content>"
                        "<imdb></imdb>"
                        "<title></title>"
                        "<year></year>"
                        "<thumbnail>%s</thumbnail>"
                        "<fanart>%s</fanart>"
                        "<summary>%s</summary>"
                        "</meta>"
                        "<link>%s</link>"
                        "</item>" % (name, thumbnail, fanart, summary,
                                     sublinks))
            except:
                pass
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
def trakt(url):
    """Generic Trakt endpoint renderer.

    Fetches *url* from the Trakt API (or the local cache), converts the
    response items to Jen XML, appends a "Next Page" entry for paginated
    responses, caches the result and displays it.  The literal *url*
    value "search" triggers an interactive keyboard search first.
    """
    if url == "search":
        # Build the real search URL from user input.
        term = koding.Keyboard("Search For")
        url = "https://api.trakt.tv/search/movie,show,person,list?query=%s" % term
    headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': '2',
        'trakt-api-key': TRAKT_API_KEY
    }
    # Endpoints touching user data need an OAuth bearer token; bail out
    # quietly when authentication is unavailable.
    if "sync" in url or "user" in url or "recommendations" in url:
        if "list" not in url or "/me/" in url or "like" in url or "sync" in url:
            auth = authenticate()
            if auth:
                headers['Authorization'] = 'Bearer ' + auth
            else:
                return ""
    pages = None
    # Cache first; fetch_from_db returns (xml, content_type) or a falsy
    # value, hence the `or (None, None)` unpacking guard.
    xml, __builtin__.content_type = fetch_from_db(url) or (None, None)
    if not xml:
        xml = ""
        response = requests.get(url, headers=headers)
        response_headers = response.headers
        response = response.json()
        # Trakt signals pagination through response headers.
        page = response_headers.get("X-Pagination-Page", "")
        if page:
            pages = response_headers.get("X-Pagination-Page-Count")
            response = (response, pages)
        if type(response) == tuple:
            # paginated
            pages = response[1]
            response = response[0]
        __builtin__.content_type = "files"
        if type(response) == dict:
            if "people" in url:
                # Credits endpoint: job name -> list of credit dicts.
                for job in response:
                    for item in response[job]:
                        if "movie" in item:
                            xml += get_movie_xml(item["movie"])
                            __builtin__.content_type = "movies"
                        elif "show" in item:
                            xml += get_show_xml(item["show"])
                            __builtin__.content_type = "tvshows"
        elif type(response) == list:
            for item in response:
                if "/search/" in url:
                    xml += get_search_xml(item)
                elif "lists" in url:
                    if "items" not in url and "likes" not in url:
                        # A user's own lists: /users/<id>/lists/...
                        user_id = url.split("/")[4]
                        xml += get_lists_xml(item, user_id)
                    if "likes/lists" in url:
                        xml += get_likes_xml(item)
                # NOTE(review): indentation reconstructed -- this chain
                # also runs for search/list items that embed a movie or
                # show dict; confirm the intended nesting.
                if "movie" in item:
                    xml += get_movie_xml(item["movie"])
                    __builtin__.content_type = "movies"
                elif "show" in item:
                    xml += get_show_xml(item["show"])
                    __builtin__.content_type = "tvshows"
                elif "person" in item:
                    xml += get_person_xml(item)
                else:
                    # one of the annoying types: bare movie/show objects
                    # with no wrapping key, distinguished by the URL.
                    if "movies" in url:
                        xml += get_movie_xml(item)
                        __builtin__.content_type = "movies"
                    elif "shows" in url and "season" not in url:
                        xml += get_show_xml(item)
                        __builtin__.content_type = "tvshows"
        if pages:
            # Build the "Next Page" entry by incrementing the page
            # parameter of the current URL.
            if 'limit' in url:
                link, page = url.split('&page=')
                page = int(page)
                next_page = page + 1
                next_url = '%s&page=%s' % (link, next_page)
            else:
                splitted = url.split("?")
                if len(splitted) > 1:
                    args = urlparse.parse_qs(splitted[1])
                    page = int(args.get("page", [1])[0])
                    # NOTE(review): the int 2 here vs str(page + 1) below
                    # is an inconsistency; urlencode stringifies both.
                    if not args.get("page", ""):
                        args["page"] = 2
                    else:
                        args["page"] = str(page + 1)
                    next_url = "%s?%s" % (splitted[0], urllib.urlencode(args))
                else:
                    page = 1
                    next_url = urlparse.urljoin(splitted[0], "?page=2")
            if not COLOR2 == "":
                myPage = "[COLOR %s]Next Page >>[/COLOR]" % COLOR2
            else:
                myPage = "Next Page >>"
            xml += "<dir>\n"\
                   "\t<title>%s</title>\n"\
                   "\t<trakt>%s</trakt>\n"\
                   "\t<thumbnail>https://raw.githubusercontent.com/MrKoyu/artwork/master/icons_kiddo/kiddo_next.png</thumbnail>\n"\
                   "\t<summary>Go To Page %s</summary>\n"\
                   "</dir>" % (myPage, next_url, page + 1)
        xml = remove_non_ascii(xml)
        save_to_db((xml, __builtin__.content_type), url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), __builtin__.content_type)
def get_wcmainstream(subid):
    """Scrape a section of the Toonova front page into a Jen directory.

    *subid* has the shape "main/<section>/<index>", where <section> is
    either 'popular_series' (a div of show links with thumbnails) or
    'updates' (a table of recent episodes shown with the add-on icon).
    Every parsed entry becomes a <dir> routed through the wctoon handler;
    any scrape failure is silently skipped.
    """
    pins = ""
    xml = ""
    # "main/<section>/<index>" -> [<section>, <index>]
    parts = subid.replace('main/', '', 1).split('/')
    section = parts[0]
    anchor_re = re.compile('<a href="(.+?)">(.+?)</a>', re.DOTALL)
    try:
        homepage = requests.get('http://www.toonova.net/').content
        if section == 'popular_series':
            container = dom_parser.parseDOM(
                homepage, 'div', attrs={'id': section})[int(parts[1])]
            for entry in dom_parser.parseDOM(container, 'li'):
                try:
                    slink = dom_parser.parseDOM(
                        entry, 'div', attrs={'class': 'slink'})[0]
                    show_url, label = anchor_re.findall(slink)[0]
                    label = remove_non_ascii(
                        refreshtitle(label).replace('Episode ', 'EP:'))
                    icon = re.compile('src="(.+?)"',
                                      re.DOTALL).findall(entry)[0]
                    xml += ("<dir>"
                            " <title>%s</title>"
                            " <wctoon>wcepisode/%s</wctoon>"
                            " <thumbnail>%s</thumbnail>"
                            " <summary>%s</summary>"
                            "</dir>" % (label, show_url, icon, label))
                except:
                    continue
        elif section == 'updates':
            table = dom_parser.parseDOM(
                homepage, 'table', attrs={'id': section})[int(parts[1])]
            for row in dom_parser.parseDOM(table, 'tr'):
                try:
                    for cell in dom_parser.parseDOM(row, 'li'):
                        show_url, label = anchor_re.findall(cell)[0]
                        label = remove_non_ascii(
                            refreshtitle(label).replace('Episode ', 'EP:'))
                        xml += ("<dir>"
                                " <title>%s</title>"
                                " <wctoon>wcepisode/%s</wctoon>"
                                " <thumbnail>%s</thumbnail>"
                                " <summary>%s</summary>"
                                "</dir>" % (label, show_url, addon_icon,
                                            label))
                except:
                    continue
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
def sport_Replay_list():
    # List sports replays from an Airtable base. Each record may carry up to
    # four sublinks; records mark unused slots with the sentinel "-", and the
    # branches below emit only the populated sublinks.
    xml = ""
    at = Airtable('appWtf1GS8PBChZaN', 'Sports_replays', api_key='keyOHaxsTGzHU9EEh')
    match = at.get_all(maxRecords=700, view='Grid view')
    # NOTE(review): this regex parses the *repr* of the Airtable records
    # (str(match)), which depends on Python 2 unicode repr formatting and on
    # dict ordering — fragile; verify it still matches the library's output.
    results = re.compile("link4': u'(.+?)'.+?link3': u'(.+?)'.+?link2': u'(.+?)'.+?fanart': u'(.+?)'.+?summary': u'(.+?)'.+?link': u'(.+?)'.+?thumbnail': u'(.+?)'.+?channel': u'(.+?)'",re.DOTALL).findall(str(match))
    for link4,link3,link2,fanart,summary,link,thumbnail,channel in results:
        channel = remove_non_ascii(channel)
        # NOTE(review): "[COLORwhite]" lacks the space Kodi expects in
        # "[COLOR white]" — confirm whether the skin tolerates this.
        if link2 == "-":
            # Only one link populated.
            xml += "<item>"\
                   "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                   "<meta>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<summary></summary>"\
                   "</meta>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "</item>" % (channel,thumbnail,fanart,link)
        elif link3 == "-":
            # Two links populated.
            xml += "<item>"\
                   "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                   "<meta>"\
                   "<content>movie</content>"\
                   "<imdb></imdb>"\
                   "<title>%s</title>"\
                   "<year></year>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<summary></summary>"\
                   "</meta>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "</item>" % (channel,channel,thumbnail,fanart,link,link2)
        elif link4 == "-":
            # Three links populated.
            xml += "<item>"\
                   "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                   "<meta>"\
                   "<content>movie</content>"\
                   "<imdb></imdb>"\
                   "<title>%s</title>"\
                   "<year></year>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<summary></summary>"\
                   "</meta>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "</item>" % (channel,channel,thumbnail,fanart,link,link2,link3)
        else:
            # All four links populated.
            xml += "<item>"\
                   "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                   "<meta>"\
                   "<content>movie</content>"\
                   "<imdb></imdb>"\
                   "<title>%s</title>"\
                   "<year></year>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<summary></summary>"\
                   "</meta>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "</item>" % (channel,channel,thumbnail,fanart,link,link2,link3,link4)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdblists(url):
    # Scrape an IMDb list page and emit TV-show <dir> entries or movie <item>
    # entries, with artwork resolved through the TMDb "find by IMDb id" API.
    xml = ""
    link = 'http://www.imdb.com/' + url
    listhtml = requests.get(link).content
    # Brittle, layout-dependent regex over IMDb's rendered HTML; captures
    # (thumbnail, imdb id, title, year-ish text) per list entry.
    match = re.compile(
        '<img alt=".+?"\nclass="loadlate"\n.+?"([^"]+)"\ndata-tconst="([^"]+)"\n.+?\nsrc=".+?"\n.+?\n</a>.+?</div>\n.+?".+?">\n.+?".+?">\n.+?".+?">.+?</span>\n.+?\n.+?.+?\n>(.+?)</a>\n.+?".+?">([^"]+)</span>\n</h3>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for thumbnail, imdb, title, year in match:
        # Heuristic: a long "year" string like "(2010-2015)" means a TV show;
        # a short one like "(2012)" means a movie. TODO confirm the cutoff.
        if len(year) >= 8:
            tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
            tmdbhtml = requests.get(tmdb_url).content
            Poster_path = re.compile(
                '"poster_path":"(.+?)".+?"backdrop_path":"(.+?)"',
                re.DOTALL).findall(tmdbhtml)
            for poster_path, backdrop_path in Poster_path:
                name = title + " " + year
                year = year.replace("(","").replace(")","")
                xml += "<dir>"\
                       "<title>%s</title>"\
                       "<meta>"\
                       "<content>tvshow</content>"\
                       "<imdb>%s</imdb>"\
                       "<imdburl>season/%s</imdburl>"\
                       "<tvdb></tvdb>"\
                       "<tvshowtitle>%s</tvshowtitle>"\
                       "<year>%s</year>"\
                       "</meta>"\
                       "<link></link>"\
                       "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
                       "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
                       "</dir>" % (name, imdb, imdb, title, year, poster_path, backdrop_path)
        else:
            tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
            tmdbhtml = requests.get(tmdb_url).content
            # Movie results order backdrop before poster in TMDb's JSON here.
            Poster_path = re.compile(
                '"backdrop_path":"(.+?)".+?"overview":".+?","poster_path":"(.+?)"}',
                re.DOTALL).findall(tmdbhtml)
            for backdrop_path, poster_path in Poster_path:
                name = title + " " + year
                year = year.replace("(","").replace(")","")
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<meta>"\
                       "<content>movie</content>"\
                       "<imdb>%s</imdb>"\
                       "<title>%s</title>"\
                       "<year>%s</year>"\
                       "</meta>"\
                       "<link>"\
                       "<sublink>search</sublink>"\
                       "<sublink>searchsd</sublink>"\
                       "</link>"\
                       "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
                       "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
                       "</item>" % (name, imdb, title, year, poster_path, backdrop_path)
    try:
        # Append a "Next Page" entry when IMDb offers one.
        next_page = re.compile(
            '<a class=".+?next-page" href="(.+?)">',
            re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
        xml += "<dir>"\
               "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
               "<imdburl>%s</imdburl>"\
               "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
               "</dir>" % (next_page)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def pornstar_vids_cnw(url):
    # List videos for one celebsnudeworld.com category, with DB-backed
    # caching: serve cached XML when present, otherwise scrape and cache.
    url = url.replace('category/', '')
    url = urlparse.urljoin('http://www.celebsnudeworld.com/', url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            # First entry always points back to the CNW main menu.
            xml += "<dir>"\
                   "    <title>Celebs Nude World Home</title>"\
                   "    <meta>"\
                   "        <summary>Go back to the CNW main menu</summary>"\
                   "    </meta>"\
                   "    <link>file://adult/cnw/main.xml</link>"\
                   "</dir>"
            headers = {'User_Agent': User_Agent}
            html = requests.get(url, headers=headers).content
            cat_divs = dom_parser.parseDOM(html, 'ul', attrs={'class': 'videos'})[0]
            vid_entries = dom_parser.parseDOM(cat_divs, 'li')
            for vid_section in vid_entries:
                # Thumbnails and page links are site-relative; absolutize them.
                thumbnail = urlparse.urljoin(
                    'http://www.celebsnudeworld.com/',
                    re.compile('src="(.+?)"',
                               re.DOTALL).findall(str(vid_section))[0])
                vid_page_url, title = re.compile('href="(.+?)"\stitle="(.+?)"',
                                                 re.DOTALL).findall(
                                                     str(vid_section))[0]
                vid_page_url = urlparse.urljoin(
                    'http://www.celebsnudeworld.com/', vid_page_url)
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <meta>"\
                       "        <summary>%s</summary>"\
                       "    </meta>"\
                       "    <cnw>%s</cnw>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (title,title,vid_page_url,thumbnail)
            try:
                # Index [1] is "next" when both prev and next exist; fall back
                # to [0] on the first page where only "next" is rendered.
                try:
                    next_page = dom_parser.parseDOM(
                        html, 'a', attrs={'class': 'prevnext'}, ret='href')[1]
                except:
                    next_page = dom_parser.parseDOM(
                        html, 'a', attrs={'class': 'prevnext'}, ret='href')[0]
                next_page = next_page.replace('/', '', 1)
                xml += "<dir>"\
                       "    <title>Next Page</title>"\
                       "    <meta>"\
                       "        <summary>Click here for more p**n bitches!</summary>"\
                       "    </meta>"\
                       "    <cnw>category/%s</cnw>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</dir>" % (next_page,next_icon)
            except:
                pass
        except:
            pass
    save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def open_movie_results():
    """Prompt for a movie name and fuzzy-search three Airtable movie bases.

    Collects every record name from the three bases, fuzzy-matches the
    user's query against them, then re-queries each base for the matched
    names and renders the results. Shows a "not found" entry when the
    fuzzy search yields nothing.

    Fix: the "not found" branch now returns after displaying, so
    display_list() is no longer called a second time at the end of the
    function with the same xml (which previously rendered the listing twice).
    """
    pins = ""
    xml = ""
    show = koding.Keyboard(heading='Movie Name')
    movie_list = []
    # Base 1: 'the_duke'.
    at = Airtable('app27kXZLXlXw0gRh', 'the_duke', api_key='keyikW1exArRfNAWj')
    match = at.get_all(maxRecords=1200, sort=["name"])
    for field in match:
        res = field['fields']
        name = res['name']
        name = remove_non_ascii(name)
        try:
            movie_list.append(name)
        except:
            pass
    # Base 2: 'Creature'.
    at2 = Airtable('appvv8DXDsLjqkekU', 'Creature', api_key='keyikW1exArRfNAWj')
    match2 = at2.get_all(maxRecords=1200, sort=["name"])
    for field2 in match2:
        res2 = field2['fields']
        name2 = res2['name']
        name2 = remove_non_ascii(name2)
        try:
            movie_list.append(name2)
        except:
            pass
    # Base 3: 'bnw_movies' (records use link1..link4 instead of link_a..link_e).
    at3 = Airtable('appbXfuDDhnWqYths', 'bnw_movies', api_key='keyikW1exArRfNAWj')
    match5 = at3.get_all(maxRecords=1200, sort=["name"])
    for field3 in match5:
        res3 = field3['fields']
        name3 = res3['name']
        name3 = remove_non_ascii(name3)
        try:
            movie_list.append(name3)
        except:
            pass
    search_result = koding.Fuzzy_Search(show, movie_list)
    if not search_result:
        xbmc.log("--------no results--------",level=xbmc.LOGNOTICE)
        xml += "<item>"\
               "<title>[COLOR=orange][B]Movie was not found[/B][/COLOR]</title>"\
               "</item>"
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
        return  # done: avoid falling through and displaying a second time
    for item in search_result:
        item2 = str(item)
        item2 = remove_non_ascii(item2)
        xbmc.log(item2,level=xbmc.LOGNOTICE)
        # Look the matched name up in each base; each lookup is best-effort.
        try:
            match3 = at.search("name", item2)
            for field2 in match3:
                res2 = field2['fields']
                name2 = res2["name"]
                name3 = remove_non_ascii(name2)
                fanart = res2['fanart']
                thumbnail = res2['thumbnail']
                summary = res2['summary']
                summary = remove_non_ascii(summary)
                link_a = res2['link_a']
                link_b = res2['link_b']
                link_c = res2['link_c']
                link_d = res2['link_d']
                link_e = res2['link_e']
                trailer = res2['trailer']
                xml += display_xml(name2,trailer,summary,thumbnail,fanart,link_a,link_b,link_c,link_d,link_e)
        except:
            pass
        try:
            match4 = at2.search("name", item2)
            for field2 in match4:
                res2 = field2['fields']
                name2 = res2["name"]
                name3 = remove_non_ascii(name2)
                fanart = res2['fanart']
                thumbnail = res2['thumbnail']
                summary = res2['summary']
                summary = remove_non_ascii(summary)
                link_a = res2['link_a']
                link_b = res2['link_b']
                link_c = res2['link_c']
                link_d = res2['link_d']
                link_e = res2['link_e']
                trailer = res2['trailer']
                xml += display_xml(name2,trailer,summary,thumbnail,fanart,link_a,link_b,link_c,link_d,link_e)
        except:
            pass
        try:
            match6 = at3.search("name", item2)
            for field2 in match6:
                res2 = field2['fields']
                name2 = res2["name"]
                name3 = remove_non_ascii(name2)
                fanart = res2['fanart']
                thumbnail = res2['thumbnail']
                summary = res2['summary']
                summary = remove_non_ascii(summary)
                link1 = res2['link1']
                link2 = res2['link2']
                link3 = res2['link3']
                link4 = res2['link4']
                trailer = res2['trailer']
                # "-" marks an unused link slot; emit only populated sublinks.
                if link2 == "-":
                    xml += "<item>"\
                           "<title>%s</title>"\
                           "<meta>"\
                           "<content>movie</content>"\
                           "<imdb></imdb>"\
                           "<title></title>"\
                           "<year></year>"\
                           "<thumbnail>%s</thumbnail>"\
                           "<fanart>%s</fanart>"\
                           "<summary>%s</summary>"\
                           "</meta>"\
                           "<link>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s(Trailer)</sublink>"\
                           "</link>"\
                           "</item>" % (name2,thumbnail,fanart,summary,link1,trailer)
                elif link3 == "-":
                    xml += "<item>"\
                           "<title>%s</title>"\
                           "<meta>"\
                           "<content>movie</content>"\
                           "<imdb></imdb>"\
                           "<title></title>"\
                           "<year></year>"\
                           "<thumbnail>%s</thumbnail>"\
                           "<fanart>%s</fanart>"\
                           "<summary>%s</summary>"\
                           "</meta>"\
                           "<link>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s(Trailer)</sublink>"\
                           "</link>"\
                           "</item>" % (name2,thumbnail,fanart,summary,link1,link2,trailer)
                elif link4 == "-":
                    xml += "<item>"\
                           "<title>%s</title>"\
                           "<meta>"\
                           "<content>movie</content>"\
                           "<imdb></imdb>"\
                           "<title></title>"\
                           "<year></year>"\
                           "<thumbnail>%s</thumbnail>"\
                           "<fanart>%s</fanart>"\
                           "<summary>%s</summary>"\
                           "</meta>"\
                           "<link>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s(Trailer)</sublink>"\
                           "</link>"\
                           "</item>" % (name2,thumbnail,fanart,summary,link1,link2,link3,trailer)
                else:
                    xml += "<item>"\
                           "<title>%s</title>"\
                           "<meta>"\
                           "<content>movie</content>"\
                           "<imdb></imdb>"\
                           "<title></title>"\
                           "<year></year>"\
                           "<thumbnail>%s</thumbnail>"\
                           "<fanart>%s</fanart>"\
                           "<summary>%s</summary>"\
                           "</meta>"\
                           "<link>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s(Trailer)</sublink>"\
                           "</link>"\
                           "</item>" % (name2,thumbnail,fanart,summary,link1,link2,link3,link4,trailer)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
def get_stream(url):
    """Scrape sports-stream.net's schedule into a Kodi listing.

    Emits a clock item showing site time (GMT+3), section headings, and one
    SportsDevil <plugin> entry per stream link.

    Cleanup: removed dead code — the `block1` regex result and the `time2`
    hour extractions were computed but never used.
    """
    xml = ""
    try:
        url = "http://www.sports-stream.net/schedule.html"
        headers = {'User_Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        # Site clock runs at GMT+3 on a 1..24 scale: 24 stays 24, 25..26 wrap
        # to 1..2. (gmtime hour is 0..23, so +3 never exceeds 26; the ==27
        # case is kept for safety but is unreachable.)
        site_hour = strftime("%H", gmtime())
        site_hour2 = int(site_hour) + 3
        if site_hour2 == 25:
            site_hour2 = 1
        if site_hour2 == 26:
            site_hour2 = 2
        if site_hour2 == 27:
            site_hour2 = 3
        site_hour3 = str(site_hour2)
        site_minute = strftime("%M", gmtime())
        site_time = site_hour3 + ":" + site_minute
        xml += "<item>"\
               "<title>[COLOR blue]Sports Streams Time GMT+3 = (%s)[/COLOR]</title>"\
               "<thumbnail>http://ttmedia.live/logos/SPORTS3.png</thumbnail>"\
               "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
               "<link></link>"\
               "</item>" % (site_time)
        try:
            # First section heading on the page.
            match = re.compile('<h3>(.+?)<input onclick=', re.DOTALL).findall(html)
            head1 = match[0]
            head1 = head1.replace(" ", "")
            xml += "<item>"\
                   "<title>[COLOR blue]%s[/COLOR]</title>"\
                   "<thumbnail>http://ttmedia.live/logos/SPORTS3.png</thumbnail>"\
                   "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                   "<link></link>"\
                   "</item>" % (head1)
        except:
            pass
        try:
            # Streams of the first section (text between the two red headings);
            # fall back to scanning the whole page if that block isn't found.
            block3 = re.compile(
                '<br><font color="red"><h3>(.+?)<br><font color="red"><h3>',
                re.DOTALL).findall(html)
            match5 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',
                re.DOTALL).findall(str(block3))
            for time, name, link in match5:
                link = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>%s - %s</title>"\
                       "<thumbnail>http://ttmedia.live/logos/SPORTS3.png</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (time,name,link)
        except:
            match1 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',
                re.DOTALL).findall(html)
            for time, name, link in match1:
                link = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>%s - %s</title>"\
                       "<thumbnail>http://ttmedia.live/logos/SPORTS3.png</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (time,name,link)
        try:
            # Second section heading.
            match3 = re.compile(
                '<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<input onclick=',
                re.DOTALL).findall(html)
            for head2 in match3:
                head2 = head2.replace(" ", "")
                xml += "<item>"\
                       "<title>[COLOR blue]%s[/COLOR]</title>"\
                       "<thumbnail>http://ttmedia.live/logos/SPORTS3.png</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link></link>"\
                       "</item>" % (head2)
        except:
            pass
        try:
            # Streams of the second section.
            block2 = re.compile(
                '<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<script data-cfasync',
                re.DOTALL).findall(html)
            match4 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',
                re.DOTALL).findall(str(block2))
            for time, name, link in match4:
                link = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>%s - %s</title>"\
                       "<thumbnail>http://ttmedia.live/logos/SPORTS3.png</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (time,name,link)
        except:
            pass
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def open_bml_search(url):
    """Prompt for a movie name and fuzzy-search two Airtable bases.

    Same flow as open_movie_results but over two 'Imported_table' bases;
    matched records are rendered through display_xml().

    Fix: the "not found" branch now returns after displaying, so
    display_list() is no longer called a second time with the same xml
    (previously the "not found" listing was rendered twice).
    """
    pins = ""
    xml = ""
    show = koding.Keyboard(heading='Movie Name')
    movie_list = []
    at = Airtable('app4e5SF0cVMFFfIk', 'Imported_table', api_key='keyikW1exArRfNAWj')
    match = at.get_all(maxRecords=1200, sort=['name'])
    for field in match:
        res = field['fields']
        name = res['name']
        movie_list.append(name)
    at3 = Airtable('appPdiS6ARoPTl0XH', 'Imported_table', api_key='keyikW1exArRfNAWj')
    match3 = at3.get_all(maxRecords=1200, sort=['name'])
    for field3 in match3:
        res3 = field3['fields']
        name3 = res3['name']
        movie_list.append(name3)
    search_result = koding.Fuzzy_Search(show, movie_list)
    if not search_result:
        xbmc.log("--------no results--------", level=xbmc.LOGNOTICE)
        xml += "<item>"\
               "<title>[COLOR=orange][B]Movie was not found[/B][/COLOR]</title>"\
               "</item>"
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
        return  # done: avoid falling through and displaying a second time
    for item in search_result:
        item2 = str(item)
        item2 = remove_non_ascii(item2)
        # Re-query each base for the matched name; each lookup is best-effort.
        try:
            match2 = at.search('name', item2)
            for field2 in match2:
                res2 = field2['fields']
                name = res2['name']
                name = remove_non_ascii(name)
                fanart = res2['fanart']
                thumbnail = res2['thumbnail']
                summary = res2['summary']
                summary = remove_non_ascii(summary)
                link_a = res2['link_a']
                link_b = res2['link_b']
                link_c = res2['link_c']
                link_d = res2['link_d']
                link_e = res2['link_e']
                trailer = res2['trailer']
                xml += display_xml(name, trailer, summary, thumbnail, fanart,
                                   link_a, link_b, link_c, link_d, link_e)
        except:
            pass
        try:
            match2 = at3.search('name', item2)
            for field2 in match2:
                res2 = field2['fields']
                name = res2['name']
                name = remove_non_ascii(name)
                fanart = res2['fanart']
                thumbnail = res2['thumbnail']
                summary = res2['summary']
                summary = remove_non_ascii(summary)
                link_a = res2['link_a']
                link_b = res2['link_b']
                link_c = res2['link_c']
                link_d = res2['link_d']
                link_e = res2['link_e']
                trailer = res2['trailer']
                xml += display_xml(name, trailer, summary, thumbnail, fanart,
                                   link_a, link_b, link_c, link_d, link_e)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
def get_DScats(url):
    """List one documentarystorm.com category page via the WP JSON API.

    `url` looks like "dscategory/<cat>[/page/<n>]". Results are cached in
    the local DB keyed by the JSON API URL. Each post's page is fetched to
    resolve the actual embedded video URL (YouTube, archive.org, rt.com,
    or other known hosts).

    Fixes: `replaceHTMLCodes(docu_title)` previously discarded its result,
    so titles were never HTML-entity-decoded — the return value is now
    assigned. Also `== None` -> `is None` and the local `file` no longer
    shadows the builtin.
    """
    url = url.replace('dscategory/', '')  # Strip our category tag off.
    orig_cat = url.split("/")[0]
    try:
        npage = url.split("/")[2]
    except:
        npage = 1
    cat_id = get_category_id(orig_cat)
    json_url = 'https://documentarystorm.com/wp-json/wp/v2/posts?per_page=50&page=%s&order=asc&categories=%s' % (
        npage, cat_id)
    xml = fetch_from_db(json_url)
    if not xml:
        xml = ""
        try:
            html = requests.get(json_url, headers=headers).content
            doc_list = re.compile(
                '"id":(.+?),"date".+?"link":"(.+?)","title".+?"rendered":"(.+?)"',
                re.DOTALL).findall(html)
            count = 0
            for post_id, docu_url, docu_title in doc_list:
                count += 1
                try:
                    docu_url = docu_url.replace('\\', '')
                    docu_html = requests.get(docu_url, headers=headers).content
                    # Prefer the schema.org embedUrl meta; fall back to the
                    # first iframe on the page.
                    try:
                        docu_item = dom_parser.parseDOM(
                            docu_html,
                            'meta',
                            attrs={'itemprop': 'embedUrl'},
                            ret='content')[0]
                    except:
                        docu_item = None
                    if docu_item is None:
                        try:
                            docu_item = dom_parser.parseDOM(docu_html,
                                                            'iframe',
                                                            ret='src')[0]
                        except:
                            continue
                    if 'http:' not in docu_item and 'https:' not in docu_item:
                        docu_item = 'https:' + docu_item
                    docu_url = docu_item
                    docu_title = replaceHTMLCodes(docu_title)  # fix: keep the decoded title
                    if 'rt.com' in docu_url:
                        # rt.com embeds hide the file URL in inline JS.
                        res_html = requests.get(docu_url, headers=headers).content
                        pattern_file = r"""file: '(.*?)'"""
                        r = re.search(pattern_file, res_html)
                        if r:
                            file_url = r.group(1)
                            docu_url = file_url.replace('cdnv.rt.com', 'rtd.rt.com')
                    docu_summary = re.compile(
                        'meta name="description" content="(.+?)"',
                        re.DOTALL).findall(docu_html)[0]
                    try:
                        docu_icon = re.compile(
                            'property="og:image" content="(.+?)"',
                            re.DOTALL).findall(docu_html)[0]
                    except:
                        docu_icon = re.compile(
                            'itemprop="thumbnailUrl" content="(.+?)"',
                            re.DOTALL).findall(docu_html)[0]
                    if 'youtube' in docu_url:
                        if 'videoseries' not in docu_url:
                            xml += "<item>"\
                                   "    <title>[COLOR red]%s[/COLOR]</title>"\
                                   "    <link>%s</link>"\
                                   "    <thumbnail>%s</thumbnail>"\
                                   "    <summary>%s</summary>"\
                                   "</item>" % (docu_title,docu_url,docu_icon,docu_summary)
                        else:
                            # Playlist embeds route through the YouTube addon.
                            video_id = docu_url.split("=")[-1]
                            docu_url = 'plugin://plugin.video.youtube/playlist/%s/' % video_id
                            xml += "<item>"\
                                   "    <title>[COLOR red]%s[/COLOR]</title>"\
                                   "    <link>%s</link>"\
                                   "    <thumbnail>%s</thumbnail>"\
                                   "    <summary>%s</summary>"\
                                   "</item>" % (docu_title,docu_url,docu_icon,docu_summary)
                    elif 'archive.org/embed' in docu_url:
                        # Resolve the direct <source> file behind the embed.
                        docu_html = requests.get(docu_url, headers=headers).content
                        video_element = dom_parser.parseDOM(docu_html,
                                                            'source',
                                                            ret='src')[0]
                        docu_url = urlparse.urljoin('https://archive.org/',
                                                    video_element)
                        xml += "<item>"\
                               "    <title>[COLOR red]%s[/COLOR]</title>"\
                               "    <link>%s</link>"\
                               "    <thumbnail>%s</thumbnail>"\
                               "    <summary>%s</summary>"\
                               "</item>" % (docu_title,docu_url,docu_icon,docu_summary)
                    elif any(x in docu_url for x in reg_items):
                        xml += "<item>"\
                               "    <title>[COLOR red]%s[/COLOR]</title>"\
                               "    <link>%s</link>"\
                               "    <thumbnail>%s</thumbnail>"\
                               "    <summary>%s</summary>"\
                               "</item>" % (docu_title,docu_url,docu_icon,docu_summary)
                    elif any(x in docu_url for x in unreg_items):
                        # most of these gone now so screw it lol, and no valid
                        # player known yet to work with nfb
                        continue
                    else:
                        xbmcgui.Dialog().ok('Unknown Host - ' + docu_title,
                                            str(docu_url))
                except:
                    continue
            try:
                # A full page (50 posts) implies there may be another page.
                if count == 50:
                    xml += "<dir>"\
                           "    <title>[COLOR red]Next Page >>[/COLOR]</title>"\
                           "    <docus>dscategory/%s/page/%s</docus>"\
                           "</dir>" % (orig_cat,str((int(npage)+1)))
            except:
                pass
        except:
            pass
    save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_stream(url):
    # Scrape sports-stream.net's schedule into section headings plus
    # SportsDevil <plugin> entries.
    # NOTE(review): this is a second `def get_stream` — it shadows the earlier
    # definition in this file (same name, different artwork/titles), so only
    # this one is ever callable. Confirm which version is intended and drop
    # the other.
    xml = ""
    try:
        url = "http://www.sports-stream.net/schedule.html"
        headers = {'User_Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        # NOTE(review): `block1` is never used afterwards — dead code.
        block1 = re.compile('<br><font color="red">(.+?)', re.DOTALL).findall(html)
        try:
            # Section headings.
            match = re.compile('<h3>(.+?)<input onclick=', re.DOTALL).findall(html)
            for head1 in match:
                head1 = head1.replace(" ", "")
                xml += "<item>"\
                       "<title>[COLOR blue]%s[/COLOR]</title>"\
                       "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                       "<fanart>https://i.imgur.com/eLU5W24.jpg</fanart>"\
                       "<link></link>"\
                       "</item>" % (head1)
        except:
            pass
        try:
            # All stream rows: (time, event name, stream page link).
            match1 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',
                re.DOTALL).findall(html)
            for time, name, link in match1:
                link = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>[COLORwhite][B]%s - %s[/COLOR][/B]</title>"\
                       "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                       "<fanart>https://i.imgur.com/eLU5W24.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (time,name,link)
        except:
            pass
        try:
            # Second section heading (text between the two red <h3> blocks).
            match3 = re.compile(
                '<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<input onclick=',
                re.DOTALL).findall(html)
            for head2 in match3:
                head2 = head2.replace(" ", "")
                xml += "<item>"\
                       "<title>[COLOR blue]%s[/COLOR]</title>"\
                       "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                       "<fanart>https://i.imgur.com/eLU5W24.jpg</fanart>"\
                       "<link></link>"\
                       "</item>" % (head2)
        except:
            pass
        try:
            # Streams belonging to the second section.
            block2 = re.compile(
                '<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<script data-cfasync',
                re.DOTALL).findall(html)
            match4 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',
                re.DOTALL).findall(str(block2))
            for time, name, link in match4:
                link = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>[COLORwhite][B]%s - %s[/COLOR][/B]</title>"\
                       "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                       "<fanart>https://i.imgur.com/eLU5W24.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (time,name,link)
        except:
            pass
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_MTortureByTag(url):
    """List posts for one tag of a WordPress site via its JSON API.

    `url` looks like "tag/<tag_id>/<page_id>". Results are cached in the
    local DB keyed by the built API URL; a full page (50 posts) appends a
    Next Page entry.

    Fix: `requests.get(url, headers)` passed the headers dict as the
    positional `params` argument (appending it to the query string instead
    of sending request headers); it is now passed as `headers=headers`.
    """
    tag_id = url.split('/')[1]
    page_id = url.split('/')[2]
    url = base_main_link % ((json_tag_url % (per_page, tag_id, page_id)))
    count = 0
    xml = fetch_from_db(url)
    # NOTE(review): the '1' sentinel for cache hits is unusual — confirm
    # against fetch_from_db's contract.
    if not xml == '1':
        try:
            xml = ""
            response = requests.get(url, headers=headers).json()
            try:
                # The API returns {"code": "...invalid..."} for bad pages.
                if 'invalid' in response['code']:
                    return
            except:
                pass
            count = len(response)
            for post in response:
                title = remove_non_ascii(
                    replaceHTMLCodes(post['title']['rendered']))
                description = remove_non_ascii(
                    replaceHTMLCodes(post['excerpt']['rendered'])).replace(
                        '\/', '/')
                # Strip HTML tags and the trailing "See More" from excerpts.
                description = re.sub('<[^<]+?>', '',
                                     description).replace('\nSee More', '')
                content = remove_non_ascii(
                    replaceHTMLCodes(post['content']['rendered'])).replace(
                        '\/', '/')
                link = re.compile('<video controls.+?src=\"(.+?)\"').findall(
                    content)[0]
                icon = re.compile(
                    '<meta itemprop=\"thumbnailUrl\" content=\"(.+?)\"'
                ).findall(content)[0]
                if len(link) > 0:
                    xml += "<item>"\
                           "    <title>%s</title>"\
                           "    <meta>"\
                           "        <summary>%s</summary>"\
                           "    </meta>"\
                           "    <mtorture>play/%s|%s</mtorture>"\
                           "    <thumbnail>%s</thumbnail>"\
                           "</item>" % (title,description,link,title,icon)
            try:
                # A full page implies there may be another.
                if count == 50:
                    xml += "<dir>"\
                           "    <title>Next Page >></title>"\
                           "    <meta>"\
                           "        <summary>Click here for the next page</summary>"\
                           "    </meta>"\
                           "    <mtorture>tag/%s/%s</mtorture>"\
                           "</dir>" % (tag_id,str(int(page_id)+1))
            except:
                failure = traceback.format_exc()
                xbmcgui.Dialog().textviewer('Item Exception', str(failure))
                pass
            save_to_db(xml, url)
        except:
            failure = traceback.format_exc()
            xbmcgui.Dialog().textviewer('Item Exception', str(failure))
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def open_table(url):
    # List one category of an Airtable-backed events table.
    # `url` ends with ".../<table>/<key>/<tag>"; records carry up to six
    # sublinks, with "-" marking the first unused slot, plus a 'Time' field
    # of the form "<description>@<time>" (or "-", or "...Final Score...").
    xml = ""
    # NOTE(review): `z1` is assigned from the module global `m1` but never
    # used — appears to be dead code; confirm `m1` has no import-time side
    # effect before removing.
    z1 = m1
    table = url.split("/")[-3]
    key = url.split("/")[-2]
    tag = url.split("/")[-1]
    at = Airtable(key, table, api_key='keyikW1exArRfNAWj')
    match = at.search('category', tag ,view='Grid view')
    for field in match:
        try:
            res = field['fields']
            name = res['name']
            name = remove_non_ascii(name)
            thumbnail = res['thumbnail']
            fanart = res['fanart']
            link1 = res['link1']
            link2 = res['link2']
            link3 = res['link3']
            link4 = res['link4']
            link5 = res['link5']
            link6 = res['link6']
            time = res['Time']
            if time == "-":
                # No scheduled time: plain title.
                time = ""
                dsp = name
            else:
                if "Final Score" in time:
                    time2 = time
                    dec = ""
                else:
                    # "<description>@<time>" — split into prefix and time.
                    time2 = time.split("@")[-1]
                    dec = time.split("@")[0]
                # Convert the listed time from US/Eastern for display.
                (display_time) = convDateUtil(time2, 'default', 'US/Eastern')
                dsp = ("[B][COLORdodgerblue]%s %s[/COLOR][/B]" % (dec,display_time)) + " " + name
            # Emit only the populated sublinks ("-" marks the first unused).
            if link2 == "-":
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "</item>" % (dsp,thumbnail,fanart,link1)
            elif link3 == "-":
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "</item>" % (dsp,thumbnail,fanart,link1,link2)
            elif link4 == "-":
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "</item>" % (dsp,thumbnail,fanart,link1,link2,link3)
            elif link5 == "-":
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "</item>" % (dsp,thumbnail,fanart,link1,link2,link3,link4)
            elif link6 == "-":
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "</item>" % (dsp,thumbnail,fanart,link1,link2,link3,link4,link5)
            else:
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "</item>" % (dsp,thumbnail,fanart,link1,link2,link3,link4,link5,link6)
        except:
            # Skip records with missing fields.
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def open_genre_movies(url):
    # List movies of one genre from the module's configured Airtable base.
    # The genre is the last path segment of `url`; records carry up to five
    # links with "-" marking the first unused slot. Note the literal
    # "(Trailer)" sublink has no URL substituted in any branch.
    xml = ""
    genre = url.split("/")[-1]
    at = Airtable(table_id, table_name, api_key=workspace_api_key)
    try:
        match = at.search('type', genre)
        for field in match:
            res = field['fields']
            name = res['name']
            name = remove_non_ascii(name)
            summary = res['summary']
            summary = remove_non_ascii(summary)
            fanart = res['fanart']
            thumbnail = res['thumbnail']
            link1 = res['link1']
            link2 = res['link2']
            link3 = res['link3']
            link4 = res['link4']
            link5 = res['link5']
            if link2 == "-":
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<meta>"\
                       "<content>movie</content>"\
                       "<imdb></imdb>"\
                       "<title></title>"\
                       "<year></year>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<summary>%s</summary>"\
                       "</meta>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>(Trailer)</sublink>"\
                       "</link>"\
                       "</item>" % (name,thumbnail,fanart,summary,link1)
            elif link3 == "-":
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<meta>"\
                       "<content>movie</content>"\
                       "<imdb></imdb>"\
                       "<title></title>"\
                       "<year></year>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<summary>%s</summary>"\
                       "</meta>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>(Trailer)</sublink>"\
                       "</link>"\
                       "</item>" % (name,thumbnail,fanart,summary,link1,link2)
            elif link4 == "-":
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<meta>"\
                       "<content>movie</content>"\
                       "<imdb></imdb>"\
                       "<title></title>"\
                       "<year></year>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<summary>%s</summary>"\
                       "</meta>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>(Trailer)</sublink>"\
                       "</link>"\
                       "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3)
            elif link5 == "-":
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<meta>"\
                       "<content>movie</content>"\
                       "<imdb></imdb>"\
                       "<title></title>"\
                       "<year></year>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<summary>%s</summary>"\
                       "</meta>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>(Trailer)</sublink>"\
                       "</link>"\
                       "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3,link4)
            else:
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<meta>"\
                       "<content>movie</content>"\
                       "<imdb></imdb>"\
                       "<title></title>"\
                       "<year></year>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<summary>%s</summary>"\
                       "</meta>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>%s</sublink>"\
                       "<sublink>(Trailer)</sublink>"\
                       "</link>"\
                       "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3,link4,link5)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def networks(html):
    # List arconaitv "cable" network channels from the already-fetched page
    # HTML, with a per-listing display cache keyed by `pins`.
    pins = "PLuginarconaitvnetworks"
    Items = fetch_from_db2(pins)
    if Items:
        # Cache hit: show the stored listing and skip scraping.
        display_data(Items)
    else:
        xml = ""
        try:
            # Channels live between the cable nav div and the next container.
            block4 = re.compile('<div class="stream-nav cable" id="cable">(.+?)<div class="acontainer">',re.DOTALL).findall(html)
            match4 = re.compile('href=(.+?) title=(.+?)>',re.DOTALL).findall(str(block4))
            xml += "<item>"\
                   "<title>[COLOR blue][B]----NETWORKS----[/B][/COLOR]</title>"\
                   "<thumbnail>https://pmcdeadline2.files.wordpress.com/2010/09/networks.jpg</thumbnail>"\
                   "<fanart>http://static.wixstatic.com/media/7217cd_6b6840f1821147ffa0380918a2110cdd.jpg</fanart>"\
                   "<link></link>"\
                   "</item>"
            for link,name in match4:
                # href/title come quoted from the repr of block4; strip the
                # escaped quotes.
                name = name.replace("\\'","")
                name = remove_non_ascii(name)
                link = link.replace("\\'","")
                link = "https://www.arconaitv.us/"+link
                image2 = get_thumb(name,html)
                if image2:
                    xml += "<plugin>"\
                           "<title>%s</title>"\
                           "<thumbnail>%s</thumbnail>"\
                           "<fanart>http://static.wixstatic.com/media/7217cd_6b6840f1821147ffa0380918a2110cdd.jpg</fanart>"\
                           "<summary>Random TV Shows</summary>"\
                           "<arconaitv>ArcLink**%s**%s**%s</arconaitv>"\
                           "</plugin>" % (name,image2,link,name,image2)
                elif not image2:
                    # No thumb on the page: fall back to get_other(), then to
                    # hard-coded per-channel artwork overrides.
                    image3 = get_other(name,html)
                    if name == "ABC":
                        image3 = "https://vignette.wikia.nocookie.net/superfriends/images/f/f2/Abc-logo.jpg/revision/latest?cb=20090329152831"
                    elif name == "Animal Planet":
                        image3 = "https://seeklogo.com/images/D/discovery-animal-planet-logo-036312EA16-seeklogo.com.png"
                    elif name == "Bravo Tv":
                        image3 = "https://kodi.tv/sites/default/files/styles/medium_crop/public/addon_assets/plugin.video.bravo/icon/icon.png?itok=VXH52Iyf"
                    elif name == "CNBC":
                        image3 = "https://i2.wp.com/republicreport.wpengine.com/wp-content/uploads/2014/06/cnbc1.png?resize=256%2C256"
                    elif name == "NBC":
                        image3 = "https://designobserver.com/media/images/mondrian/39684-NBC_logo_m.jpg"
                    elif name == "SYFY":
                        image3 = "https://kodi.tv/sites/default/files/styles/medium_crop/public/addon_assets/plugin.video.syfy/icon/icon.png?itok=ZLTAqywa"
                    elif name == "USA Network ":
                        image3 = "https://crunchbase-production-res.cloudinary.com/image/upload/c_lpad,h_256,w_256,f_auto,q_auto:eco/v1442500192/vzcordlt6w0xsnhcsloa.png"
                    elif name == "WWOR-TV":
                        image3 = "https://i.ytimg.com/vi/TlhcM0jciZo/hqdefault.jpg"
                    elif name == "BBC America":
                        image3 = "https://watchuktvabroad.net/dev/wp-content/uploads/2014/05/bbc1-icon.png"
                    elif name == "MavTV":
                        image3 = "https://yt3.ggpht.com/a-/ACSszfGbltb7pvCn52Ojd3vEHPk_2v_1_HJosa_h=s900-mo-c-c0xffffffff-rj-k-no"
                    elif name == "MSNBC":
                        image3 = "https://upload.wikimedia.org/wikipedia/commons/7/74/MSNBC_logo.png"
                    elif name == "NASA HD":
                        image3 = "http://pluspng.com/img-png/nasa-logo-png-nasa-logo-3400.png"
                    xml += "<plugin>"\
                           "<title>%s</title>"\
                           "<thumbnail>%s</thumbnail>"\
                           "<fanart>http://static.wixstatic.com/media/7217cd_6b6840f1821147ffa0380918a2110cdd.jpg</fanart>"\
                           "<summary>Random TV Shows</summary>"\
                           "<arconaitv>ArcLink**%s**%s**%s</arconaitv>"\
                           "</plugin>" % (name,image3,link,name,image3)
        except:
            pass
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), "movies", pins)
def tmdb(url):
    # Dispatch a tmdb route string (e.g. "movies/popular/2", "genre/shows/18",
    # "search/batman") to the matching tmdbsimple API call, build a Jen XML
    # listing from the JSON results and display it.  Rendered listings are
    # cached in the local DB keyed by the full route url.
    page = 1
    try:
        xml, __builtin__.content_type = fetch_from_db(url) or (None, None)
    except Exception:
        xml, __builtin__.content_type = None, None
    if not xml:
        content = "files"  # default content type when no branch sets one
        xml = ""
        response = None
        if url.startswith("movies"):
            # movies/popular|now_playing|top_rated[/<page>]
            if url.startswith("movies/popular"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().popular(page=page)
            if url.startswith("movies/now_playing"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().now_playing(page=page)
            if url.startswith("movies/top_rated"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().top_rated(page=page)
            for item in response["results"]:
                xml += get_movie_xml(item)
            content = "movies"
        elif url.startswith("people"):
            # people/popular[/<page>]
            if url.startswith("people/popular"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.People().popular(page=page)
            for item in response["results"]:
                xml += get_person_xml(item)
            content = "movies"
        elif url.startswith("movie"):
            # movie/upcoming[/<page>] (trailers).  Tested after "movies" so
            # this prefix cannot shadow the movies/* routes above.
            if url.startswith("movie/upcoming"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().upcoming(page=page)
            for item in response["results"]:
                xml += get_trailer_xml(item)
            content = "movies"
        elif url.startswith("tv"):
            # tv/popular|top_rated|today|on_the_air[/<page>]
            if url.startswith("tv/popular"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().popular(page=page)
            elif url.startswith("tv/top_rated"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().top_rated(page=page)
            elif url.startswith("tv/today"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().airing_today(page=page)
            elif url.startswith("tv/on_the_air"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().on_the_air(page=page)
            for item in response["results"]:
                xml += get_show_xml(item)
            content = "tvshows"
        elif url.startswith("list"):
            # list/<list_id>: mixed movie/show user list.
            list_id = url.split("/")[-1]
            if not response:
                response = tmdbsimple.Lists(list_id).info()
            for item in response.get("items", []):
                # Movie entries carry "title", show entries carry "name".
                if "title" in item:
                    xml += get_movie_xml(item)
                    content = "movies"
                elif "name" in item:
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("trailer"):
            # trailer/<movie_id>: video clips attached to one movie.
            movie_id = url.split("/")[-1]
            if not response:
                response = tmdbsimple.Movies(movie_id).videos()
            for item in response["results"]:
                if "type" in item:
                    xml += get_trailer_video_xml(item)
            content = "movies"
        elif url.startswith("person"):
            # person/<movies|shows>/<person_id>: that person's credits.
            split_url = url.split("/")
            person_id = split_url[-1]
            media = split_url[-2]
            if media == "movies":
                if not response:
                    response = tmdbsimple.People(person_id).movie_credits()
            elif media == "shows":
                if not response:
                    response = tmdbsimple.People(person_id).tv_credits()
            for job in response:
                # Credits are grouped by role ("cast", "crew", ...); the
                # top-level "id" key is the person id, not a credit group.
                if job == "id":
                    continue
                for item in response[job]:
                    if media == "movies":
                        xml += get_movie_xml(item)
                        content = "movies"
                    elif media == "shows":
                        xml += get_show_xml(item)
                        content = "tvshows"
        elif url.startswith("genre"):
            # genre/<movies|shows>/<genre_id>[/<page>]; page defaults to 1.
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            genre_id = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        with_genres=genre_id, page=page)
            elif media == "shows":
                if not response:
                    response = tmdbsimple.Discover().tv(with_genres=genre_id,
                                                        page=page)
            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
                elif media == "shows":
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("year"):
            # year/movies/<year>[/<page>]
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            release_year = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        primary_release_year=release_year, page=page)
            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
        elif url.startswith("network"):
            # network/shows/<network_id>[/<page>]
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            network_id = split_url[-2]
            media = split_url[-3]
            if media == "shows":
                if not response:
                    response = tmdbsimple.Discover().tv(
                        with_networks=network_id, page=page)
            for item in response["results"]:
                if media == "shows":
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("company"):
            # company/movies/<company_id>[/<page>]
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            company_id = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        with_companies=company_id, page=page)
            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
        elif url.startswith("keyword"):
            # keyword/<movies|shows>/<keyword_id>[/<page>]
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            keyword_id = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        with_keywords=keyword_id, page=page)
            elif media == "shows":
                if not response:
                    response = tmdbsimple.Discover().tv(
                        with_keywords=keyword_id, page=page)
            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
                elif media == "shows":
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("collection"):
            # collection/<collection_id>: all movies in a collection.
            split_url = url.split("/")
            collection_id = split_url[-1]
            if not response:
                response = tmdbsimple.Collections(collection_id).info()
            for item in response["parts"]:
                xml += get_movie_xml(item)
            content = "movies"
        elif url.startswith("search"):
            # search[/<term>[/<page>]]; a bare "search" prompts the user.
            if url == "search":
                term = koding.Keyboard("Search For")
                url = "search/%s" % term
            split_url = url.split("/")
            if len(split_url) == 2:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            term = split_url[-2]
            response = tmdbsimple.Search().multi(query=term, page=page)
            # NOTE(review): this branch never sets `content`, so search
            # results display with the default "files" type - confirm intended.
            for item in response["results"]:
                if item["media_type"] == "movie":
                    xml += get_movie_xml(item)
                elif item["media_type"] == "tv":
                    xml += get_show_xml(item)
                elif item["media_type"] == "person":
                    # People get two sub-directories (their shows and movies).
                    name = item["name"]
                    person_id = item["id"]
                    if item.get("profile_path", ""):
                        thumbnail = "https://image.tmdb.org/t/p/w1280/" + item[
                            "profile_path"]
                    else:
                        thumbnail = ""
                    xml += "<dir>\n"\
                           "\t<title>%s Shows TMDB</title>\n"\
                           "\t<tmdb>person/shows/%s</tmdb>\n"\
                           "\t<thumbnail>%s</thumbnail>\n"\
                           "</dir>\n\n" % (name.capitalize(), person_id, thumbnail)
                    xml += "<dir>\n"\
                           "\t<title>%s Movies TMDB</title>\n"\
                           "\t<tmdb>person/movies/%s</tmdb>\n"\
                           "\t<thumbnail>%s</thumbnail>\n"\
                           "\t</dir>\n\n" % (name.capitalize(), person_id, thumbnail)
        # Append a "Next Page" entry while the API reports more pages.
        if response and page < response.get("total_pages", 0):
            base = url.split("/")
            if base[-1].isdigit():
                base = base[:-1]  # drop the current page number
            next_url = "/".join(base) + "/" + str(page + 1)
            xml += "<dir>"\
                   "<title>Next Page >></title>"\
                   "<tmdb>%s</tmdb>"\
                   "<summary>Go To Page %s</summary>"\
                   "</dir>" % (next_url, page + 1)
        __builtin__.content_type = content
        save_to_db((xml, __builtin__.content_type), url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), __builtin__.content_type)
def do_search(term=None):
    """Search the shared search database for *term* and display matches.

    The remote search DB (an sqlite file) is downloaded into the cache
    folder when missing or stale (compared against the HTTP Last-Modified
    header), then queried with a LIKE match.  Results whose title starts
    with the search term rank before plain substring hits.  A hook may take
    over the whole search via run_hook("do_search", ...).
    """
    import os
    import xbmc
    import xbmcgui
    import time
    import datetime
    import urllib2
    import shutil
    # NOTE(review): term=None crashes on .lower(); callers appear to always
    # pass a string - confirm before hardening.
    search_term = term.lower()
    result = run_hook("do_search", search_term)
    if result:
        display_list(result, "videos")
        return
    jenlist = JenList("")
    jenlist.list_image = xbmcaddon.Addon().getAddonInfo('icon')
    theme = xbmcaddon.Addon().getSetting('theme')
    if theme and theme != 'DEFAULT' and theme != 'none':
        jenlist.list_fanart = jenlist.set_theme(theme)
    else:
        jenlist.list_fanart = xbmcaddon.Addon().getAddonInfo('fanart')
    result_list = []
    exact_result_list = []
    item_xml_result_list = []
    exact_item_xml_result_list = []
    dest_file = os.path.join(
        xbmc.translatePath(xbmcaddon.Addon().getSetting("cache_folder")),
        "search.db")
    url = __builtin__.search_db_location
    request = urllib2.Request(url)
    response = urllib2.urlopen(request)
    dp = None  # progress dialog; created only when a download is needed
    try:
        changed = response.headers["Last-Modified"]
        changed_struct = time.strptime(changed, "%a, %d %b %Y %H:%M:%S GMT")
        epoch_changed = int(time.mktime(changed_struct))
        # Refresh the local DB when missing or older than the server copy.
        if not os.path.exists(dest_file) or \
                int(os.path.getmtime(dest_file)) < epoch_changed:
            dp = xbmcgui.DialogProgress()
            dp.create(_('Loading database file'), _('Please Wait'))
            if response.getcode() == 200:
                with open(dest_file, 'wb') as out_file:
                    shutil.copyfileobj(response, out_file)
            if os.path.getsize(dest_file) == 0:
                # An empty download would shadow a good copy forever.
                koding.dolog("0 size db: " + repr(dest_file))
                os.remove(dest_file)
            dp.close()
    except Exception:
        # Server down / bad headers: fall back to any existing local copy.
        if not os.path.exists(dest_file):
            addon_name = xbmcaddon.Addon().getAddonInfo('name')
            xbmcgui.Dialog().ok(
                addon_name, _("no local file found, and server seems down"))
        # Bug fix: dp could be unbound here, turning any download error
        # into a NameError.
        if dp is not None:
            dp.close()
    response.close()
    results = koding.DB_Query(
        dest_file, 'SELECT * from search where item like "%%%s%%"' % search_term)
    for result in results:
        item = jenlist.process_item(result["item"])
        playlister = result["poster"]
        title = item["label"].lower()
        if search_term in title:
            item["info"] = {}
            try:
                item['label'] = '{0} - {1}'.format(playlister, item["label"])
            except Exception:
                koding.dolog("playlister: " + repr(playlister))
                # Bug fix: was item["lable"], whose KeyError masked this log.
                koding.dolog("label:" + repr(item["label"]))
                koding.dolog("item: " + repr(item))
                raise Exception()
            if title.startswith(search_term + " "):
                exact_result_list.append(item)
                exact_item_xml_result_list.append(result["item"])
                continue
            result_list.append(item)
            item_xml_result_list.append(result["item"])
    meta = xbmcaddon.Addon().getSetting("metadata") == "true"
    if meta:
        # TODO find way to get it all in single call
        info = get_info(exact_item_xml_result_list)
        if info:
            for index, item in enumerate(exact_result_list):
                item["info"].update(info[index])
        info = get_info(item_xml_result_list)
        if info:
            for index, item in enumerate(result_list):
                item["info"].update(info[index])
    # Bug fix: both sorts previously keyed on the leftover loop variable
    # `title` (a constant for every element), so no ordering happened.
    # Exact (prefix) matches come first, each group sorted by label.
    exact_result_list = sorted(
        exact_result_list, key=lambda entry: entry["label"].lower())
    exact_result_list.extend(
        sorted(result_list, key=lambda entry: entry["label"].lower()))
    display_list(exact_result_list, "videos")
def get_game(url):
    # Scrape hesgoal.com's front page for live football and racing streams
    # and build a Jen XML listing.  Each article page's central iframe url is
    # wrapped in a SportsDevil plugin url for playback.
    xml = ""
    try:
        # Incoming url is ignored; the front page is always scraped.
        url = "http://www.hesgoal.com/"
        headers = {'User_Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        # Football section: everything between the header and the
        # "More Football News" link.
        block1 = re.compile(
            '<h2>Football News</h2>(.+?)<a href="http://www.hesgoal.com/league/11/Football_News">More Football News</a>',
            re.DOTALL).findall(html)
        local_time = datetime.datetime.now().strftime('%H:%M')
        # Header row showing the user's local clock for schedule comparison.
        xml += "<item>"\
               "<title>[COLOR blue]Local Time %s[/COLOR]</title>"\
               "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
               "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
               "<link></link>"\
               "</item>" % (local_time)
        match1 = re.compile(
            '<a href="(.+?)".+?src="(.+?)".+?alt="(.+?)".+?href=.+?<p>(.+?)</p>',
            re.DOTALL).findall(str(block1))
        for link, image, name, time in match1:
            # "Djorkaeff" marks the first plain news article after the live
            # games, so stop there.  NOTE(review): fragile hard-coded
            # sentinel - confirm it still matches the site.
            if "Djorkaeff" in name:
                break
            # Convert the site's kick-off time (Athens zone) for display.
            (display_time) = convDateUtil(time, 'default', 'Europe/Athens')
            html2 = requests.get(link, headers=headers).content
            match2 = re.compile('<center><iframe.+?src="(.+?)"',
                                re.DOTALL).findall(html2)
            for url2 in match2:
                url2 = "http:" + url2  # iframe src is protocol-relative
                url3 = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=" + url2 + "|User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"
                xml += "<plugin>"\
                       "<title>%s : %s</title>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (name,display_time,image,url3)
        # Racing section, same layout as football.
        block2 = re.compile(
            '<h2>Racing News</h2>(.+?)<a href="http://www.hesgoal.com/league/12/Racing_News">More Racing News</a>',
            re.DOTALL).findall(html)
        match2 = re.compile(
            '<a href="(.+?)".+?src="(.+?)".+?alt="(.+?)".+?href=.+?<p>(.+?)</p>',
            re.DOTALL).findall(str(block2))
        for link, image, name, time in match2:
            # Same hard-coded end-of-games sentinel as above.
            if "Hamilton leaves" in name:
                break
            (display_time) = convDateUtil(time, 'default', 'Europe/Athens')
            html3 = requests.get(link, headers=headers).content
            match3 = re.compile('<center><iframe.+?src="(.+?)"',
                                re.DOTALL).findall(html3)
            for url4 in match3:
                url4 = "http:" + url4
                url5 = "plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=" + url4 + "|User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"
                xml += "<plugin>"\
                       "<title>%s : %s</title>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (name,display_time,image,url5)
        # NOTE(review): xml always contains the local-time header by this
        # point, so this fallback can only fire if that append is removed.
        if not xml:
            xml += "<item>"\
                   "<title>[B]----No Games at this time----[/B]</title>"\
                   "<thumbnail></thumbnail>"\
                   "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                   "<link></link>"\
                   "</item>"
    except:
        # Best-effort scrape: show whatever was built before the failure.
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_wcsearch(url):
    # Search animetoon.org for a show.  The query comes either embedded in
    # the route url or from an on-screen keyboard prompt.
    xml = ""
    url = url.replace(
        'wcsearch/',
        '')  # Strip our search tag off when used with keywords in the xml
    url = url.replace('wcsearch',
                      '')  # Catch plain case, for when overall search is used.
    if url != None and url != "":
        search = url
    else:
        keyboard = xbmc.Keyboard('', 'Search for Movies')
        keyboard.doModal()
        # NOTE(review): isConfirmed() returns a bool, so the != "" test is
        # vacuous; this reduces to "if confirmed".
        if keyboard.isConfirmed() != None and keyboard.isConfirmed() != "":
            search = keyboard.getText()
        else:
            return
    if search == None or search == "":
        # Empty query: show a cancel notice instead of a result list.
        xml += "<item>"\
               " <title>Search Cancelled</title>"\
               " <link>plugin://plugin.video.squadcontrol/?mode=section_item</link>"\
               " <thumbnail>%s</thumbnail>"\
               "</item>" % (addon_icon)
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
        return
    total = 0
    try:
        search_url = 'http://www.animetoon.org/toon/search?key=%s' % search.replace(
            ' ', '+')
        html = requests.get(search_url).content
        thedivs = dom_parser.parseDOM(html, 'div',
                                      attrs={'class': 'series_list'})[0]
        list_items = dom_parser.parseDOM(thedivs, 'li')
        for content in list_items:
            try:
                info_header = dom_parser.parseDOM(content, 'h3')[0]
                show_url, title = re.compile('<a href="(.+?)">(.+?)</a>',
                                             re.DOTALL).findall(info_header)[0]
                title = refreshtitle(title).replace('Episode ', 'EP:')
                title = remove_non_ascii(title)
                show_icon = re.compile('src="(.+?)"',
                                       re.DOTALL).findall(content)[0]
                xml += "<dir>"\
                       " <title>%s</title>"\
                       " <animetoon>wcepisode/%s</animetoon>"\
                       " <thumbnail>%s</thumbnail>"\
                       " <summary>%s</summary>"\
                       "</dir>" % (title,show_url,show_icon,title)
                total += 1
            except:
                continue  # skip malformed result entries
        pagination = dom_parser.parseDOM(html, 'ul',
                                         attrs={'class': 'pagination'})[0]
        if len(pagination) > 0:
            list_items = dom_parser.parseDOM(pagination, 'li')
            next_li = list_items[(len(list_items) - 1)]
            next_url = 'popular-list/%s' % (re.compile(
                'href="http://www.animetoon.org/popular-list/(.+?)"',
                re.DOTALL).findall(next_li)[0])
            # NOTE(review): show_icon is the thumbnail of the *last* parsed
            # result; if no result parsed it is unbound and the outer except
            # silently drops the pagination entry.
            xml += "<dir>"\
                   " <title>Next Page >></title>"\
                   " <animetoon>%s</animetoon>"\
                   " <thumbnail>%s</thumbnail>"\
                   " <summary>Next Page</summary>"\
                   "</dir>" % (next_url,show_icon)
    except:
        pass
    # Only display when at least one result was found.
    if total > 0:
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
def fxxx_tags(url):
    # List fullxxxmovies.net posts for one tag page; the rendered XML is
    # cached keyed by the resolved page url.
    url = url.replace('fxmtag/', '')  # strip our routing tag
    orig_tag = url.split("/")[0]  # keep the bare tag for Next Page links
    url = urlparse.urljoin('http://fullxxxmovies.net/tag/', url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            headers = {'User_Agent': User_Agent}
            html = requests.get(url, headers=headers).content
            try:
                # Each post is an <article> inside the main content div.
                tag_divs = dom_parser.parseDOM(html, 'div',
                                               attrs={'id': 'mainAnninapro'})[0]
                vid_entries = dom_parser.parseDOM(tag_divs, 'article')
                for vid_section in vid_entries:
                    thumbnail = re.compile('src="(.+?)"', re.DOTALL).findall(
                        str(vid_section))[0]
                    vid_page_url, title = re.compile(
                        'h3 class="entry-title"><a href="(.+?)" rel="bookmark">(.+?)</a></h3',
                        re.DOTALL).findall(str(vid_section))[0]
                    xml += "<item>"\
                           " <title>%s</title>"\
                           " <meta>"\
                           " <summary>%s</summary>"\
                           " </meta>"\
                           " <fxxxmovies>%s</fxxxmovies>"\
                           " <thumbnail>%s</thumbnail>"\
                           "</item>" % (title,title,vid_page_url,thumbnail)
            except:
                pass  # page layout changed: show whatever parsed
            try:
                try:
                    # WordPress pagination: the "next" anchor's second-to-last
                    # path segment is the page number.
                    next_page = dom_parser.parseDOM(
                        html,
                        'a',
                        attrs={'class': 'next page-numbers'},
                        ret='href')[0]
                    next_page = next_page.split("/")[-2]
                    xml += "<dir>"\
                           " <title>Next Page</title>"\
                           " <meta>"\
                           " <summary>Click here for more p**n bitches!</summary>"\
                           " </meta>"\
                           " <fxxxmovies>fxmtag/%s/page/%s</fxxxmovies>"\
                           " <thumbnail>%s</thumbnail>"\
                           "</dir>" % (orig_tag,next_page,next_icon)
                except:
                    pass  # no next-page link on the last page
            except:
                pass
        except:
            pass  # network failure: cache an empty listing
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_DHcats(url):
    """List Documentary Heaven documentaries for one category page.

    Scrapes every <article> on the category page, resolves each post to its
    embedded player url (YouTube / archive.org / known hosts), cleans HTML
    entities and mojibake out of the titles/summaries, and renders a Jen XML
    listing.  The rendered XML is cached keyed by the resolved page url.
    """
    def _clean(text):
        # Collapse common WordPress HTML entities and UTF-8 mojibake found in
        # scraped titles/summaries into plain punctuation.
        # NOTE(review): reconstructed - the original replace chain was
        # entity-mangled in the source; confirm the pairs against live data.
        for bad, good in (('&#038;', '&'), ('&amp;', '&'),
                          ('&#8217;', "'"), ('&#8216;', "'"),
                          ('&#8220;', '"'), ('&#8221;', '"'),
                          ('&quot;', '"'), ('&#8211;', ' - '),
                          ('\xe2\x80\x99', "'"), ('\xe2\x80\x98', "'"),
                          ('\xc3\xa2', '')):
            text = text.replace(bad, good)
        return text

    url = url.replace('dhcategory/', '')  # Strip our category tag off.
    orig_cat = url.split("/")[0]  # keep the category for Next Page links
    url = urlparse.urljoin(docu_cat_list, url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        # One shared item template keeps the four host branches identical.
        item_tpl = "<item>"\
                   " <title>[COLOR red]%s[/COLOR]</title>"\
                   " <link>%s</link>"\
                   " <thumbnail>%s</thumbnail>"\
                   " <summary>%s</summary>"\
                   "</item>"
        try:
            html = requests.get(url).content
            doc_list = dom_parser.parseDOM(html, 'article')
            for content in doc_list:
                try:
                    docu_info = re.compile('<h2>(.+?)</h2>',
                                           re.DOTALL).findall(content)[0]
                    docu_title = re.compile('<a.+?">(.+?)</a>',
                                            re.DOTALL).findall(docu_info)[0]
                    docu_title = _clean(docu_title)
                    docu_summary = _clean(
                        re.compile('<p>(.+?)</p>',
                                   re.DOTALL).findall(content)[0])
                    try:
                        # Lazy-loaded images carry the real url in data-src.
                        docu_icon = re.compile('data-src="(.+?)"',
                                               re.DOTALL).findall(content)[0]
                    except:
                        docu_icon = re.compile('src="(.+?)"',
                                               re.DOTALL).findall(content)[0]
                    docu_url = re.compile('href="(.+?)"',
                                          re.DOTALL).findall(docu_info)[0]
                    # Resolve the post page to its embedded player url.
                    docu_html = requests.get(docu_url).content
                    try:
                        docu_item = dom_parser.parseDOM(
                            docu_html,
                            'meta',
                            attrs={'itemprop': 'embedUrl'},
                            ret='content')[0]
                    except:
                        docu_item = dom_parser.parseDOM(docu_html,
                                                        'iframe',
                                                        ret='src')[0]
                    if 'http:' not in docu_item and 'https:' not in docu_item:
                        docu_item = 'https:' + docu_item  # protocol-relative src
                    docu_url = docu_item
                    # Bug fix: the cleaned value was previously discarded
                    # (bare call to replaceHTMLCodes with no assignment).
                    docu_title = replaceHTMLCodes(docu_title)
                    if 'youtube' in docu_url:
                        if 'videoseries' not in docu_url:
                            xml += item_tpl % (docu_title, docu_url, docu_icon,
                                               docu_summary)
                        else:
                            # Playlists route through the youtube plugin.
                            video_id = docu_url.split("=")[-1]
                            docu_url = 'plugin://plugin.video.youtube/playlist/%s/' % video_id
                            xml += item_tpl % (docu_title, docu_url, docu_icon,
                                               docu_summary)
                    elif 'archive.org/embed' in docu_url:
                        # Dereference the embed page to the raw video file.
                        docu_html = requests.get(docu_url).content
                        video_element = dom_parser.parseDOM(docu_html,
                                                            'source',
                                                            ret='src')[0]
                        docu_url = urlparse.urljoin('https://archive.org/',
                                                    video_element)
                        xml += item_tpl % (docu_title, docu_url, docu_icon,
                                           docu_summary)
                    elif any(x in docu_url for x in reg_items):
                        # Hosts the resolver already understands.
                        xml += item_tpl % (docu_title, docu_url, docu_icon,
                                           docu_summary)
                    elif any(x in docu_url for x in unreg_items):
                        # most of these gone now so screw it lol, and no valid
                        # player known yet to work with nfb
                        continue
                    else:
                        xbmcgui.Dialog().ok('Unknown Host - ' + docu_title,
                                            str(docu_url))
                except:
                    continue  # skip posts that fail to parse/resolve
            try:
                navi_content = dom_parser.parseDOM(
                    html, 'div', attrs={'class': 'numeric-nav'})[0]
                if '>NEXT' in navi_content:
                    links = dom_parser.parseDOM(navi_content, 'a', ret='href')
                    link = links[(len(links) - 1)]
                    page = link.split("/")[-2]
                    xml += "<dir>"\
                           " <title>[COLOR red]Next Page >>[/COLOR]</title>"\
                           " <docuh>dhcategory/%s/page/%s</docuh>"\
                           "</dir>" % (orig_cat,page)
            except:
                pass  # no pagination block on the last page
        except:
            pass  # network failure: cache whatever was built
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def imdbepisode(url):
    # Build an episode listing from an IMDb season page.  Aired episodes
    # (which have a poster) are matched first; if none match, a second regex
    # catches unaired episodes that show the "Add Image" placeholder.
    xml = ""
    url = url.replace("theepisode/","")
    listhtml = getHtml(url)
    # Aired episodes: data-const id, poster, SxEp, and airdate.
    match = re.compile(
        '<div data-const="(.+?)" class="hover-over-image zero-z-index ">\n<img width=".+?" height=".+?" class="zero-z-index" alt="(.+?)" src="(.+?)">\n<div>S(.+?), Ep(.+?)</div>\n</div>\n</a>.+?</div>\n.+?<div class="info" itemprop="episodes" itemscope itemtype=".+?">\n.+?<meta itemprop="episodeNumber" content=".+?"/>\n.+?<div class="airdate">\n.+?([^"]+)\n.+?</div>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for imdb, title, thumbnail, season, episode, premiered in match:
        # Show title and first-air year come from the page header.
        # NOTE(review): both are re-extracted per episode; they are constant
        # for the whole page and could be hoisted.
        tvshowtitle = re.compile(
            '<h3 itemprop="name">\n<a href="/title/.+?/.+?ref_=ttep_ep_tt"\nitemprop=.+?>(.+?)</a>',
            re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
        Year = re.compile(
            '<meta itemprop="name" content=".+?TV Series ([^"]+).+? .+?"/>',
            re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
        # Swap the 200px thumb crop for the 600px variant.
        thumbnail = thumbnail.replace("@._V1_UX200_CR0,0,200,112_AL_.jpg","@._V1_UX600_CR0,0,600,400_AL_.jpg")
        name = "[COLOR dodgerblue]%sx%s[/COLOR] . %s" % (season, episode, title)
        xml += "<item>"\
               "<title>%s</title>"\
               "<meta>"\
               "<content>episode</content>"\
               "<imdb>%s</imdb>"\
               "<tvdb></tvdb>"\
               "<tvshowtitle>%s</tvshowtitle>"\
               "<year>%s</year>"\
               "<title>%s</title>"\
               "<premiered>%s</premiered>"\
               "<season>%s</season>"\
               "<episode>%s</episode>"\
               "</meta>"\
               "<link>"\
               "<sublink>search</sublink>"\
               "<sublink>searchsd</sublink>"\
               "</link>"\
               "<thumbnail>%s</thumbnail>"\
               "<fanart></fanart>"\
               "</item>" % (name, imdb, tvshowtitle, Year, title, premiered,
                            season, episode, thumbnail)
    if not match:
        # Season not aired yet: episodes have no poster, so match the
        # "Add Image" placeholder markup instead (imdb id comes from the
        # title href here, not data-const).
        match = re.compile(
            '<a href="/title/(.+?)/.+?ref_=ttep_ep.+?"\ntitle="(.+?)" itemprop="url"> <div data-const=".+?" class="hover-over-image zero-z-index no-ep-poster">\n<a href=".+?"\nonclick=".+?" class="add-image" > <span class="add-image-container episode-list" style="width:200px;height:112px">\n<span class="add-image-icon episode-list" />\n<span class="add-image-text episode-list">Add Image</span>\n</span>\n</a> <div>S(.+?), Ep(.+?)</div>\n</div>',
            re.IGNORECASE | re.DOTALL).findall(listhtml)
        for imdb, title, season, episode in match:
            tvshowtitle = re.compile(
                '<h3 itemprop="name">\n<a href="/title/.+?/.+?ref_=ttep_ep_tt"\nitemprop=.+?>(.+?)</a>',
                re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
            Year = re.compile(
                '<meta itemprop="name" content=".+?TV Series ([^"]+).+? .+?"/>',
                re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
            name = "[B][COLOR yellow]%s[/COLOR][/B]" % (title)
            xml += "<item>"\
                   "<title>%s</title>"\
                   "<meta>"\
                   "<content>episode</content>"\
                   "<imdb>%s</imdb>"\
                   "<tvdb></tvdb>"\
                   "<tvshowtitle>%s</tvshowtitle>"\
                   "<year>%s</year>"\
                   "<title>%s</title>"\
                   "<premiered></premiered>"\
                   "<season>%s</season>"\
                   "<episode>%s</episode>"\
                   "</meta>"\
                   "<link>"\
                   "<sublink>search</sublink>"\
                   "<sublink>searchsd</sublink>"\
                   "</link>"\
                   "<thumbnail>https://image.ibb.co/ew7xZG/not_Aired_Yet.png</thumbnail>"\
                   "<fanart></fanart>"\
                   "</item>" % (name, imdb, tvshowtitle, Year, title, season,
                               episode)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def newest_releases(url):
    """Show one page (25 rows) of the Airtable "newest releases" movie list.

    *url* is "newest_releases/page<N>".  Each Airtable row carries up to five
    links; a "-*-" marker sits in the first *unused* link column, so only the
    columns before the marker become playable sublinks.  A Next Page entry is
    always appended.
    """
    xml = ""
    url = url.replace("newest_releases/", "")
    at = Airtable('app4O4BNC5yEy9wNa', 'Releases_Newest',
                  api_key='keyOHaxsTGzHU9EEh')
    match = at.get_all(maxRecords=700, view='Grid view')
    # Fields are scraped out of the repr of the record list; the named
    # groups appear in the order Airtable serialises them.
    results = re.compile(
        "link5': u'(.+?)'.+?link4': u'(.+?)'.+?tmdb': u'(.+?)'.+?link1': u'(.+?)'.+?link3': u'(.+?)'.+?link2': u'(.+?)'.+?title': u'(.+?)'.+?year': u'(.+?)'",
        re.DOTALL).findall(str(match))
    # Bug fix: the old hand-written slices ([0:24], [25:49], ...) silently
    # dropped every 25th row (indices 24, 49, 74, ...) and raised NameError
    # for any page past the hard-coded Page12.  Compute the slice instead.
    PAGE_SIZE = 25
    try:
        page_no = int(url.replace("page", ""))
    except ValueError:
        page_no = 1  # malformed page tag: fall back to the first page
    start = (page_no - 1) * PAGE_SIZE
    page_num = results[start:start + PAGE_SIZE]
    call = "page%d" % (page_no + 1)
    for link5, link4, tmdb, link1, link3, link2, title, year in page_num:
        (thumbnail, fanart, imdb, summary) = pull_tmdb(title, year, tmdb)
        summary = remove_non_ascii(summary)
        title = remove_non_ascii(title)
        # Keep only the link columns before the "-*-" end marker.
        sublinks = []
        for link in (link1, link2, link3, link4, link5):
            if "-*-" in link:
                break
            sublinks.append(link)
        sub_xml = "".join("<sublink>%s</sublink>" % link for link in sublinks)
        xml += "<item>"\
               "<title>[COLOR red]%s[/COLOR]</title>"\
               "<meta>"\
               "<content>movie</content>"\
               "<imdb>%s</imdb>"\
               "<title>[COLOR red]%s[/COLOR]</title>"\
               "<year>%s</year>"\
               "<thumbnail>%s</thumbnail>"\
               "<fanart>%s</fanart>"\
               "<summary>%s</summary>"\
               "</meta>"\
               "<link>"\
               "%s"\
               "<sublink>search</sublink>"\
               "<sublink>searchsd</sublink>"\
               "</link>"\
               "</item>" % (title, imdb, title, year, thumbnail, fanart,
                            summary, sub_xml)
    xml += "<dir>"\
           "<title>[COLOR white ]%s[/COLOR] [COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
           "<Airtable>newest_releases/%s</Airtable>"\
           "<thumbnail>http://www.clker.com/cliparts/a/f/2/d/1298026466992020846arrow-hi.png</thumbnail>"\
           "</dir>" % (url, call)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())