def Dialog_specific():
    """Offer to download a background image into special://home/backgrounds/.

    Shows a custom dialog; when the user presses the first button ('Grab it')
    it asks for an image name and a source URL, downloads the file with a
    progress dialog, and confirms completion.

    Fixes: removed the unused local ``full_path_image`` and the redundant
    second ``xbmc.translatePath`` call for the same special path.
    """
    main_text = 'Click below to download an image to be used as a background.\n\nYou will first need to enter a name for your image,\nThen you must enter the Url to that image .\nPlease keep image names different else they will be overwritten!\nYou may need to add home folder as a source to file manager before u can use the images.'
    my_buttons = ['Grab it']
    my_choice = koding.Custom_Dialog(
        main_content=main_text, pos='center', size='650x400',
        buttons=my_buttons, transparency=95, highlight_color='red',
        header='INSTALLA')
    # Make sure the destination folder exists before any download attempt.
    my_path = xbmc.translatePath('special://home/backgrounds/')
    if not os.path.exists(my_path):
        os.makedirs(my_path)
    if my_choice == 0:  # first (and only) button: 'Grab it'
        image_name = koding.Keyboard(heading='Type in a name for your image', default='test text')
        src = koding.Keyboard(heading='Please enter the url of your image', default='http://')
        dst = xbmc.translatePath('special://home/backgrounds/' + image_name + '.jpg')
        dp = xbmcgui.DialogProgress()
        dp.create('Downloading File', 'Please Wait')
        koding.Download(src, dst, dp)
        dialog.ok(
            '[COLOR gold]DOWNLOAD COMPLETE[/COLOR]',
            'Your download is complete, You can find images in home/backgrounds -'
        )
def add_search():
    """Ask for a search term, persist it to history, then open the results."""
    entered = str(koding.Keyboard(_("Enter search term")))
    if entered:
        # Remember the term so it shows up in search history.
        koding.Add_To_Table("search", {"term": entered})
        target = get_addon_url("do_search", entered)
        xbmc.executebuiltin("Container.update(%s, replace)" % target)
def search_trailers(url):
    """Scrape moviefone.com for trailers matching a user-entered query and
    display the results as a Jen list.

    NOTE(review): both bare ``except`` blocks silently swallow every network
    or parsing error, leaving ``xml`` empty (which then renders an empty list).
    """
    xml = ""
    try:
        search_query = koding.Keyboard(heading='Search for Trailers')
        # Percent-encode spaces only; other characters pass through verbatim.
        search_query = search_query.replace(" ", "%20")
        url = "https://www.moviefone.com/search/%s/" % search_query
        html = requests.get(url).content
        # Isolate the search-results section of the page.
        block = re.compile('search-head-one(.+?)text/javascript', re.DOTALL).findall(html)
        try:
            # str(block) flattens the findall list so one regex can scan it.
            match = re.compile(
                '"search-title">(.+?)</a>.+?"search-more-links"><a href="(.+?)">(.+?)</a>',
                re.DOTALL).findall(str(block))
            for name2, link2, key2 in match:
                # Only follow entries whose "more links" label is "Trailers".
                if key2 == "Trailers":
                    url2 = link2
                    html2 = requests.get(url2).content
                    # JSON-LD style fields embedded in the trailer page.
                    match2 = re.compile(
                        '"name": "(.+?)".+?"description": "(.+?)".+?thumbnailUrl": "(.+?)".+?"contentUrl": "(.+?)"',
                        re.DOTALL).findall(html2)
                    for name, summary, thumbnail, link in match2:
                        name = name.replace("'", "")
                        name = remove_non_ascii(name)
                        summary = clean_search(summary)
                        xml += "<item>"\
                            "<title>%s</title>"\
                            "<meta>"\
                            "<content>movie</content>"\
                            "<imdb></imdb>"\
                            "<title></title>"\
                            "<year></year>"\
                            "<thumbnail>%s</thumbnail>"\
                            "<fanart>%s</fanart>"\
                            "<summary>%s</summary>"\
                            "</meta>"\
                            "<link>%s</link>"\
                            "</item>" % (name,thumbnail,thumbnail,summary,link)
            # Nothing matched: show a placeholder entry instead of a blank list.
            if xml == "":
                xml += "<item>"\
                    "<title>No Results</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb></imdb>"\
                    "<title></title>"\
                    "<year></year>"\
                    "<thumbnail></thumbnail>"\
                    "<fanart></fanart>"\
                    "<summary></summary>"\
                    "</meta>"\
                    "<link></link>"\
                    "</item>"
        except:
            pass
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def output_folder(): folder_name = koding.Keyboard(heading='Output Folder Name') folder_name = folder_name.replace(" ", "_") xml_folder = os.path.join(xml_path, folder_name) if os.path.exists(xml_folder): koding.Notify(title='Folder Already Exists', message='Choose a different folder', duration=5000) xml_folder = output_folder() else: os.mkdir(xml_folder, 0755) return folder_name
def channel_search(url, fanart):
    """Prompt for a term, run a YouTube channel search, and list the hits."""
    query = koding.Keyboard(heading=SingleColor(local_string(30053), _Edit.DialogBoxColor1))
    koding.dolog('search item = %s' % query, line_info=True)
    YouTube_Scraper.channel_search(url, query)
    # The scraper publishes its results via the module-level CHANNEL_SEARCH list.
    for entry in YouTube_Scraper.CHANNEL_SEARCH:
        BYB.addDir_file(
            ItemColor(entry.get('title', 'Title Missing')),
            entry.get('playlink', ''),
            33,
            entry.get('artwork', ''),
            fanart,
            entry.get('description', ''),
            '',
            entry.get('date', ''),
            '')
def search(search_item=None):
    """Search YouTube videos for a term, prompting the user when none is given."""
    if search_item is None:
        search_item = koding.Keyboard(heading=SingleColor(local_string(30053), _Edit.DialogBoxColor1))
    koding.dolog('search item = %s' % search_item, line_info=True)
    # Fall back to the add-on fanart when no custom search fanart is configured.
    if _Edit.YT_SearchFanart == '':
        fanart = addon_fanart
    else:
        fanart = _Edit.YT_SearchFanart
    YouTube_Scraper.search(search_item)
    for video in YouTube_Scraper.SEARCH_VIDEO:
        name = video.get('title', 'Title Missing')
        stream = video.get('playlink', '')
        art = video.get('icon')
        BYB.addDir_file(ItemColor(name), stream, 33, art, fanart, '', '', '', '')
def search_trailers(url):
    """Search moviefone.com for trailers and display matches as a Jen list.

    Prompts the user for a query, scrapes the results page, and emits one
    <item> per hit whose "more links" label is "Trailers".  Scraping is
    best-effort: failures are swallowed and a "No Results" item is shown
    when nothing matched.

    Fix: the request header key was ``'User_Agent'`` (underscore), which is
    not the HTTP ``User-Agent`` header, so the intended agent string was
    never sent as such.
    """
    xml = ""
    try:
        search_query = koding.Keyboard(heading='Search for Trailers')
        # Percent-encode spaces only; other characters pass through verbatim.
        search_query = search_query.replace(" ", "%20")
        url = "https://www.moviefone.com/search/%s/" % search_query
        headers = {'User-Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        # Isolate the search-results section of the page.
        block = re.compile('<h1>Search results for(.+?)<h2>Top Trailers',
                           re.DOTALL).findall(html)
        try:
            match = re.compile(
                'data-src="(.+?)".+?alt="(.+?)".+?<a href="(.+?)".+?<p class="search-description">(.+?)</p>.+?<div class="search-more-links">.+?">(.+?)</a>',
                re.DOTALL).findall(str(block))
            for thumbnail, name, link1, summary, key2 in match:
                if key2 == "Trailers":
                    name = name.replace("'", "")
                    name = remove_non_ascii(name)
                    summary = clean_search(summary)
                    xml += "<item>"\
                        "<title>%s</title>"\
                        "<meta>"\
                        "<content>movie</content>"\
                        "<imdb></imdb>"\
                        "<title></title>"\
                        "<year></year>"\
                        "<thumbnail>%s</thumbnail>"\
                        "<fanart>%s</fanart>"\
                        "<summary>%s</summary>"\
                        "</meta>"\
                        "<moviefone>result**%s</moviefone>"\
                        "</item>" % (name,thumbnail,thumbnail,summary,link1)
            # Nothing matched: show a placeholder entry instead of a blank list.
            if xml == "":
                xml += "<item>"\
                    "<title>No Results</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb></imdb>"\
                    "<title></title>"\
                    "<year></year>"\
                    "<thumbnail></thumbnail>"\
                    "<fanart></fanart>"\
                    "<summary></summary>"\
                    "</meta>"\
                    "<link></link>"\
                    "</item>"
        except:
            pass
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def SearchTv(url):
    # Search TV shows through the shared TMDB helper and list the results.
    query = koding.Keyboard(
        SingleColor(local_string(30057), _Edit.DialogBoxColor1))
    BYBAPI.tmdb_search(url, search_type='tv', search_term=query,
                       total_pages=1)
    # tmdb_search publishes its hits via the module-level Details_list.
    for show in BYBAPI.Details_list:
        addDir(
            ChannelColor(show.get('title', '')).encode('utf-8'),
            'tmdb=' + show.get('ID', ''),
            404,
            show.get('poster_path', icon_Search),
            show.get('backdrop_path', fanart_Search),
            show.get('overview', '').encode('utf-8'),
            str(show.get('Genres', '')).encode('utf-8'),
            show.get('release_date', '').encode('utf-8'),
            '')
    # Clear the shared results list in place, ready for the next search.
    del BYBAPI.Details_list[:]
def tmdb(url):
    """Route a "tmdb" plugin url to the matching tmdbsimple API call, build
    Jen <item>/<dir> XML from the response, cache it, and display the list.

    Url shapes handled: "movies/popular/2", "people/popular", "movie/upcoming",
    "tv/top_rated", "list/<id>", "trailer/<movie_id>",
    "person/<movies|shows>/<id>", "genre|year|network|company|keyword/..."
    discover queries, "collection/<id>" and "search[/<term>[/<page>]]".
    """
    page = 1
    # Serve from the local cache when possible; the content type is stashed
    # on __builtin__ so it survives across module boundaries.
    try:
        xml, __builtin__.content_type = fetch_from_db(url) or (None, None)
    except Exception:
        xml, __builtin__.content_type = None, None
    if not xml:
        content = "files"
        xml = ""
        response = None
        if url.startswith("movies"):
            if url.startswith("movies/popular"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().popular(page=page)
            if url.startswith("movies/now_playing"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().now_playing(page=page)
            if url.startswith("movies/top_rated"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().top_rated(page=page)
            for item in response["results"]:
                xml += get_movie_xml(item)
            content = "movies"
        elif url.startswith("people"):
            if url.startswith("people/popular"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.People().popular(page=page)
            for item in response["results"]:
                xml += get_person_xml(item)
            content = "movies"
        elif url.startswith("movie"):
            if url.startswith("movie/upcoming"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().upcoming(page=page)
            for item in response["results"]:
                xml += get_trailer_xml(item)
            content = "movies"
        elif url.startswith("tv"):
            if url.startswith("tv/popular"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().popular(page=page)
            elif url.startswith("tv/top_rated"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().top_rated(page=page)
            elif url.startswith("tv/today"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().airing_today(page=page)
            elif url.startswith("tv/on_the_air"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().on_the_air(page=page)
            for item in response["results"]:
                xml += get_show_xml(item)
            content = "tvshows"
        elif url.startswith("list"):
            list_id = url.split("/")[-1]
            if not response:
                response = tmdbsimple.Lists(list_id).info()
            # A TMDB list can mix movies ("title" key) and shows ("name" key).
            for item in response.get("items", []):
                if "title" in item:
                    xml += get_movie_xml(item)
                    content = "movies"
                elif "name" in item:
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("trailer"):
            movie_id = url.split("/")[-1]
            if not response:
                response = tmdbsimple.Movies(movie_id).videos()
            for item in response["results"]:
                if "type" in item:
                    xml += get_trailer_video_xml(item)
            content = "movies"
        elif url.startswith("person"):
            split_url = url.split("/")
            person_id = split_url[-1]
            media = split_url[-2]
            if media == "movies":
                if not response:
                    response = tmdbsimple.People(person_id).movie_credits()
            elif media == "shows":
                if not response:
                    response = tmdbsimple.People(person_id).tv_credits()
            # Credit responses are grouped by job ("cast", "crew", ...);
            # the top-level "id" key is the person id, not a job group.
            for job in response:
                if job == "id":
                    continue
                for item in response[job]:
                    if media == "movies":
                        xml += get_movie_xml(item)
                        content = "movies"
                    elif media == "shows":
                        xml += get_show_xml(item)
                        content = "tvshows"
        elif url.startswith("genre"):
            split_url = url.split("/")
            if len(split_url) == 3:
                # No page component present: default to page 1.
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            genre_id = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        with_genres=genre_id, page=page)
            elif media == "shows":
                if not response:
                    response = tmdbsimple.Discover().tv(with_genres=genre_id,
                                                        page=page)
            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
                elif media == "shows":
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("year"):
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            release_year = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        primary_release_year=release_year, page=page)
            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
        elif url.startswith("network"):
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            network_id = split_url[-2]
            media = split_url[-3]
            if media == "shows":
                if not response:
                    response = tmdbsimple.Discover().tv(
                        with_networks=network_id, page=page)
            for item in response["results"]:
                if media == "shows":
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("company"):
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            company_id = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        with_companies=company_id, page=page)
            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
        elif url.startswith("keyword"):
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            keyword_id = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        with_keywords=keyword_id, page=page)
            elif media == "shows":
                if not response:
                    response = tmdbsimple.Discover().tv(
                        with_keywords=keyword_id, page=page)
            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
                elif media == "shows":
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("collection"):
            split_url = url.split("/")
            collection_id = split_url[-1]
            if not response:
                response = tmdbsimple.Collections(collection_id).info()
            for item in response["parts"]:
                xml += get_movie_xml(item)
            content = "movies"
        elif url.startswith("search"):
            if url == "search":
                # Bare "search" url: ask the user for a term first.
                term = koding.Keyboard("Search For")
                url = "search/%s" % term
            split_url = url.split("/")
            if len(split_url) == 2:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            term = split_url[-2]
            response = tmdbsimple.Search().multi(query=term, page=page)
            # NOTE(review): `content` is never set in this branch, so search
            # results display with the default "files" content type.
            for item in response["results"]:
                if item["media_type"] == "movie":
                    xml += get_movie_xml(item)
                elif item["media_type"] == "tv":
                    xml += get_show_xml(item)
                elif item["media_type"] == "person":
                    name = item["name"]
                    person_id = item["id"]
                    if item.get("profile_path", ""):
                        thumbnail = "https://image.tmdb.org/t/p/w1280/" + item[
                            "profile_path"]
                    else:
                        thumbnail = ""
                    # People get two folders: their shows and their movies.
                    xml += "<dir>\n"\
                        "\t<title>%s Shows TMDB</title>\n"\
                        "\t<tmdb>person/shows/%s</tmdb>\n"\
                        "\t<thumbnail>%s</thumbnail>\n"\
                        "</dir>\n\n" % (name.capitalize(), person_id, thumbnail)
                    xml += "<dir>\n"\
                        "\t<title>%s Movies TMDB</title>\n"\
                        "\t<tmdb>person/movies/%s</tmdb>\n"\
                        "\t<thumbnail>%s</thumbnail>\n"\
                        "\t</dir>\n\n" % (name.capitalize(), person_id, thumbnail)
        # Paginated responses get a trailing "Next Page" folder.
        if response and page < response.get("total_pages", 0):
            base = url.split("/")
            if base[-1].isdigit():
                base = base[:-1]
            next_url = "/".join(base) + "/" + str(page + 1)
            xml += "<dir>"\
                "<title>Next Page >></title>"\
                "<tmdb>%s</tmdb>"\
                "<summary>Go To Page %s</summary>"\
                "</dir>" % (next_url, page + 1)
        __builtin__.content_type = content
        save_to_db((xml, __builtin__.content_type), url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), __builtin__.content_type)
def open_bml_search(url):
    """Fuzzy-search the 'OTB Big Movie List' Airtable bases and list matches."""
    pins = ""
    xml = ""
    show = koding.Keyboard(heading='Movie Name')
    # Both backing tables, queried in order for names and for record details.
    tables = [
        Airtable('app1aK3wfaR0xDxSK', 'OTB Big Movie List',
                 api_key='keyikW1exArRfNAWj'),
        Airtable('appaVv9EN3EJnvUz4', 'OTB Big Movie List 2',
                 api_key='keyikW1exArRfNAWj'),
    ]
    movie_list = []
    for table in tables:
        for record in table.get_all(maxRecords=1200, sort=['name']):
            movie_list.append(record['fields']['name'])
    search_result = koding.Fuzzy_Search(show, movie_list)
    if not search_result:
        xbmc.log("--------no results--------",level=xbmc.LOGNOTICE)
        xml += "<item>"\
            "<title>[COLOR=orange][B]Movie was not found[/B][/COLOR]</title>"\
            "</item>"
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
    for hit in search_result:
        wanted = remove_non_ascii(str(hit))
        # Look the fuzzy hit up in each table; any missing field skips the
        # record (best-effort, as before).
        for table in tables:
            try:
                for record in table.search('name', wanted):
                    fields = record['fields']
                    xml += display_xml(
                        remove_non_ascii(fields['name']),
                        fields['trailer'],
                        remove_non_ascii(fields['summary']),
                        fields['thumbnail'],
                        fields['fanart'],
                        fields['link_a'],
                        fields['link_b'],
                        fields['link_c'],
                        fields['link_d'],
                        fields['link_e'])
            except:
                pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
def add_search():
    """Prompt for a search term; when one is given, store it and run it."""
    phrase = str(koding.Keyboard("Enter search term"))
    if phrase:
        koding.Add_To_Table("search", {"term": phrase})
        do_search(phrase)
def imdb_info(url): try: folder_name = output_folder() # folder_name = koding.Keyboard(heading='Output Folder Name') # folder_name = folder_name.replace(" ","_") #xml_folder = os.path.join(xml_path,folder_name) # os.mkdir( xml_folder, 0755 ) list_number2 = koding.Keyboard(heading='IMDB List Number') list_number2 = list_number2.replace("ls", "") url = "http://www.imdb.com/list/ls%s/" % int(list_number2) html = requests.get(url).content match2 = re.compile('<h1 class="header list-name">(.+?)</h1>', re.DOTALL).findall(html) list_name = match2[0] list_name = clean_search(list_name) list_name = list_name.replace(" ", "_") #koding.Show_Busy(status=True) (url, html) = Pull_info(html, list_name, url, folder_name) print "pass1" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass2" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass3" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass4" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass5" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + 
match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass6" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass7" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass8" except: pass
def open_bml_search():
    """Fuzzy-search the OTB Audiobooks Airtable bases and display matches."""
    pins = ""
    xml = ""
    # Authorisation gate: bail out of the interpreter unless `yai` appears
    # in the 'au1' column of the control table.
    lai = []
    at1 = Airtable(tid, tnm, api_key=atk)
    for rec in at1.get_all(maxRecords=1200, view='Grid view'):
        lai.append(rec['fields']['au1'])
    if yai not in lai:
        exit()
    show = koding.Keyboard(heading='Movie Name')
    # The four backing tables, queried in order for names and for details.
    tables = [
        Airtable('appwblOWrmZ5uwcce', 'OTB Audiobooks',
                 api_key='keyem86gyhcLFSLqh'),
        Airtable('appOKb0JBT9M0MivF', 'OTB Audiobooks 2',
                 api_key='keyem86gyhcLFSLqh'),
        Airtable('appGoC0VblD0MCcvw', 'OTB Audiobooks 3',
                 api_key='keyem86gyhcLFSLqh'),
        Airtable('appYbxBoLWcYY9LSI', 'OTB Audiobooks 4',
                 api_key='keyem86gyhcLFSLqh'),
    ]
    movie_list = []
    for table in tables:
        for rec in table.get_all(maxRecords=1200, sort=['name']):
            movie_list.append(rec['fields']['name'])
    search_result = koding.Fuzzy_Search(show, movie_list)
    if not search_result:
        xbmc.log("--------no results--------", level=xbmc.LOGNOTICE)
        xml += "<item>"\
            "<title>[COLOR=orange][B]Movie was not found[/B][/COLOR]</title>"\
            "</item>"
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
    for hit in search_result:
        wanted = remove_non_ascii(str(hit))
        # Look the fuzzy hit up in each table; a missing field skips the
        # record (best-effort, as before).
        for table in tables:
            try:
                for rec in table.search('name', wanted):
                    fields = rec['fields']
                    xml += display_xml(
                        remove_non_ascii(fields['name']),
                        remove_non_ascii(fields['summary']),
                        fields['thumbnail'],
                        fields['fanart'],
                        fields['link1'],
                        fields['link2'],
                        fields['link3'],
                        fields['link4'],
                        fields['link5'])
            except:
                pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
def Tmdb_info(url):
    """Write a Jen XML record for every entry of a TMDB list.

    Prompts for the numeric TMDB list id, fetches it, and emits one record
    per movie/show via print_movie_xml; shows also get their seasons pulled
    with get_tv_seasons.  Missing artwork is reported through missing_art.

    Fixes over the previous revision:
      * ``name`` is now read from the result *before* missing_art() uses it
        (it was referenced before assignment when artwork was absent);
      * TV entries query the /tv/ external-ids endpoint (was /movie/);
      * the truncating open() of the output file is closed immediately
        instead of leaking the handle;
      * removed the unused local ``r`` and a duplicate xml_folder join.
    """
    trail_key = ""
    folder_name = output_folder()
    xml_folder = os.path.join(xml_path, folder_name)
    list_number = koding.Keyboard(heading='TMDB List Number')
    start_url = "https://api.themoviedb.org/3/list/%s?api_key=%s&language=en-US" % (
        int(list_number), tmdb_api_key)
    html = requests.get(start_url).content
    match = json.loads(html)
    list_name = match['name']
    list_name = list_name.replace(" ", "_")
    list_name = clean_search(list_name)
    if not list_name:
        # Fall back to the description when the list has no usable name.
        list_name = match['description']
    res = match['items']
    if not res:
        res = match['results']
    File = os.path.join(xml_folder, list_name)
    length = len(res)
    count = 0
    dp = xbmcgui.DialogProgress()
    dp.create("[COLOR ghostwhite]Writing XML's.... [/COLOR]")
    # Truncate (or create) the output file, then close the handle at once.
    open('%s.xml' % (File), 'w').close()
    for results in res:
        count = count + 1
        progress(length, count, dp)
        media = results['media_type']
        if media == 'movie':
            name = results['title']
            date = results['release_date']
            year = date.split("-")[0]
            icon = results['poster_path']
            if not icon:
                icon = ""
                key = "thumbnail"
                show_name = ""
                missing_art(show_name, name, key, folder_name)
            fanart = results['backdrop_path']
            if not fanart:
                fanart = ""
                key = "fanart"
                show_name = ""
                missing_art(show_name, name, key, folder_name)
            tmdb = results['id']
            if get_trailer == 'true':
                turl = "https://api.themoviedb.org/3/movie/%s/videos?api_key=%s" % (
                    tmdb, tmdb_api_key)
                html = requests.get(turl).json()
                try:
                    site = html['results'][0]['site']
                    if site == "YouTube":
                        trail_key = html['results'][0]['key']
                    else:
                        trail_key = ""
                except:
                    pass
            url2 = "https://api.themoviedb.org/3/movie/%s/external_ids?api_key=%s" % (
                tmdb, tmdb_api_key)
            html2 = requests.get(url2).content
            match2 = json.loads(html2)
            try:
                imdb = match2['imdb_id']
            except:
                imdb = "none"
        elif media == 'tv':
            name = results['name']
            date = results['first_air_date']
            year = date.split("-")[0]
            icon = results['poster_path']
            if not icon:
                icon = ""
                key = "thumbnail"
                show_name = ""
                missing_art(show_name, name, key, folder_name)
            fanart = results['backdrop_path']
            if not fanart:
                fanart = ""
                key = "fanart"
                show_name = ""
                missing_art(show_name, name, key, folder_name)
            tmdb = results['id']
            if get_trailer == 'true':
                turl = "https://api.themoviedb.org/3/tv/%s/videos?api_key=%s" % (
                    tmdb, tmdb_api_key)
                html = requests.get(turl).json()
                try:
                    site = html['results'][0]['site']
                    if site == "YouTube":
                        trail_key = html['results'][0]['key']
                    else:
                        trail_key = ""
                except:
                    pass
            # Previously queried the /movie/ endpoint with a TV id.
            url2 = "https://api.themoviedb.org/3/tv/%s/external_ids?api_key=%s" % (
                tmdb, tmdb_api_key)
            html2 = requests.get(url2).content
            match2 = json.loads(html2)
            try:
                imdb = match2['imdb_id']
            except:
                imdb = "none"
            get_tv_seasons(tmdb, fanart, imdb, folder_name)
        print_movie_xml(list_name, media, name, year, imdb, tmdb, icon,
                        fanart, folder_name, trail_key)
def imdb_info(url): try: folder_name = output_folder() list_number2 = koding.Keyboard(heading='IMDB List Number') list_number2 = list_number2.replace("ls", "") url = "http://www.imdb.com/list/ls%s/" % int(list_number2) html = requests.get(url).content match2 = re.compile( '<h1 class="header list-name">(.+?)</h1>.+?<div class="desc lister-total-num-results">(.+?)</div>', re.DOTALL).findall(html) for list_name, total_list in match2: list_name = clean_search(list_name) list_name = list_name.replace(" ", "_") total_list = total_list.replace("titles", "").replace(" ", "") (url, html) = Pull_info(html, list_name, url, folder_name, total_list) print "pass1" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name, total_list) print "pass2" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass3" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass4" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass5" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = 
Pull_info(html, list_name, url, folder_name) print "pass6" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass7" except: pass try: match3 = re.compile( '<a class="flat-button lister-page-next next-page" href="(.+?)"', re.DOTALL).findall(html) url = "http://www.imdb.com" + match3[0] html = requests.get(url).content (url, html) = Pull_info(html, list_name, url, folder_name) print "pass8" except: pass
def trakt_info(url):
    """Write Jen XML records for every item of a user's Trakt list.

    Prompts for the list name, fetches the items from the Trakt API, enriches
    each entry with artwork/year/trailer data from TMDB (looked up by IMDB
    id), and emits one record per item via print_movie_xml.

    NOTE(review): if the first item needs a trailer and the TMDB videos call
    raises, ``trail_key`` is referenced before assignment; the outer bare
    ``except`` then silently aborts the whole run.  The outer/inner bare
    excepts also swallow all network and parsing errors.
    """
    try:
        folder_name = output_folder()
        list_number3 = koding.Keyboard(heading='Trakt List Name')
        # Trakt slugs use dashes instead of spaces.
        list_name = list_number3.replace(" ", "-")
        user = trakt_user_name.replace(" ", "-")
        headers = {
            'Content-Type': 'application/json',
            'trakt-api-version': '2',
            'trakt-api-key': trakt_client_id
        }
        # First request only fetches the item count for the progress dialog.
        url1 = "https://api.trakt.tv/users/%s/lists/%s/" % (user, list_name)
        html1 = requests.get(url1, headers=headers).content
        match1 = json.loads(html1)
        length = match1['item_count']
        url = "https://api.trakt.tv/users/%s/lists/%s/items/" % (user, list_name)
        count = 0
        dp = xbmcgui.DialogProgress()
        dp.create("[COLOR ghostwhite]Writing XML's.... [/COLOR]")
        # "user-name" is the unconfigured placeholder value.
        if user == "user-name":
            print "no"
        else:
            html = requests.get(url, headers=headers).json()
            for res in html:
                media = res['type']
                count = count + 1
                progress(length, count, dp)
                if media == 'movie':
                    info = res['movie']
                elif media == 'show':
                    info = res['show']
                year = info['year']
                if not year:
                    year = "none"
                ids = info['ids']
                tmdb = ids['tmdb']
                if not tmdb:
                    tmdb = "none"
                imdb = ids['imdb']
                if not imdb:
                    imdb = "none"
                trakt = ids['trakt']
                name = info['title']
                icon = ""
                fanart = ""
                try:
                    # Throttle TMDB lookups slightly between items.
                    time.sleep(.2)
                    tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=' + tmdb_api_key + '&external_source=imdb_id'
                    headers = {'User-Agent': User_Agent}
                    tmdbhtml = requests.get(tmdb_url, headers=headers, timeout=20).content
                    match = json.loads(tmdbhtml)
                    movie_results = match['movie_results']
                    tv_results = match['tv_results']
                    # The /find endpoint decides whether the IMDB id maps to
                    # a movie or a show; trust it over the Trakt type.
                    if movie_results:
                        media = "movie"
                        res = movie_results
                    elif tv_results:
                        media = "tv"
                        res = tv_results
                    for results in res:
                        if media == 'movie':
                            icon = results['poster_path']
                            if not icon:
                                key = "thumbnail"
                                show_name = ""
                                missing_art(show_name, name, key, folder_name)
                            date = results['release_date']
                            year = date.split("-")[0]
                            fanart = results['backdrop_path']
                            if not fanart:
                                key = "fanart"
                                show_name = ""
                                missing_art(show_name, name, key, folder_name)
                            tmdb = results['id']
                            if get_trailer == 'true':
                                turl = "https://api.themoviedb.org/3/movie/%s/videos?api_key=%s" % (
                                    tmdb, tmdb_api_key)
                                html = requests.get(turl).json()
                                r = html['results'][0]
                                site = html['results'][0]['site']
                                if site == "YouTube":
                                    trail_key = html['results'][0]['key']
                                else:
                                    trail_key = ""
                        elif media == 'tv':
                            icon = results['poster_path']
                            date = results['first_air_date']
                            year = date.split("-")[0]
                            fanart = results['backdrop_path']
                            tmdb = results['id']
                            if get_trailer == 'true':
                                turl = "https://api.themoviedb.org/3/tv/%s/videos?api_key=%s" % (
                                    tmdb, tmdb_api_key)
                                html = requests.get(turl).json()
                                r = html['results'][0]
                                site = html['results'][0]['site']
                                if site == "YouTube":
                                    trail_key = html['results'][0]['key']
                                else:
                                    trail_key = ""
                            get_tv_seasons(tmdb, fanart, imdb, folder_name)
                except:
                    # TMDB lookup failed entirely: mark artwork unavailable.
                    icon = "none"
                    fanart = "none"
                print_movie_xml(list_name, media, name, year, imdb, tmdb,
                                icon, fanart, folder_name, trail_key)
    except:
        pass
def open_movie_results():
    """Fuzzy-search three movie Airtable bases for a user-entered title
    and render every matching record as Jen <item> XML.

    Fixes over the previous revision:
    * returns immediately after rendering the "not found" item -- the old
      code fell through and called display_list() a second time.
    * the three byte-identical harvest loops and the two identical
      search/render blocks are collapsed into loops.
    * unused ``name3`` locals removed.
    """
    pins = ""
    xml = ""
    show = koding.Keyboard(heading='Movie Name')
    at = Airtable('app27kXZLXlXw0gRh', 'the_duke', api_key='keyikW1exArRfNAWj')
    at2 = Airtable('appvv8DXDsLjqkekU', 'Creature', api_key='keyikW1exArRfNAWj')
    at3 = Airtable('appbXfuDDhnWqYths', 'bnw_movies', api_key='keyikW1exArRfNAWj')
    # Collect every title from all three bases for the fuzzy matcher.
    movie_list = []
    for table in (at, at2, at3):
        for field in table.get_all(maxRecords=1200, sort=["name"]):
            movie_list.append(remove_non_ascii(field['fields']['name']))
    search_result = koding.Fuzzy_Search(show, movie_list)
    if not search_result:
        xbmc.log("--------no results--------", level=xbmc.LOGNOTICE)
        xml += "<item>"\
               "<title>[COLOR=orange][B]Movie was not found[/B][/COLOR]</title>"\
               "</item>"
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
        return  # BUGFIX: previously fell through and rendered twice
    for item in search_result:
        item2 = remove_non_ascii(str(item))
        xbmc.log(item2, level=xbmc.LOGNOTICE)
        # at / at2 share the link_a..link_e schema handled by display_xml().
        for table in (at, at2):
            try:
                for field2 in table.search("name", item2):
                    res2 = field2['fields']
                    name2 = res2["name"]
                    summary = remove_non_ascii(res2['summary'])
                    xml += display_xml(name2, res2['trailer'], summary,
                                       res2['thumbnail'], res2['fanart'],
                                       res2['link_a'], res2['link_b'],
                                       res2['link_c'], res2['link_d'],
                                       res2['link_e'])
            except:
                pass
        # at3 uses link1..link4, with "-" marking the first unused slot.
        try:
            for field2 in at3.search("name", item2):
                res2 = field2['fields']
                name2 = res2["name"]
                summary = remove_non_ascii(res2['summary'])
                links = [res2['link1'], res2['link2'],
                         res2['link3'], res2['link4']]
                if res2['link2'] == "-":
                    links = links[:1]
                elif res2['link3'] == "-":
                    links = links[:2]
                elif res2['link4'] == "-":
                    links = links[:3]
                sublinks = "".join(["<sublink>%s</sublink>" % link
                                    for link in links])
                sublinks += "<sublink>%s(Trailer)</sublink>" % res2['trailer']
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<meta>"\
                       "<content>movie</content>"\
                       "<imdb></imdb>"\
                       "<title></title>"\
                       "<year></year>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<summary>%s</summary>"\
                       "</meta>"\
                       "<link>%s</link>"\
                       "</item>" % (name2, res2['thumbnail'], res2['fanart'],
                                    summary, sublinks)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
def musicmp3ru(url):
    """Route a virtual path to the matching last.fm API request and render
    the JSON response as Jen XML.

    ``url`` looks like ``artist/<name>/albums``, ``album/<artist>/<album>/tracks``,
    ``chart/artists``, ``tag/<tag>/tracks`` or ``search/artist``; a trailing
    digit component selects the page.

    BUGFIX: keyboard-entered search terms are now URL-encoded with
    urllib.quote_plus -- a term containing spaces or ``&`` used to corrupt
    the request query string.
    """
    req_url = BASE_URL
    response_key = None
    __builtin__.content_type = "files"
    if url.startswith("artist"):
        artist = url.split("/")[1]
        if url.endswith("info"):
            req_url += "?method=artist.getinfo&artist=%s" % artist
        elif "albums" in url:
            req_url += "?method=artist.gettopalbums&artist=%s" % artist
            response_key = "topalbums"
            __builtin__.content_type = "albums"
        elif "tracks" in url:
            req_url += "?method=artist.gettoptracks&artist=%s" % artist
            response_key = "toptracks"
            __builtin__.content_type = "songs"
    elif url.startswith("album"):
        splitted = url.split("/")
        artist = splitted[1]
        album = splitted[2]
        if splitted[-1] == "tracks":
            req_url += "?method=album.getinfo&artist=%s&album=%s" % (artist, album)
            response_key = "album"
            __builtin__.content_type = "songs"
    elif url.startswith("chart"):
        if "artists" in url:
            req_url += "?method=chart.gettopartists"
            response_key = "artists"
            __builtin__.content_type = "artists"
        elif "tracks" in url:
            req_url += "?method=chart.gettoptracks"
            response_key = "tracks"
            __builtin__.content_type = "songs"
        elif "tags" in url:
            req_url += "?method=chart.gettoptags"
            response_key = "tags"
    elif url.startswith("tag"):
        splitted = url.split("/")
        tag = splitted[1]
        if splitted[-1] == "tracks":
            req_url += "?method=tag.gettoptracks&tag=%s" % tag
            response_key = "tracks"
            __builtin__.content_type = "songs"
        elif splitted[-1] == "artists":
            req_url += "?method=tag.gettopartists&tag=%s" % tag
            response_key = "topartists"
            __builtin__.content_type = "artists"
        elif splitted[-1] == "albums":
            req_url += "?method=tag.gettopalbums&tag=%s" % tag
            response_key = "albums"
            __builtin__.content_type = "albums"
    elif url.startswith("search"):
        splitted = url.split("/")
        if splitted[-1] == "artist":
            term = koding.Keyboard("Search For Artist")
            response_key = "results"
            # BUGFIX: encode user input so spaces/& don't break the query
            req_url += "?method=artist.search&artist=%s" % urllib.quote_plus(str(term))
        elif splitted[-1] == "album":
            term = koding.Keyboard("Search For Album")
            response_key = "results"
            req_url += "?method=album.search&album=%s" % urllib.quote_plus(str(term))
    req_url += "&api_key=%s&format=json" % LASTFM_API_KEY
    last = url.split("/")[-1]
    if last.isdigit():
        req_url += "&page=%s" % last
    xml = ""
    response = requests.get(req_url).json()
    if response_key:
        response = response[response_key]
    # Dispatch on whichever collection key the endpoint returned.
    for key in response:
        if key == "album":
            for album in response["album"]:
                xml += get_album_xml(album)
        elif key == "tracks":
            images = response["image"]
            try:
                image = images[-1]["#text"]
            except Exception:
                image = ""
            for track in response["tracks"]["track"]:
                xml += get_track_xml(track, image)
        elif key == "track":
            # NOTE(review): called without an image here, unlike the
            # "tracks" branch above -- assumes get_track_xml defaults it.
            for track in response["track"]:
                xml += get_track_xml(track)
        elif key == "artist" and "artist" in url:
            for artist in response["artist"]:
                xml += get_artist_xml(artist)
        elif key == "tag":
            for tag in response["tag"]:
                xml += get_tag_xml(tag)
        elif key == "artistmatches":
            for artist in response['artistmatches']['artist']:
                xml += get_artist_xml(artist)
        elif key == "albummatches":
            for album in response['albummatches']['album']:
                xml += get_search_album_xml(album)
    # last.fm reports pagination inside the "@attr" block.
    try:
        if "@attr" in response:
            pages = int(response["@attr"]["totalPages"])
        else:
            pages = 1
        if pages > 1:
            current_page = int(response["@attr"]["page"])
            if current_page < pages:
                last = url.split("/")[-1]
                if last.isdigit():
                    # Replace the existing trailing page component.
                    next_url = "/".join(url.split("/")[:-1])
                else:
                    next_url = url
                next_url += "/%s" % str(current_page + 1)
                xml += "<dir>\n"\
                       "\t<title>Next Page >></title>\n"\
                       "\t<musicmp3ru>%s</musicmp3ru>\n"\
                       "\t<summary>Go To Page %s</summary>\n"\
                       "</dir>" % (next_url, current_page + 1)
    except:
        pass
    xml = remove_non_ascii(xml)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), __builtin__.content_type)
def Configure_Menus(menutype='HOME_'):
    """Let the user re-point one skin menu item.

    Shows a picker of menu slots (plus a factory-reset entry), then a
    second picker for the shortcut type: super favourites (0), dialog
    plus (1), dialog (2), executable command (3) or reset to default (4).
    The choice is persisted as an add-on setting (HOME_ menus only) and
    as an empty marker file under ``redirects``; stale marker files for
    the other types are removed.  SUBMENU_ entries additionally prompt
    for a display name and are pushed into skin shortcuts.

    NOTE(review): this block was reconstructed from a whitespace-mangled
    source; the if/else nesting below is inferred from the token order --
    confirm against upstream before relying on edge cases.
    """
    # Maps each localised menu label to its skin-shortcuts data file.
    shortcut_file = {
        String(30061): 'x606.DATA.xml',
        String(30077): 'x303.DATA.xml',
        String(30062): 'x404.DATA.xml',
        String(30063): 'x505.DATA.xml',
        String(30064): 'x8.DATA.xml',
        String(30065): 'x12.DATA.xml',
        String(30066): 'x10.DATA.xml',
        String(30067): 'x3.DATA.xml',
        String(30068): 'x1.DATA.xml',
        String(30069): 'x2.DATA.xml',
        String(30070): 'x202.DATA.xml',
        String(30071): 'x.7.DATA.xml',
        String(30072): 'x11.DATA.xml',
        String(30073): 'x4.DATA.xml',
        String(30074): 'x6.DATA.xml',
        String(30075): 'x13.DATA.xml'
    }
    # All selectable menu slots; 30504 is the "reset all" entry.
    master_list = [
        String(30061), String(30077), String(30062), String(30063),
        String(30064), String(30065), String(30066), String(30067),
        String(30068), String(30069), String(30070), String(30071),
        String(30072), String(30073), String(30074), String(30075),
        String(30504)
    ]
    choice = dialog.select(String(30294), sorted(master_list))
    if choice >= 0:
        menu = sorted(master_list)[choice]
        # If it's reset all to factory defaults
        if menu == String(30504):
            Reset_Factory(menutype)
        else:
            new_list = [
                String(30295) % menu, String(30296) % menu, String(30297),
                String(30298), String(30299)
            ]
            choice = dialog.select(String(30303), new_list)
            if choice >= 0:
                # Setting key, e.g. HOME_MOVIES; marker files derive from it.
                my_setting = menutype + menu.upper().replace(' ', '_')
                dialog_plus_user = os.path.join(
                    redirects, '%s_DIALOG_PLUS_USER' % my_setting)
                dialog_user = os.path.join(redirects,
                                           '%s_DIALOG_USER' % my_setting)
                exec_user = os.path.join(redirects,
                                         '%s_EXEC_USER' % my_setting)
                sf_user = os.path.join(redirects, '%s_SF_USER' % my_setting)
                # Every marker file; all but the chosen one get removed.
                delete_array = [
                    dialog_plus_user, dialog_user, exec_user, sf_user
                ]
                if choice == 0:
                    # Super favourites shortcut.
                    my_shortcut = my_setting + '_SF'
                    if menutype == 'HOME_':
                        Addon_Setting(setting=my_setting,
                                      value='super_faves_user')
                    for item in delete_array:
                        if not my_shortcut in item and os.path.exists(
                                item):
                            os.remove(item)
                    if not os.path.exists(sf_user):
                        Text_File(sf_user, 'w', '')
                    else:
                        dialog.ok(String(30308), String(30309))
                        return
                if choice == 1:
                    # "Dialog plus" shortcut.
                    my_shortcut = my_setting + '_DIALOG_PLUS_USER'
                    if menutype == 'HOME_':
                        dolog('Setting %s to: dialog_plus_user' % (my_setting))
                        Addon_Setting(setting=my_setting,
                                      value='dialog_plus_user')
                    for item in delete_array:
                        if not my_shortcut in item and os.path.exists(
                                item):
                            os.remove(item)
                    if not os.path.exists(dialog_plus_user):
                        Text_File(dialog_plus_user, 'w', '')
                    else:
                        dialog.ok(String(30308), String(30309))
                        return
                if choice == 2:
                    # Plain dialog shortcut.
                    my_shortcut = my_setting + '_DIALOG_USER'
                    if menutype == 'HOME_':
                        dolog('Setting %s to: dialog_user' % (my_setting))
                        Addon_Setting(setting=my_setting, value='dialog_user')
                    for item in delete_array:
                        if not my_shortcut in item and os.path.exists(
                                item):
                            os.remove(item)
                    if not os.path.exists(dialog_user):
                        Text_File(dialog_user, 'w', '')
                    else:
                        dialog.ok(String(30308), String(30309))
                        return
                if choice == 3:
                    # Executable command shortcut -- prompts for the command.
                    my_shortcut = my_setting + '_EXEC_USER'
                    if menutype == 'HOME_':
                        dolog('Setting %s to: executable_user' % (my_setting))
                        Addon_Setting(setting=my_setting,
                                      value='executable_user')
                    for item in delete_array:
                        if not my_shortcut in item and os.path.exists(
                                item):
                            os.remove(item)
                    if not os.path.exists(exec_user):
                        my_code = koding.Keyboard(
                            'Enter the command you want to run')
                        if not my_code:
                            return
                        Text_File(exec_user, 'w', my_code)
                    else:
                        dialog.ok(String(30308), String(30309))
                        return
                if choice == 4:
                    # Reset this one slot back to its default.
                    if menutype == 'HOME_':
                        dolog('Setting %s to defaults' % (my_setting))
                        Default_Setting(setting=my_setting, reset=True)
                        # Find the relevant home menu and clear the contents
                        for item in os.listdir(redirects):
                            if item.startswith(my_setting) and item.endswith(
                                    '_USER'):
                                delete_path = os.path.join(redirects, item)
                                try:
                                    os.remove(delete_path)
                                except:
                                    dolog('FAILED TO REMOVE: %s' % delete_path)
                        if os.path.exists(
                                xbmc.translatePath(
                                    'special://home/addons/script.openwindow/default.py'
                                )):
                            xbmc.executebuiltin(
                                'RunScript(special://home/addons/script.openwindow/default.py,update_shares)'
                            )
                        elif os.path.exists(
                                xbmc.translatePath(
                                    'xbmc://home/addons/script.openwindow/default.py'
                                )):
                            xbmc.executebuiltin(
                                'RunScript(xbmc://home/addons/script.openwindow/default.py,update_shares)'
                            )
                    else:
                        # Non-home menus: drop the skin shortcut data file and
                        # the skin hash so the skin rebuilds its menu.
                        try:
                            os.remove(
                                os.path.join(skin_shortcuts,
                                             shortcut_file[menu]))
                            os.remove(
                                os.path.join(skin_shortcuts,
                                             current_skin + '.hash'))
                        except:
                            xbmc.log('FAILED TO DEFAULT SKIN SETTINGS: %s' %
                                     koding.Last_Error())
                    for item in delete_array:
                        if os.path.exists(item):
                            os.remove(item)
                    koding.Refresh('skin')
                # If it's a submenu we try and add to the skinshortcuts and give it a name
                if menutype == 'SUBMENU_' and choice != 4:
                    sub_name = koding.Keyboard(
                        'Enter a name for this sub-menu item')
                    if not sub_name:
                        return
                    Edit_Sub_Menu(shortcut=my_shortcut,
                                  sub_name=sub_name,
                                  shortcut_file=shortcut_file[menu])
    else:
        Main_Menu_Check()
def open_bml_search():
    """Fuzzy-search the seven radio-station Airtable bases for a
    user-entered station name/number and render every matching record.

    Fixes over the previous revision:
    * returns immediately after rendering the "not found" item -- the old
      code fell through and called display_list() a second time.
    * the seven byte-identical harvest blocks and seven byte-identical
      search/render blocks are collapsed into loops over the tables.
    """
    xml = ""
    pins = ""
    show = koding.Keyboard(heading='Station Name or Number')
    api_key = 'keyikW1exArRfNAWj'
    # All seven bases share the same schema (name/summary/art/link1..5).
    tables = [
        Airtable('appEQMKxvYhvxB6fY', 'Radio Stations', api_key=api_key),
        Airtable('appjrEMH0kEoM8GeQ', 'Radio Stations 2', api_key=api_key),
        Airtable('appgveAIgb4kfsMoe', 'Radio Stations 3', api_key=api_key),
        Airtable('appWZAAh8GXRyo6SN', 'Radio Stations 4', api_key=api_key),
        Airtable('appgqVj0DBZjvGtrt', 'Radio Stations 5', api_key=api_key),
        Airtable('appWSAjWWuFRZ2cZb', 'Radio Stations 6', api_key=api_key),
        Airtable('app41uxtTRva32pvc', 'Radio Stations 7', api_key=api_key),
    ]
    # Collect every station name for the fuzzy matcher.
    movie_list = []
    for table in tables:
        for field in table.get_all(maxRecords=1200, sort=['name']):
            movie_list.append(field['fields']['name'])
    search_result = koding.Fuzzy_Search(show, movie_list)
    if not search_result:
        xbmc.log("--------no results--------", level=xbmc.LOGNOTICE)
        xml += "<item>"\
               "<title>[COLOR=orange][B]Station was not found[/B][/COLOR]</title>"\
               "</item>"
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
        return  # BUGFIX: previously fell through and rendered twice
    for item in search_result:
        item2 = remove_non_ascii(str(item))
        for table in tables:
            try:
                for field2 in table.search('name', item2):
                    res2 = field2['fields']
                    name = remove_non_ascii(res2['name'])
                    summary = clean_summary(remove_non_ascii(res2['summary']))
                    xml += display_xml(name, summary, res2['thumbnail'],
                                       res2['fanart'], res2['link1'],
                                       res2['link2'], res2['link3'],
                                       res2['link4'], res2['link5'])
            except:
                # Best effort per table: a missing field skips that table's
                # remaining matches, as before.
                pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
def trakt(url):
    """Fetch a Trakt API endpoint (or prompt for a search term), convert
    the JSON response into Jen <item>/<dir> XML and display it.

    Results are cached via save_to_db()/fetch_from_db() keyed on ``url``.
    Account endpoints (sync/user/recommendations) attach an OAuth Bearer
    token from authenticate(); returns "" when authentication fails.
    Pagination is read from the X-Pagination-* response headers and a
    "Next Page" <dir> entry is appended when more pages exist.

    NOTE(review): this block was reconstructed from a whitespace-mangled
    source; the nesting of the per-item branches is inferred from token
    order -- confirm against upstream before relying on edge cases.
    """
    if url == "search":
        term = koding.Keyboard("Search For")
        url = "https://api.trakt.tv/search/movie,show,person,list?query=%s" % term
    headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': '2',
        'trakt-api-key': TRAKT_API_KEY
    }
    # Endpoints that touch account data need an OAuth bearer token.
    if "sync" in url or "user" in url or "recommendations" in url:
        if "list" not in url or "/me/" in url or "like" in url or "sync" in url:
            auth = authenticate()
            if auth:
                headers['Authorization'] = 'Bearer ' + auth
            else:
                return ""
    pages = None
    # Cache hit yields (xml, content_type); otherwise fetch fresh below.
    xml, __builtin__.content_type = fetch_from_db(url) or (None, None)
    if not xml:
        xml = ""
        response = requests.get(url, headers=headers)
        response_headers = response.headers
        response = response.json()
        # Trakt signals pagination through response headers, not the body.
        page = response_headers.get("X-Pagination-Page", "")
        if page:
            pages = response_headers.get("X-Pagination-Page-Count")
            response = (response, pages)
        if type(response) == tuple:  # paginated
            pages = response[1]
            response = response[0]
        __builtin__.content_type = "files"
        if type(response) == dict:
            # e.g. /people/<id> credits: {job: [items...]}.
            if "people" in url:
                for job in response:
                    for item in response[job]:
                        if "movie" in item:
                            xml += get_movie_xml(item["movie"])
                            __builtin__.content_type = "movies"
                        elif "show" in item:
                            xml += get_show_xml(item["show"])
                            __builtin__.content_type = "tvshows"
        elif type(response) == list:
            for item in response:
                if "/search/" in url:
                    xml += get_search_xml(item)
                elif "lists" in url:
                    if "items" not in url and "likes" not in url:
                        # Listing a user's lists themselves.
                        user_id = url.split("/")[4]
                        xml += get_lists_xml(item, user_id)
                    if "likes/lists" in url:
                        xml += get_likes_xml(item)
                # Wrapped items carry a "movie"/"show"/"person" key.
                if "movie" in item:
                    xml += get_movie_xml(item["movie"])
                    __builtin__.content_type = "movies"
                elif "show" in item:
                    xml += get_show_xml(item["show"])
                    __builtin__.content_type = "tvshows"
                elif "person" in item:
                    xml += get_person_xml(item)
                else:
                    # one of the annoying types
                    if "movies" in url:
                        xml += get_movie_xml(item)
                        __builtin__.content_type = "movies"
                    elif "shows" in url and "season" not in url:
                        xml += get_show_xml(item)
                        __builtin__.content_type = "tvshows"
        if pages:
            # Build the next-page URL, preserving any other query args.
            splitted = url.split("?")
            if len(splitted) > 1:
                args = urlparse.parse_qs(splitted[1])
                page = int(args.get("page", [1])[0])
                if not args.get("page", ""):
                    args["page"] = 2
                else:
                    args["page"] = str(page + 1)
                next_url = "%s?%s" % (splitted[0], urllib.urlencode(args))
            else:
                page = 1
                next_url = urlparse.urljoin(splitted[0], "?page=2")
            xml += "<dir>\n"\
                   "\t<title>Next Page >></title>\n"\
                   "\t<trakt>%s</trakt>\n"\
                   "\t<summary>Go To Page %s</summary>\n"\
                   "</dir>" % (next_url, page + 1)
        xml = remove_non_ascii(xml)
        save_to_db((xml, __builtin__.content_type), url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), __builtin__.content_type)
def open_bml_search():
    """Fuzzy-search seven Airtable bases for a user-entered movie name and
    render every matching record as Jen XML.

    Fixes over the previous revision:
    * returns immediately after rendering the "not found" item -- the old
      code fell through and called display_list() a second time.
    * the seven byte-identical harvest blocks and seven byte-identical
      search/render blocks are collapsed into loops over the tables.
    """
    xml = ""
    show = koding.Keyboard(heading='Movie Name')
    api_key = 'keyikW1exArRfNAWj'
    # All seven bases share the same schema (name/summary/art/link1..5).
    tables = [
        Airtable('appJh8Kyj5UkERsUT', 'Radio Stations', api_key=api_key),
        Airtable('appkEDsIy1skg0rBH', 'Radio Stations 2', api_key=api_key),
        Airtable('appNcFWTkprAJiizT', 'Radio Stations 3', api_key=api_key),
        Airtable('appKUY6MYlvQQO51W', 'Radio Stations 4', api_key=api_key),
        Airtable('appfWHupyJXhgvaum', 'Radio Stations 5', api_key=api_key),
        Airtable('appODokGNYAShltUj', 'Radio Stations 6', api_key=api_key),
        Airtable('appFvuCrqLynvzDup', 'Radio Stations 7', api_key=api_key),
    ]
    # Collect every name for the fuzzy matcher.
    movie_list = []
    for table in tables:
        for field in table.get_all(maxRecords=1200, sort=['name']):
            movie_list.append(field['fields']['name'])
    search_result = koding.Fuzzy_Search(show, movie_list)
    if not search_result:
        xbmc.log("--------no results--------", level=xbmc.LOGNOTICE)
        xml += "<item>"\
               "<title>[COLOR=orange][B]Movie was not found[/B][/COLOR]</title>"\
               "</item>"
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
        return  # BUGFIX: previously fell through and rendered twice
    for item in search_result:
        item2 = remove_non_ascii(str(item))
        for table in tables:
            try:
                for field2 in table.search('name', item2):
                    res2 = field2['fields']
                    name = remove_non_ascii(res2['name'])
                    summary = remove_non_ascii(res2['summary'])
                    xml += display_xml(name, summary, res2['thumbnail'],
                                       res2['fanart'], res2['link1'],
                                       res2['link2'], res2['link3'],
                                       res2['link4'], res2['link5'])
            except:
                # Best effort per table: a missing field skips that table's
                # remaining matches, as before.
                pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())