def piratebay_page(root, page):
    """List one Pirate Bay results page plus a '>> Next page' entry.

    root -- site-relative path such as "/browse/201" or "/search/foo"
    page -- zero-based page number (string or int)
    """
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from stream.utils import url_get

    page = int(page)
    listing_url = urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page))
    soup = BeautifulSoup(url_get(listing_url, headers=HEADERS), "html5lib")

    for name_div in soup.findAll("div", "detName"):
        # Seeds and peers are the 3rd and 4th <td> of the result row
        # that contains this title <div>.
        row_cells = name_div.parent.parent.findAll("td")
        seeds, peers = [cell.text for cell in row_cells[2:]]
        magnet_anchor = name_div.parent.findAll("a")[1]
        description = name_div.parent.findAll("font", "detDesc")[0]
        size = re.search("Size (.*?),", description.text).group(1)
        label = "%s (%s S:%s P:%s)" % (
            name_div.a.text, size.replace(" ", " "), seeds, peers)
        yield {
            "label": label,
            "path": plugin.url_for("play", uri=magnet_anchor["href"]),
            "is_playable": True,
        }

    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
def espoiler_ver_serie(titulo):
    """Episode listing for one espoilerTV series (page ``ficha/<titulo>``)."""
    print "espoiler_ver_serie %s" % titulo
    plugin.set_content("episodes")
    # ``s`` is a module-level requests session; the ficha page lists episodes.
    html_data = s.get( BASE_URL+"ficha/"+titulo )
    soup = BeautifulSoup(html_data.content, "html5lib")
    # Each episode row is a <div> whose class list contains "episodio".
    for node in soup.findAll('div',attrs={'class': re.compile(r".*\bepisodio\b.*")}):
        print node
        # The hidden <input> value is the episode id used by the API routes.
        if node.div.input.has_attr('value'):
            #print node.div.input['value']
            divTitulo = node.findAll('div',attrs={'class': re.compile(r".*\btitulo\b.*")})[0].get_text()
            # data-visto == 'no' means unwatched; anything else counts as seen.
            visto = node.findAll('button',attrs={'class': re.compile(r".*\bvisto\b.*")})[0]['data-visto']
            playcount = 0 if visto=='no' else 2
            print visto + " " + str(playcount)
            # Context menu toggles the watched flag via espoiler_marcar_visto.
            contextMenu = ("Marcar como visto", "XBMC.RunPlugin(%s)" % plugin.url_for("espoiler_marcar_visto", idEpisodio=node.div.input['value'], accion='visto' ))
            if playcount > 0:
                contextMenu = ("Marcar como NO visto", "XBMC.RunPlugin(%s)" % plugin.url_for("espoiler_marcar_visto", idEpisodio=node.div.input['value'], accion='noVisto' ))
            yield {
                'label': '%s - %s' % (node['id'],divTitulo),
                'path': plugin.url_for("espoiler_ver_fuentes", capitulo=node.div.input['value']),
                'is_playable': False,
                'context_menu': [ contextMenu ],
                # NOTE(review): "episode" carries a placeholder string, not a
                # number — looks intentional/debug; confirm Kodi tolerates it.
                'info':{ "episode": "la madre que lo pario", 'playcount': playcount }
            }
def espoiler_calendario(dia=0):
    """Show the espoilerTV calendar for today shifted by ``dia`` days.

    Yields a home shortcut, a previous-day link, one entry per series
    airing on the target day, and a next-day link.
    """
    from datetime import date, timedelta

    offset = int(dia)
    one_day = timedelta(days=1)
    target = date.today() + one_day * offset

    yield {'label': 'espoilerTV Inicio',
           'path': plugin.url_for("espoiler_index"),
           'is_playable': False}
    yield {'label': '<<< ' + (target - one_day).isoformat(),
           'path': plugin.url_for("espoiler_calendario", dia=offset - 1),
           'is_playable': False}

    response = s.get(BASE_URL + "api/v1/calendario?fecha=" + target.isoformat())
    for serie in json.loads(response.content)['series']:
        yield {'label': '%s (S%sE%s)' % (serie['titulo'],
                                         serie['temporada'].zfill(2),
                                         serie['episodio'].zfill(2)),
               'path': plugin.url_for("espoiler_ver_serie", titulo=serie['titBase']),
               'is_playable': False}

    yield {'label': '>>> ' + (target + one_day).isoformat(),
           'path': plugin.url_for("espoiler_calendario", dia=offset + 1),
           'is_playable': False}
def _fn(*a, **kwds):
    """Wrapper around a view function that appends library context menus.

    NOTE(review): ``fn`` and ``plugin`` come from the enclosing decorator's
    scope, which is outside this chunk. Playable items get "Add to
    Movies"/"Add to TV" entries; everything is yielded through unchanged.
    """
    items = fn(*a, **kwds)
    if items is not None:
        for item in items:
            if item.get("is_playable"):
                # Encode to UTF-8 bytes before embedding in the RunPlugin URL.
                label = item["label"].encode("utf-8")
                item.setdefault("context_menu", []).extend([
                    ("Add to Movies", "XBMC.RunPlugin(%s)" % plugin.url_for("library_add", content_type="movies", label=label, href=item["path"])),
                    ("Add to TV", "XBMC.RunPlugin(%s)" % plugin.url_for("library_add", content_type="tvshows", label=label, href=item["path"])),
                ])
            yield item
def make_cats(root, prefix=""):
    """Recursively flatten the category tree into menu entries.

    Category id 207 is routed to the TMDB-enriched ``piratebay_page2``
    view; every other id uses the plain browse listing. Sub-categories
    inherit the parent prefix grown by one space per depth level.
    """
    for entry in root:
        label = "%s%s" % (prefix, entry[0])
        if entry[1] == 207:
            yield {"label": label,
                   "path": plugin.url_for("piratebay_page2", root=entry[1], page=0)}
        else:
            yield {"label": label,
                   "path": plugin.url_for("piratebay_page",
                                          root="/browse/%d" % entry[1], page=0)}
        if len(entry) > 2:
            for sub in make_cats(entry[2], prefix="%s " % prefix):
                yield sub
def mejorenvo_index(): print "mejor en VO!" plugin.set_content("movies") from bs4 import BeautifulSoup from stream.utils import url_get from stream import tmdb import re def filter_Desgargar(el): return el.has_attr('href') and '-pelicula-' in el['href'] and not 'comentarios' in el['href'] for url in ["http://www.mejorenvo.com/descargar-peliculas.html","http://www.mejorenvo.com/peliculas-p2.html"]: html_data = url_get( url, headers=HEADERS) soup = BeautifulSoup(html_data, "html5lib") nodes = soup.findAll(filter_Desgargar) for node in nodes: id = re.search(r'pelicula-(\d+).html', node['href']).group(1) torrent_url = 'http://www.mejorenvo.com/descargar.php?t=peliculas&id=' + id + '&torrent=1' title_year = node.get_text() title = node.stripped_strings.next() info = tmdb.search( title ) info = info['results'][0] print title print info item = tmdb.get_list_item(info) item['label'] = '%s (%f)' % (info['title'], info['vote_average']) item['path'] = plugin.url_for("play", uri=torrent_url) item['is_playable']=True yield item
def popcorn_get_show(show_id):
    """Return the playable episode list for one Popcorn Time show,
    sorted in season/episode order."""
    import json
    from stream.utils import url_get

    show = json.loads(url_get("%sshow/%s" % (BASE_URL, show_id),
                              headers=HEADERS))
    episodes = []
    for ep in show['episodes']:
        entry = {
            "label": "S%02dE%02d %s" % (ep['season'], ep['episode'], ep["title"]),
            "icon": show['images']['poster'],
            "thumbnail": show['images']['poster'],
            "path": plugin.url_for("play", uri=ep['torrents']['0']['url']),
            "stream_info": {"video": {"video": 'x264'}},
            "is_playable": True,
            "properties": {"fanart_image": show['images']['fanart']},
            "info": {
                "tvshowtitle": show['title'],
                "title": ep["title"],
                "season": ep["season"],
                "episode": ep["episode"],
                "premiered": ep["first_aired"],
                # Synthetic sort key: SxxEyy flattened to one integer.
                "orden": 100 * ep['season'] + ep['episode'],
            },
        }
        episodes.append(entry)
    episodes.sort(key=lambda e: e['info']['orden'])
    return episodes
def popcorn_index():
    """Return the cached list of every Popcorn Time show.

    The first call walks all paginated show-index pages and stores the
    resulting menu entries in the "popcorn.shows" shelf; later calls
    serve the cached list directly.
    """
    import json
    from stream.utils import url_get
    with shelf("popcorn.shows") as popcorn_shows:
        #print popcorn_shows
        if not popcorn_shows:
            # The /shows endpoint returns the list of page URLs to fetch.
            pages = json.loads( url_get("%sshows" % BASE_URL, headers=HEADERS) )
            popcorn_shows['shows'] = []
            for page in pages:
                print "%s%s" % (BASE_URL,page)
                response = url_get("%s%s" % (BASE_URL,page), headers=HEADERS)
                # Skip pages that came back empty instead of failing.
                if response=="":
                    continue
                page_shows = json.loads(response)
                for show in page_shows:
                    popcorn_shows['shows'].append( {
                        "label": show['title'],
                        "icon": show['images']['poster'],
                        "thumbnail": show['images']['poster'],
                        "path": plugin.url_for("popcorn_get_show", show_id=show['_id'] ),
                        "is_playable":False,
                        'properties': {'fanart_image': show['images']['fanart'] }
                    } )
        #print popcorn_shows
        return popcorn_shows['shows']
def eztv_index():
    """Top-level EZTV menu: one entry per initial letter ('0-9', 'A'..'Z')."""
    import string

    letters = ["0-9"]
    letters.extend(string.ascii_uppercase)
    for letter in letters:
        yield {
            "label": letter,
            "path": plugin.url_for("eztv_shows_by_letter", letter=letter),
            "is_playable": False,
        }
def yify_search():
    """Take the search phrase from the request args, or prompt for one,
    then redirect to the YIFY query listing."""
    raw = plugin.request.args.get("query")
    query = raw[0] if raw else plugin.keyboard("", "stream - YIFY - Search")
    if query:
        plugin.redirect(plugin.url_for("yify_search_query",
                                       keywords=query, quality="all",
                                       set=1, limit=MOVIES_PER_PAGE))
def make_cats(root, prefix=""):
    """Recursively yield one browse entry per Pirate Bay category.

    Nested sub-category lists (third tuple element) follow their parent
    with the prefix grown by one space per depth level.
    """
    for cat_tuple in root:
        yield {
            "label": "%s%s" % (prefix, cat_tuple[0]),
            "path": plugin.url_for("piratebay_page",
                                   root="/browse/%d" % cat_tuple[1], page=0),
        }
        if len(cat_tuple) > 2:
            deeper = make_cats(cat_tuple[2], prefix="%s " % prefix)
            for child in deeper:
                yield child
def index():
    """Root plugin menu: warn on unsupported platforms, list every module."""
    supported = ("android", "linux", "windows", "darwin")
    if PLATFORM["os"] not in supported:
        plugin.notify("Your system \"%(os)s_%(arch)s\" is not supported. Sorry about that." % PLATFORM, delay=15000)
    for module in MODULES:
        entry = {
            "label": module["name"],
            "thumbnail": module["image"],
            "path": plugin.url_for(module["view"]),
        }
        yield entry
def piratebay_search():
    """Take the query from the request args, or prompt for one, then open
    the TPB search results listing."""
    import urllib

    raw = plugin.request.args.get("query")
    if raw:
        query = raw[0]
    else:
        query = plugin.keyboard("", "The Pirate Bay - Search")
    if query:
        search_root = "/search/%s" % urllib.quote(query, safe="")
        plugin.redirect(plugin.url_for("piratebay_page",
                                       root=search_root, page=0))
def piratebay_page(root, page):
    """Progress-dialog TPB listing: scrapes one result page, resolves
    metadata for every row on a thread pool via piratebay_record(), and
    yields the enriched items plus a '>> Next page' entry.

    root -- site-relative path (e.g. "/browse/201"); page -- page number.
    """
    import xbmc
    import xbmcgui
    from concurrent import futures
    from contextlib import nested, closing
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from stream.utils import url_get
    from itertools import izip, chain
    from stream.utils import url_get_json, terminating, SafeDialogProgress
    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching index page...", line2="", line3="")
        page = int(page)
        try:
            html_data = url_get(urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page)), headers=HEADERS)
        except:
            # NOTE(review): bare except — any failure (not only a timeout)
            # shows the "Timeout" popup and aborts the listing.
            dialog = xbmcgui.Dialog()
            dialog.ok("Piratebay","Timeout")
            return
        soup = BeautifulSoup(html_data, "html5lib")
        nodes = soup.findAll("div", "detName")
        state = {"done": 0}
        def on_movie(future):
            # Completion callback: advance the progress bar with the label
            # of the record that just resolved.
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(nodes)),
                line2=data.get("label") or "",
            )
        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")
        with futures.ThreadPoolExecutor(max_workers=10) as pool_tmdb:
            tmdb_list = [pool_tmdb.submit(piratebay_record, node) for node in nodes]
            [future.add_done_callback(on_movie) for future in tmdb_list]
            # Poll instead of blocking so the user can cancel mid-fetch.
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(200)
        tmdb_list = map(lambda job: job.result(), tmdb_list)
        for node, item in izip(nodes, tmdb_list):
            # NOTE(review): piratebay_record() also stores its result on
            # ``node.item``; the zipped ``item`` is unused and the attribute
            # is what gets yielded here.
            yield node.item
        yield {
            "label": ">> Next page",
            "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
            "is_playable": False,
        }
def espoiler_index(): print "espoilerTV!" plugin.set_content("episodes") yield { "label": ">> Calendario", "path": plugin.url_for("espoiler_calendario", dia=0), 'is_playable': False } r = s.get(BASE_URL + 'api/v1/mitv?grupo=porVer') mitv = json.loads(r.content) for serie in mitv['series']: print serie['titulo'] print plugin.url_for("espoiler_ver_fuentes", capitulo=serie['idEpisodio']) item = {} item['label'] = '%s (S%sE%s)' % (serie['titulo'], serie['temporada'].zfill(2), serie['episodio'].zfill(2)) item['path'] = plugin.url_for("espoiler_ver_serie", titulo=serie['titBase']) item['is_playable'] = False item['replace_context_menu'] = True yield item
def espoiler_play( url ): print "espoiler_play %s" % url html_data = url_get( url, headers=HEADERS) soup = BeautifulSoup(html_data, "html5lib") def filter_Magnet(el): return el.has_attr('href') and 'magnet:' in el['href'] nodes = soup.findAll(filter_Magnet) for node in nodes: yield { 'label': '%s' % node['href'], 'path': plugin.url_for("play", uri=node['href']), 'is_playable': True }
def piratebay_index():
    """TPB root menu: a search entry followed by the flattened category tree."""
    yield {"label": "Search", "path": plugin.url_for("piratebay_search")}

    def make_cats(root, prefix=""):
        # Depth-first walk over (name, id[, children]) tuples; children
        # inherit the parent prefix grown by one space per level.
        for cat in root:
            yield {
                "label": "%s%s" % (prefix, cat[0]),
                "path": plugin.url_for("piratebay_page",
                                       root="/browse/%d" % cat[1], page=0),
            }
            if len(cat) > 2:
                for sub in make_cats(cat[2], prefix="%s " % prefix):
                    yield sub

    for cat in make_cats(CATEGORIES):
        yield cat
def espoiler_ver_fuentes(capitulo):
    """List the download sources reported for one episode id.

    Each entry shows the source domain with its download/report counts
    and opens espoiler_play on that source's page.
    """
    response = s.get(BASE_URL + "serv/asincrono/enlaces.php?id=" + capitulo)
    sources = json.loads(response.content)
    for fuente in sources['vid']:
        label = '%s (%s,%s)' % (fuente['dominio'],
                                fuente['descargas'],
                                fuente['reportes'])
        yield {'label': label,
               'path': plugin.url_for("espoiler_play", url=fuente['url']),
               'is_playable': False}
def eztv_get_show_seasons(show_id):
    """Season-level listing for one EZTV show.

    Fetches the show page and (when a ``tvdb_id`` query arg is present)
    the TVDB metadata in parallel, groups episode links by season, and
    yields one entry per season, newest first, cycling TVDB fanart.
    """
    import random
    from bs4 import BeautifulSoup
    from itertools import groupby
    from concurrent import futures
    from stream.utils import first, terminating, url_get
    from stream import tvdb
    plugin.set_content("seasons")
    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            plugin.log.info("Getting show")
            response = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
            plugin.log.info("Got show")
            return BeautifulSoup(response, "html5lib")
        soup = pool.submit(_eztv_get_show)
        if tvdb_id:
            # TVDB metadata fetched concurrently with the show page.
            tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])
        soup = soup.result()
    fanarts = []
    if tvdb_id:
        tvdb_show = tvdb_show.result()
        fanarts = list([banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"])
        random.shuffle(fanarts)
    # Map season -> {episode: True}, parsed from the episode link names.
    seasons = {}
    for node_episode in soup.findAll("a", "epinfo"):
        season, episode = get_episode_data_from_name(node_episode.text)
        seasons.setdefault(season, {})[episode] = True
    for i, season in enumerate(reversed(sorted(seasons.keys()))):
        # TVDB season art when available, plain dict otherwise.
        item = tvdb_id and tvdb.get_season_list_item(tvdb_show, season) or {}
        item.update({
            "label": "Season %d [%d episodes]" % (season, len(seasons[season])),
            "path": plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=season, tvdb_id=tvdb_id),
        })
        if fanarts:
            # Rotate through the shuffled fanart pool per row.
            item.setdefault("properties", {}).update({
                "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
            })
        yield item
def piratebay_record(node):
    """Build a playable list item for one TPB result row.

    Scrapes seeds/peers/size from the row, then best-effort follows the
    detail page looking for an IMDB link so the item can be enriched
    with TMDB metadata; any failure along that chain simply leaves the
    plain text label. The finished dict is also stored on ``node.item``
    because piratebay_page() yields the attribute after the future
    resolves. Returns the item dict.
    """
    import re
    from stream.utils import url_get
    from urlparse import urljoin
    from stream import tmdb

    seeds, peers = [cell.text for cell in node.parent.parent.findAll("td")[2:]]
    magnet_node = node.parent.findAll("a")[1]
    desc_node = node.parent.findAll("font", "detDesc")[0]
    size = re.search("Size (.*?),", desc_node.text).group(1)
    label = "%s (%s S:%s P:%s)" % (node.a.text, size.replace(" ", " "), seeds, peers)

    item = {}
    # Enrichment chain: detail page -> IMDB link -> TMDB metadata.
    # Was two nested bare ``except: pass`` blocks scattering scratch
    # attributes onto the soup node; narrowed to Exception so
    # KeyboardInterrupt/SystemExit are not swallowed.
    try:
        detail_html = url_get(urljoin(BASE_URL, node.parent.findAll("a")[0]["href"]),
                              headers=HEADERS)
        imdb_match = re.search("http://www.imdb.com/title/tt[0-9]*",
                               detail_html or "")
        if imdb_match:
            imdb_id = re.search(r"(tt\d+)", imdb_match.group(0)).group(0)
            meta = tmdb.get_list_item(tmdb.get(imdb_id))
            if meta:
                item.update(meta)
    except Exception:
        pass
    item.update({
        "label": label,
        "path": plugin.url_for("play", uri=magnet_node["href"]),
        "is_playable": True,
    })
    # Side effect relied on by piratebay_page(): it yields node.item.
    node.item = item
    return node.item
def eztv_shows_by_letter(letter):
    """List every EZTV show whose name starts with ``letter``.

    The full show list is scraped once and cached in the "it.eztv.shows"
    shelf keyed by first letter; each show in the requested bucket is
    then resolved against TVDB on a worker pool behind a progress dialog.
    """
    import re
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import nested, closing
    from itertools import izip, groupby
    from concurrent import futures
    from stream.scrapers import ungenerate
    from stream.utils import terminating, url_get, SafeDialogProgress
    from stream import tvdb
    with shelf("it.eztv.shows") as eztv_shows:
        if not eztv_shows:
            response = url_get("%s/showlist/" % BASE_URL, headers=HEADERS)
            soup = BeautifulSoup(response, "html5lib")
            nodes = soup.findAll("a", "thread_link")
            for node in nodes:
                show_id, show_named_id = node["href"].split("/")[2:4]
                show_name = node.text
                # Skip anchors with no visible title.
                if show_name == "":
                    continue
                # NOTE(review): the trailing comma makes this print the
                # format string followed by a tuple; likely meant ``%``.
                print "#%s %s" , (show_id,show_name)
                show_first_letter = show_name[0].lower()
                # Numeric-leading titles are grouped under "0-9".
                if re.match("\d+", show_first_letter):
                    show_first_letter = "0-9"
                eztv_shows.setdefault(show_first_letter, {}).update({
                    show_id: {
                        "id": show_id,
                        "named_id": show_named_id,
                        "name": node.text,
                    }
                })
        shows_list = sorted(eztv_shows[letter.lower()].values(), key=lambda x: x["name"].lower())
        with closing(SafeDialogProgress(delay_close=0)) as dialog:
            dialog.create(plugin.name)
            dialog.update(percent=0, line1="Fetching series...", line2="", line3="")
            state = {"done": 0}
            def on_serie(future):
                # Progress callback run as each TVDB lookup finishes.
                data = future.result()
                state["done"] += 1
                dialog.update(
                    percent=int(state["done"] * 100.0 / len(shows_list)),
                    line2=data and data["seriesname"] or "",
                )
            with futures.ThreadPoolExecutor(max_workers=5) as pool_tvdb:
                tvdb_list = [pool_tvdb.submit(tvdb.search, show["name"], True) for show in shows_list]
                [future.add_done_callback(on_serie) for future in tvdb_list]
                # Poll so the user can cancel while lookups are running.
                while not all(job.done() for job in tvdb_list):
                    if dialog.iscanceled():
                        return
                    xbmc.sleep(100)
            print shows_list
            # Drop failed/None lookups instead of letting result() raise.
            # NOTE(review): removing entries here shifts the izip pairing
            # below, so shows after a failed lookup get the wrong TVDB
            # record; padding with None would keep alignment.
            tvdb_list_new = []
            for job in tvdb_list:
                try:
                    result = job.result()
                    print "RESULT: (%s)" % result
                    if result is not None:
                        tvdb_list_new.append(result)
                except:
                    None
            tvdb_list = tvdb_list_new
            #tvdb_list = [job.result() for job in tvdb_list]
            print tvdb_list
            for i, (eztv_show, tvdb_show) in enumerate(izip(shows_list, tvdb_list)):
                if tvdb_show:
                    item = tvdb.get_list_item(tvdb_show)
                    item.update({
                        "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
                    })
                    yield item
                else:
                    yield {
                        "label": eztv_show["name"],
                        "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
                    }
def eztv_get_episodes_for_season(show_id, season):
    """Episode listing for one season of an EZTV show.

    Fetches the show page (and TVDB metadata when a ``tvdb_id`` query
    arg is present) in parallel, pairs each episode title with its
    magnet link, filters to the requested season, and yields playable
    items annotated with quality/codec stream info.
    """
    import copy
    import random
    from bs4 import BeautifulSoup
    from itertools import izip
    from concurrent import futures
    from stream.utils import first, terminating, url_get
    from stream import tvdb
    plugin.set_content("episodes")
    season = int(season)
    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            return BeautifulSoup(url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS), "html5lib")
        soup = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])
        soup = soup.result()
    items = []
    fanarts = []
    if tvdb_id:
        tvdb_show = tvdb_show.result()
        fanarts = list([banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"])
        random.shuffle(fanarts)
        # Pre-built TVDB items indexed by episode number (1-based).
        items = list(tvdb.build_episode_list_items(tvdb_show, int(season)))
    # Episode names and magnet anchors appear in matching order on the page.
    text_nodes = soup.findAll("a", "epinfo")
    href_nodes = soup.findAll("a", "magnet")
    season_nodes = izip(text_nodes, href_nodes)
    season_nodes = filter(lambda x: get_episode_data_from_name(x[0].text)[0] == season, season_nodes)
    for i, (node_text, node_magnet) in enumerate(season_nodes):
        season, episode = get_episode_data_from_name(node_text.text)
        if tvdb_id and episode >= 0:
            # Copy the TVDB item so per-release label suffixes don't leak
            # into the shared list.
            item = copy.deepcopy(items[int(episode) - 1])
            for pattern, suffix in (("720p", "(HD)"), ("1080p", "(FullHD)"), ("repack", "(REPACK)"), ("proper", "(PROPER)")):
                if pattern in node_text.text.lower():
                    item["label"] = "%s %s" % (item["label"], suffix)
        else:
            item = {
                "label": node_text.text,
            }
        item.setdefault("info", {}).update({
            "tvshowtitle": node_text.text,
            "title": item["label"],
        })
        # Infer codec/resolution hints from the release name.
        stream_info = {}
        if "x264" in node_text.text:
            stream_info["codec"] = item["info"]["video_codec"] = "h264"
        if "xvid" in node_text.text.lower():
            stream_info["codec"] = item["info"]["video_codec"] = "xvid"
        if "720p" in node_text.text:
            stream_info["width"] = 1280
            stream_info["height"] = 720
        if "1080p" in node_text.text:
            stream_info["width"] = 1920
            stream_info["height"] = 1080
        item.update({
            "path": plugin.url_for("play", uri=node_magnet["href"]),
            "stream_info": {"video": stream_info},
            "is_playable": True,
        })
        if fanarts:
            # Rotate through the shuffled fanart pool per row.
            item.setdefault("properties", {}).update({
                "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
            })
        yield item
def yify_genres():
    """One menu entry per YIFY genre, default-sorted by seed count."""
    for name in GENRES:
        genre_path = plugin.url_for("yify_genre", genre=name, sort="seeds",
                                    order="desc", quality="all", set=1,
                                    limit=MOVIES_PER_PAGE)
        yield {"label": name, "path": genre_path}
def piratebay_page2(root, page):
    """BluRay-filtered TPB search listing with inline TMDB enrichment.

    Unlike piratebay_page(), this searches "BluRay" within category
    ``root`` and looks each title up on TMDB synchronously per row,
    falling back to a plain text item on lookup miss or any error.
    """
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from stream.utils import url_get
    from stream import tmdb
    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "search/BluRay/%s/7/%s" % (page, root)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll("div", "detName")
    for node in nodes:
        # Seeds/peers live in the 3rd/4th cells of the result row.
        seeds, peers = map(lambda x: x.text, node.parent.parent.findAll("td")[2:])
        magnet_node = node.parent.findAll("a")[1]
        desc_node = node.parent.findAll("font", "detDesc")[0]
        size = re.search("Size (.*?),", desc_node.text).group(1)
        text = "%s (%s S:%s P:%s)" % (node.a.text, size.replace(" ", " "), seeds, peers)
        try:
            # Cut the release name at the year / resolution tag / "(" to
            # get a searchable movie title; dots become spaces.
            title = re.search("(.*?)(\d{4}|720p|1080p|\()", node.a.text).group(1).replace(".", " ")
            tmdb_list = tmdb.search(title)
            if len(tmdb_list["results"]) > 0:
                tmdb_meta = tmdb.get(tmdb_list["results"][0]["id"])
                item = tmdb.get_list_item(tmdb_meta)
                # Assume 1080p unless the release name says 720p.
                width = 1920
                height = 1080
                if node.a.text.find("720p") >= 0:
                    width = 1280
                    height = 720
                item.update({
                    "path": plugin.url_for("play", uri=magnet_node["href"]),
                    "is_playable": True,
                    "label": "%s %sp (%s) S:%s P:%s" % (item["label"], height, size.replace(" ", " "), seeds, peers),
                })
                audio = "aac"
                if node.a.text.find("DTS") >= 0:
                    audio = "dts"
                item.setdefault("stream_info", {}).update({
                    "video": {
                        "codec": "h264",
                        "width": width,
                        "height": height,
                    },
                    "audio": {
                        "codec": audio,
                    },
                })
                yield item
            else:
                # TMDB miss: plain playable row with the scraped label.
                yield {
                    "label": text,
                    "path": plugin.url_for("play", uri=magnet_node["href"]),
                    "is_playable": True,
                }
        except Exception as e:
            # Enrichment is best-effort; never drop the row.
            print e
            yield {
                "label": text,
                "path": plugin.url_for("play", uri=magnet_node["href"]),
                "is_playable": True,
            }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page2", root=root, page=page + 1),
        "is_playable": False,
    }
def eztv_shows_by_letter(letter):
    """List every EZTV show whose name starts with ``letter``.

    Variant without per-lookup error filtering: the full show list is
    scraped once and cached in the "it.eztv.shows" shelf keyed by first
    letter, then each show in the requested bucket is resolved against
    TVDB on a worker pool behind a progress dialog.
    """
    import re
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import nested, closing
    from itertools import izip, groupby
    from concurrent import futures
    from stream.scrapers import ungenerate
    from stream.utils import terminating, url_get, SafeDialogProgress
    from stream import tvdb
    with shelf("it.eztv.shows") as eztv_shows:
        if not eztv_shows:
            response = url_get("%s/showlist/" % BASE_URL, headers=HEADERS)
            soup = BeautifulSoup(response, "html5lib")
            nodes = soup.findAll("a", "thread_link")
            for node in nodes:
                show_id, show_named_id = node["href"].split("/")[2:4]
                show_name = node.text
                show_first_letter = show_name[0].lower()
                # Numeric-leading titles are grouped under "0-9".
                if re.match("\d+", show_first_letter):
                    show_first_letter = "0-9"
                eztv_shows.setdefault(show_first_letter, {}).update({
                    show_id: {
                        "id": show_id,
                        "named_id": show_named_id,
                        "name": node.text,
                    }
                })
        shows_list = sorted(eztv_shows[letter.lower()].values(), key=lambda x: x["name"].lower())
        with closing(SafeDialogProgress(delay_close=0)) as dialog:
            dialog.create(plugin.name)
            dialog.update(percent=0, line1="Fetching series...", line2="", line3="")
            state = {"done": 0}
            def on_serie(future):
                # Progress callback run as each TVDB lookup finishes.
                data = future.result()
                state["done"] += 1
                dialog.update(
                    percent=int(state["done"] * 100.0 / len(shows_list)),
                    line2=data and data["seriesname"] or "",
                )
            with futures.ThreadPoolExecutor(max_workers=5) as pool_tvdb:
                tvdb_list = [pool_tvdb.submit(tvdb.search, show["name"], True) for show in shows_list]
                [future.add_done_callback(on_serie) for future in tvdb_list]
                # Poll so the user can cancel while lookups are running.
                while not all(job.done() for job in tvdb_list):
                    if dialog.iscanceled():
                        return
                    xbmc.sleep(100)
            # NOTE(review): job.result() re-raises any worker exception, so
            # a single failed TVDB lookup aborts the whole listing; the
            # sibling variant of this view filters failures instead.
            tvdb_list = [job.result() for job in tvdb_list]
            for i, (eztv_show, tvdb_show) in enumerate(izip(shows_list, tvdb_list)):
                if tvdb_show:
                    item = tvdb.get_list_item(tvdb_show)
                    item.update({
                        "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
                    })
                    yield item
                else:
                    yield {
                        "label": eztv_show["name"],
                        "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
                    }
def yify_show_data(callback):
    """Shared listing body for every YIFY browse/search view.

    Reads the request args (set/limit/quality/...), fetches one page of
    the YIFY list API, resolves TMDB metadata for each movie on a thread
    pool behind a progress dialog, and yields playable items plus a
    '>> Next page' entry while more pages remain.

    callback -- route name used to build the next-page URL.
    """
    import xbmc
    import xbmcgui
    from contextlib import nested, closing
    from itertools import izip, chain
    from concurrent import futures
    from stream import tmdb
    from stream.utils import url_get_json, terminating, SafeDialogProgress
    plugin.set_content("movies")
    # Flatten the multi-valued query args to single values.
    args = dict((k, v[0]) for k, v in plugin.request.args.items())
    current_page = int(args["set"])
    limit = int(args["limit"])
    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")
        try:
            search_result = url_get_json("%s/api/list.json" % BASE_URL, params=args, headers=HEADERS)
        except:
            # Surface a notification, then re-raise so the error is visible.
            plugin.notify("Unable to connect to %s." % BASE_URL)
            raise
        movies = search_result.get("MovieList") or []
        if not movies:
            return
        state = {"done": 0}
        def on_movie(future):
            # Progress callback run as each TMDB fetch finishes.
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(movies)),
                line2=data.get("title") or data.get("MovieTitleClean") or "",
            )
        with futures.ThreadPoolExecutor(max_workers=2) as pool_tmdb:
            tmdb_list = [pool_tmdb.submit(tmdb.get, movie["ImdbCode"]) for movie in movies]
            [future.add_done_callback(on_movie) for future in tmdb_list]
            # Poll so the user can cancel while fetches are running.
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)
        tmdb_list = map(lambda job: job.result(), tmdb_list)
        for movie, tmdb_meta in izip(movies, tmdb_list):
            if tmdb_meta:
                item = tmdb.get_list_item(tmdb_meta)
                # Tag non-720p releases in the label when browsing "all".
                if args.get("quality") == "all" and movie["Quality"] != "720p":
                    item["label"] = "%s (%s)" % (item["label"], movie["Quality"])
                item.update({
                    "path": plugin.url_for("play", uri=movie["TorrentMagnetUrl"]),
                    "is_playable": True,
                })
                item.setdefault("info", {}).update({
                    "count": movie["MovieID"],
                    "genre": "%s (%s S:%s P:%s)" % (item["info"]["genre"], movie["Size"], movie["TorrentSeeds"], movie["TorrentPeers"]),
                    "plot_outline": tmdb_meta["overview"],
                    "video_codec": "h264",
                })
                # Resolution derived from the reported quality tag.
                width = 1920
                height = 1080
                if movie["Quality"] == "720p":
                    width = 1280
                    height = 720
                item.setdefault("stream_info", {}).update({
                    "video": {
                        "codec": "h264",
                        "width": width,
                        "height": height,
                    },
                    "audio": {
                        "codec": "aac",
                    },
                })
                yield item
        # Pagination entry while more result pages remain.
        if current_page < (int(search_result["MovieCount"]) / limit):
            next_args = args.copy()
            next_args["set"] = int(next_args["set"]) + 1
            yield {
                "label": ">> Next page",
                "path": plugin.url_for(callback, **next_args),
            }
def yify_index():
    """Static YIFY root menu: search, genre browser, and three sort
    orders each offered in all/720p/1080p variants."""
    def movies_path(sort, quality):
        # All sort entries share order/paging defaults.
        return plugin.url_for("yify_movies", sort=sort, order="desc",
                              quality=quality, set=1, limit=MOVIES_PER_PAGE)

    entries = [
        {"label": "Search", "path": plugin.url_for("yify_search")},
        {"label": "Browse by Genre", "path": plugin.url_for("yify_genres")},
    ]
    for title, sort in (("Most Popular", "seeds"),
                        ("Best Rated", "rating"),
                        ("Most Recent", "date")):
        entries.append({"label": title, "path": movies_path(sort, "all")})
        entries.append({"label": " in 720p", "path": movies_path(sort, "720p")})
        entries.append({"label": " in 1080p", "path": movies_path(sort, "1080p")})
    return entries