def kat_page(root, page, sort_field, sort_order):
    """List one page of Kickass Torrents results plus a ">> Next page" item.

    Fetches the KAT RSS feed for *root* at *page*, sorted by *sort_field* /
    *sort_order*, yields every parsed feed entry, then yields a synthetic
    pagination item pointing at the following page of the same route.
    """
    from urlparse import urljoin
    from kmediatorrent.scrapers import rss
    from kmediatorrent.utils import url_get

    # Propagate the requested Kodi content type, if the caller supplied one.
    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    feed_url = urljoin(BASE_URL, "%s/%d" % (root, page))
    query = {
        "rss": "1",
        "field": sort_field,
        "sorder": sort_order,
    }
    feed_xml = url_get(feed_url, headers=HEADERS, params=query)
    for entry in rss.parse(feed_xml, content_type):
        yield entry
    # Trailing pagination entry; forwards all original query args.
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("kat_page", root=root, page=page + 1,
                               sort_field=sort_field, sort_order=sort_order,
                               **plugin.request.args_dict),
    }
def eztv_get_show_seasons(show_id):
    """List the seasons of an EZTV show as Kodi directory items.

    Scrapes the show's EZTV page for episode links while (optionally)
    fetching TVDB metadata in parallel; yields one item per season,
    newest season first, decorated with TVDB fanart when available.
    """
    import random
    from bs4 import BeautifulSoup
    from itertools import groupby
    from concurrent import futures
    from kmediatorrent.utils import first, terminating, url_get
    from kmediatorrent import tvdb
    plugin.set_content("seasons")
    # tvdb_id is optional; when present it unlocks artwork and richer items.
    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            # Fetch and parse the EZTV show page (runs on a pool thread).
            plugin.log.info("Getting show")
            response = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
            plugin.log.info("Got show")
            return BeautifulSoup(response, "html5lib")
        # Kick off the page scrape and the TVDB lookup concurrently.
        soup = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])
        soup = soup.result()
    fanarts = []
    if tvdb_id:
        tvdb_show = tvdb_show.result()
        fanarts = list([
            banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"
        ])
        # Shuffle so each visit shows a different artwork rotation.
        random.shuffle(fanarts)
    # Group scraped episodes by season number (inner dict used as a set).
    seasons = {}
    for node_episode in soup.findAll("a", "epinfo"):
        season, episode = get_episode_data_from_name(node_episode.text)
        seasons.setdefault(season, {})[episode] = True
    for i, season in enumerate(reversed(sorted(seasons.keys()))):
        # Start from the TVDB season item when metadata is available.
        item = tvdb_id and tvdb.get_season_list_item(tvdb_show, season) or {}
        item.update({
            "label": "Season %d [%d episodes]" % (season, len(seasons[season])),
            "path": plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=season, tvdb_id=tvdb_id),
        })
        if fanarts:
            # Rotate through the shuffled fanart pool.
            item.setdefault("properties", {}).update({
                "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
            })
        yield item
def _fn(*a, **kwds):
    """Cached route wrapper: serve the wrapped view's result from a shelf.

    The cache key is derived from the current request path.  Generator
    results are materialised into a list so they can be stored and
    replayed.  The enclosing decorator's ``kwargs`` supply the cache
    ``ttl`` and an optional ``content_type`` applied on every call.
    """
    import hashlib
    import types
    digest = hashlib.sha1(plugin.request.path).hexdigest()
    basename = "kmediatorrent.route.%s" % digest
    with shelf(basename, ttl=kwargs.get("ttl") or 0) as result:
        if not result.get("value"):
            # Cache miss: run the wrapped view and store its output.
            value = fn(*a, **kwds)
            if isinstance(value, types.GeneratorType):
                value = list(value)
            result["value"] = value
        if kwargs.get("content_type"):
            plugin.set_content(kwargs.get("content_type"))
        return result["value"]
def _fn(*a, **kwds):
    """Memoising wrapper around the decorated view, keyed on request path."""
    import hashlib
    key = "kmediatorrent.route.%s" % hashlib.sha1(
        plugin.request.path).hexdigest()
    ttl = kwargs.get("ttl") or 0
    with shelf(key, ttl=ttl) as cached:
        if not cached.get("value"):
            produced = fn(*a, **kwds)
            import types
            # Generators cannot be persisted/replayed; freeze into a list.
            if isinstance(produced, types.GeneratorType):
                produced = list(produced)
            cached["value"] = produced
        if kwargs.get("content_type"):
            # Apply the view's content type on cache hits as well.
            plugin.set_content(kwargs.get("content_type"))
        return cached["value"]
def zelka_page(page, cat, query):
    """Scrape one page of Zelka tracker results (requires login).

    Yields playable magnet items and, when the scraped page advertises a
    following page, a ">> Next page" directory item.  The sentinel query
    value u'!br!' means plain browsing (no search term).
    """
    global s
    s = requests.Session()
    fnd['page'] = page
    fnd['cat'] = cat
    if query != u'!br!':
        fnd['search'] = query
    if do_login():
        r = s.get('%s/browse.php' % BASE_URL, params=fnd, headers=HEADERS)
        # Only <td> elements are needed; strain the parse to keep it light.
        sp = BeautifulSoup(r.text, "html5lib", parse_only=SoupStrainer('td'))
        for link in sp.findAll('a', href=re.compile(r'magnet:\?xt=.*')):
            pr = link.find_parent('td')
            # For cat '0' (all categories) filter rows locally against the
            # known category ids; otherwise the server already filtered.
            if cat != '0' or pr.find_previous_sibling('td').find(
                    'a', href=re.compile(
                        r'browse\.php\?cat=\d+'))['href'].split('=')[1] in ids:
                ss = pr.find_next_siblings('td')
                dat = pr.find('a', href=re.compile(r'details\.php\?id=\d+'))
                title = "%s (%s, DLs:%s)" % (dat.string, ss[3].get_text(' '), ss[5].string)
                # The onmouseover tooltip carries the plot and cover image.
                r = _info_get(dat.get('onmouseover'))
                yield {
                    "label": title,
                    "path": plugin.url_for("play", uri=link['href']),
                    "is_playable": True,
                    "info": {
                        "plot": r['info']
                    },
                    "thumbnail": r['img'],
                    "properties": {
                        "fanart_image": r['img']
                    }
                }
        # "Следваща" is Bulgarian for "Next"; a parent <a> means another page.
        nn = sp.find('b', text=re.compile(u'(?:Следваща.)'))
        if nn and nn.find_parent('a'):
            yield {
                "label": ">> Next page",
                "path": plugin.url_for("zelka_page", query=query, cat=cat, page=int(page) + 1),
                "is_playable": False,
            }
        do_logout()
    s.close()
    plugin.set_content('movies')
def bitsnoop_page(root, page):
    """Fetch one page of BitSnoop results via their RSS feed and parse it."""
    from urlparse import urljoin
    from kmediatorrent.utils import url_get
    from kmediatorrent.scrapers import rss

    # Honour an explicit Kodi content type when the caller passed one.
    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    query = {
        "fmt": "rss",
        "sort": "n_s",
        "dir": "desc",
    }
    feed_xml = url_get(urljoin(BASE_URL, "%s/%d/" % (root, page)),
                       headers=HEADERS, params=query)
    return rss.parse(feed_xml)
def extratorrent_page(type_="", cid="", search="", page=1):
    """Fetch and parse an ExtraTorrent RSS listing.

    Extra query-string arguments from the request are forwarded verbatim;
    ``content_type`` is consumed here (popped) to set the Kodi view type
    rather than being sent to the tracker.
    """
    from urlparse import urljoin
    from kmediatorrent.scrapers import rss
    from kmediatorrent.utils import url_get

    # Pop so content_type is not forwarded as an RSS query parameter.
    content_type = plugin.request.args_dict.pop("content_type", None)
    if content_type:
        plugin.set_content(content_type)

    query = {
        "type": type_,
        "search": search,
        "cid": cid,
    }
    query.update(plugin.request.args_dict)
    feed_xml = url_get(urljoin(BASE_URL, "/rss.xml"), headers=HEADERS,
                       params=query)
    return rss.parse(feed_xml, content_type)
def eztv_get_show_seasons(show_id):
    """List the seasons of an EZTV show as Kodi directory items.

    Scrapes the show's EZTV page for episode links while (optionally)
    fetching TVDB metadata in parallel; yields one item per season,
    newest season first, decorated with TVDB fanart when available.
    """
    import random
    from bs4 import BeautifulSoup
    from itertools import groupby
    from concurrent import futures
    from kmediatorrent.utils import first, terminating, url_get
    from kmediatorrent import tvdb
    plugin.set_content("seasons")
    # tvdb_id is optional; when present it unlocks artwork and richer items.
    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            # Fetch and parse the EZTV show page (runs on a pool thread).
            plugin.log.info("Getting show")
            response = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
            plugin.log.info("Got show")
            return BeautifulSoup(response, "html5lib")
        # Kick off the page scrape and the TVDB lookup concurrently.
        soup = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])
        soup = soup.result()
    fanarts = []
    if tvdb_id:
        tvdb_show = tvdb_show.result()
        fanarts = list([banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"])
        # Shuffle so each visit shows a different artwork rotation.
        random.shuffle(fanarts)
    # Group scraped episodes by season number (inner dict used as a set).
    seasons = {}
    for node_episode in soup.findAll("a", "epinfo"):
        season, episode = get_episode_data_from_name(node_episode.text)
        seasons.setdefault(season, {})[episode] = True
    for i, season in enumerate(reversed(sorted(seasons.keys()))):
        # Start from the TVDB season item when metadata is available.
        item = tvdb_id and tvdb.get_season_list_item(tvdb_show, season) or {}
        item.update({
            "label": "Season %d [%d episodes]" % (season, len(seasons[season])),
            "path": plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=season, tvdb_id=tvdb_id),
        })
        if fanarts:
            # Rotate through the shuffled fanart pool.
            item.setdefault("properties", {}).update({
                "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
            })
        yield item
def zelka_page(page, cat, query):
    """Scrape one page of Zelka tracker results (requires login).

    Yields playable magnet items and, when the scraped page advertises a
    following page, a ">> Next page" directory item.  The sentinel query
    value u"!br!" means plain browsing (no search term).
    """
    global s
    s = requests.Session()
    fnd["page"] = page
    fnd["cat"] = cat
    if query != u"!br!":
        fnd["search"] = query
    if do_login():
        r = s.get("%s/browse.php" % BASE_URL, params=fnd, headers=HEADERS)
        sp = BeautifulSoup(r.text, "html5lib")
        for link in sp.findAll("a", href=re.compile(r"magnet:\?xt=.*")):
            pr = link.find_parent("td")
            # For cat "0" (all categories) filter rows locally against the
            # known category ids; otherwise the server already filtered.
            if (
                cat != "0"
                or pr.find_previous_sibling("td")
                .find("a", href=re.compile(r"browse\.php\?cat=\d+"))["href"]
                .split("=")[1] in ids
            ):
                ss = pr.find_next_siblings("td")
                dat = pr.find("a", href=re.compile(r"details\.php\?id=\d+"))
                title = "%s (%s, DLs:%s)" % (dat.string, ss[3].get_text(" "), ss[5].string)
                # The onmouseover tooltip carries the plot and cover image.
                r = _info_get(dat.get("onmouseover"))
                yield {
                    "label": title,
                    "path": plugin.url_for("play", uri=link["href"]),
                    "is_playable": True,
                    "info": {"plot": r["info"]},
                    "thumbnail": r["img"],
                    "properties": {"fanart_image": r["img"]},
                }
        # "Следваща" is Bulgarian for "Next"; a parent <a> means another page.
        nn = sp.find("b", text=re.compile(u"(?:Следваща.)"))
        if nn and nn.find_parent("a"):
            yield {
                "label": ">> Next page",
                "path": plugin.url_for("zelka_page", query=query, cat=cat, page=int(page) + 1),
                "is_playable": False,
            }
        do_logout()
    s.close()
    plugin.set_content("movies")
def yify_show_data(callback):
    """Render a page of YIFY (API v1) movies as playable Kodi items.

    Fetches the movie list from the YIFY ``list.json`` endpoint, looks up
    TMDB metadata for every movie concurrently behind a cancellable
    progress dialog, then yields one playable item per movie plus a
    pagination item when more pages exist.
    """
    import xbmc
    import xbmcgui
    from contextlib import nested, closing
    from itertools import izip, chain
    from concurrent import futures
    from kmediatorrent import tmdb
    from kmediatorrent.utils import url_get_json, terminating, SafeDialogProgress
    plugin.set_content("movies")
    # Flatten request args: {k: [v, ...]} -> {k: first value}.
    args = dict((k, v[0]) for k, v in plugin.request.args.items())
    current_page = int(args["set"])
    limit = int(args["limit"])
    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")
        try:
            search_result = url_get_json("%s/api/list.json" % BASE_URL, params=args, headers=HEADERS)
        except:
            plugin.notify("Unable to connect to %s." % BASE_URL)
            raise
        movies = search_result.get("MovieList") or []
        if not movies:
            return
        state = {"done": 0}
        def on_movie(future):
            # Progress callback: runs once per finished TMDB lookup.
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(movies)),
                line2=data.get("title") or data.get("MovieTitleClean") or "",
            )
        with futures.ThreadPoolExecutor(max_workers=2) as pool_tmdb:
            tmdb_list = [pool_tmdb.submit(tmdb.get, movie["ImdbCode"]) for movie in movies]
            [future.add_done_callback(on_movie) for future in tmdb_list]
            # Poll so the dialog stays responsive and cancellable.
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)
    tmdb_list = map(lambda job: job.result(), tmdb_list)
    for movie, tmdb_meta in izip(movies, tmdb_list):
        if tmdb_meta:
            item = tmdb.get_list_item(tmdb_meta)
            if args.get("quality") == "all" and movie["Quality"] != "720p":
                # Make non-default qualities visible in the label.
                item["label"] = "%s (%s)" % (item["label"], movie["Quality"])
            item.update({
                "path": plugin.url_for("play", uri=movie["TorrentMagnetUrl"]),
                "is_playable": True,
            })
            item.setdefault("info", {}).update({
                "count": movie["MovieID"],
                "genre": "%s (%s S:%s P:%s)" % (item["info"]["genre"], movie["Size"], movie["TorrentSeeds"], movie["TorrentPeers"]),
                "plot_outline": tmdb_meta["overview"],
                "video_codec": "h264",
            })
            # Default to FullHD dimensions; downgrade for 720p releases.
            width = 1920
            height = 1080
            if movie["Quality"] == "720p":
                width = 1280
                height = 720
            item.setdefault("stream_info", {}).update({
                "video": {
                    "codec": "h264",
                    "width": width,
                    "height": height,
                },
                "audio": {
                    "codec": "aac",
                },
            })
            yield item
    if current_page < (int(search_result["MovieCount"]) / limit):
        next_args = args.copy()
        next_args["set"] = int(next_args["set"]) + 1
        yield {
            "label": ">> Next page",
            "path": plugin.url_for(callback, **next_args),
        }
def eztv_get_episodes_for_season(show_id, season):
    """List the episodes of one EZTV show season as playable Kodi items.

    Scrapes the show's EZTV page (in parallel with an optional TVDB
    metadata fetch), filters episode/magnet link pairs down to *season*,
    and yields one playable item per episode, enriched with TVDB info,
    quality suffixes, stream hints, and rotating fanart when available.
    """
    import copy
    import random
    from bs4 import BeautifulSoup
    from itertools import izip
    from concurrent import futures
    from kmediatorrent.utils import first, terminating, url_get
    from kmediatorrent import tvdb
    plugin.set_content("episodes")
    season = int(season)
    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            # Fetch and parse the EZTV show page (runs on a pool thread).
            return BeautifulSoup(url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS), "html5lib")
        soup = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])
        soup = soup.result()
    items = []
    fanarts = []
    if tvdb_id:
        tvdb_show = tvdb_show.result()
        fanarts = list([banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"])
        random.shuffle(fanarts)
        items = list(tvdb.build_episode_list_items(tvdb_show, int(season)))
    # Episode label nodes and magnet links appear pairwise in page order.
    text_nodes = soup.findAll("a", "epinfo")
    href_nodes = soup.findAll("a", "magnet")
    season_nodes = izip(text_nodes, href_nodes)
    season_nodes = filter(lambda x: get_episode_data_from_name(x[0].text)[0] == season, season_nodes)
    for i, (node_text, node_magnet) in enumerate(season_nodes):
        season, episode = get_episode_data_from_name(node_text.text)
        if tvdb_id and episode >= 0:
            try:
                item = copy.deepcopy(items[int(episode) - 1])
            except IndexError:
                # BUGFIX: was `yield IndexError`, which emitted the exception
                # class as a list item and then crashed on the unbound `item`.
                # EZTV knows an episode TVDB doesn't; skip it instead.
                continue
            for pattern, suffix in (("720p", "(HD)"), ("1080p", "(FullHD)"), ("repack", "(REPACK)"), ("proper", "(PROPER)")):
                if pattern in node_text.text.lower():
                    item["label"] = "%s %s" % (item["label"], suffix)
        else:
            item = {
                "label": node_text.text,
            }
        item.setdefault("info", {}).update({
            "tvshowtitle": node_text.text,
            "title": item["label"],
        })
        # Derive stream hints from markers in the release name.
        stream_info = {}
        if "x264" in node_text.text:
            stream_info["codec"] = item["info"]["video_codec"] = "h264"
        if "xvid" in node_text.text.lower():
            stream_info["codec"] = item["info"]["video_codec"] = "xvid"
        if "720p" in node_text.text:
            stream_info["width"] = 1280
            stream_info["height"] = 720
        if "1080p" in node_text.text:
            stream_info["width"] = 1920
            stream_info["height"] = 1080
        item.update({
            "path": plugin.url_for("play", uri=node_magnet["href"]),
            "stream_info": {"video": stream_info},
            "is_playable": True,
        })
        if fanarts:
            # Rotate through the shuffled fanart pool.
            item.setdefault("properties", {}).update({
                "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
            })
        yield item
def eztv_get_episodes_for_season(show_id, season):
    """List the episodes of one EZTV show season as playable Kodi items.

    Scrapes the show's EZTV page (in parallel with an optional TVDB
    metadata fetch), filters episode/magnet link pairs down to *season*,
    and yields one playable item per episode, enriched with TVDB info,
    quality suffixes, stream hints, and rotating fanart when available.
    """
    import copy
    import random
    from bs4 import BeautifulSoup
    from itertools import izip
    from concurrent import futures
    from kmediatorrent.utils import first, terminating, url_get
    from kmediatorrent import tvdb
    plugin.set_content("episodes")
    season = int(season)
    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            # Fetch and parse the EZTV show page (runs on a pool thread).
            return BeautifulSoup(
                url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS),
                "html5lib")
        soup = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])
        soup = soup.result()
    items = []
    fanarts = []
    if tvdb_id:
        tvdb_show = tvdb_show.result()
        fanarts = list([
            banner for banner in tvdb_show["banners"]
            if banner["bannertype"] == "fanart"
        ])
        random.shuffle(fanarts)
        items = list(tvdb.build_episode_list_items(tvdb_show, int(season)))
    # Episode label nodes and magnet links appear pairwise in page order.
    text_nodes = soup.findAll("a", "epinfo")
    href_nodes = soup.findAll("a", "magnet")
    season_nodes = izip(text_nodes, href_nodes)
    season_nodes = filter(
        lambda x: get_episode_data_from_name(x[0].text)[0] == season,
        season_nodes)
    for i, (node_text, node_magnet) in enumerate(season_nodes):
        season, episode = get_episode_data_from_name(node_text.text)
        if tvdb_id and episode >= 0:
            try:
                item = copy.deepcopy(items[int(episode) - 1])
            except IndexError:
                # BUGFIX: was `yield IndexError`, which emitted the exception
                # class as a list item and then crashed on the unbound `item`.
                # EZTV knows an episode TVDB doesn't; skip it instead.
                continue
            for pattern, suffix in (("720p", "(HD)"), ("1080p", "(FullHD)"),
                                    ("repack", "(REPACK)"), ("proper", "(PROPER)")):
                if pattern in node_text.text.lower():
                    item["label"] = "%s %s" % (item["label"], suffix)
        else:
            item = {
                "label": node_text.text,
            }
        item.setdefault("info", {}).update({
            "tvshowtitle": node_text.text,
            "title": item["label"],
        })
        # Derive stream hints from markers in the release name.
        stream_info = {}
        if "x264" in node_text.text:
            stream_info["codec"] = item["info"]["video_codec"] = "h264"
        if "xvid" in node_text.text.lower():
            stream_info["codec"] = item["info"]["video_codec"] = "xvid"
        if "720p" in node_text.text:
            stream_info["width"] = 1280
            stream_info["height"] = 720
        if "1080p" in node_text.text:
            stream_info["width"] = 1920
            stream_info["height"] = 1080
        item.update({
            "path": plugin.url_for("play", uri=node_magnet["href"]),
            "stream_info": {
                "video": stream_info
            },
            "is_playable": True,
        })
        if fanarts:
            # Rotate through the shuffled fanart pool.
            item.setdefault("properties", {}).update({
                "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
            })
        yield item
def yify_show_data(callback):
    """Render a page of YIFY (API v2) movies as playable Kodi items.

    Fetches the movie list from the YTS ``list_movies`` endpoint, looks up
    TMDB metadata for every movie concurrently behind a cancellable
    progress dialog, then yields one item per torrent that matches the
    requested quality, plus a pagination item when more pages exist.
    """
    import xbmc
    import xbmcgui
    from contextlib import nested, closing
    from itertools import izip, chain
    from concurrent import futures
    from kmediatorrent import tmdb
    from kmediatorrent.utils import url_get_json, terminating, SafeDialogProgress
    plugin.set_content("movies")
    # Flatten request args: {k: [v, ...]} -> {k: first value}.
    args = dict((k, v[0]) for k, v in plugin.request.args.items())
    current_page = int(args["page"])
    limit = int(args["limit"])
    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")
        try:
            search_result = url_get_json("%s/api/v2/list_movies.json" % BASE_URL, params=args, headers=HEADERS)
        except:
            plugin.notify("Unable to connect to %s." % BASE_URL)
            raise
        movies = search_result.get("data").get("movies") or []
        if not movies:
            return
        state = {"done": 0}
        def on_movie(future):
            # Progress callback: runs once per finished TMDB lookup.
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(movies)),
                line2=data.get("title") or data.get("MovieTitleClean") or "",
            )
        with futures.ThreadPoolExecutor(max_workers=2) as pool_tmdb:
            tmdb_list = [pool_tmdb.submit(tmdb.get, movie["imdb_code"]) for movie in movies]
            [future.add_done_callback(on_movie) for future in tmdb_list]
            # Poll so the dialog stays responsive and cancellable.
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)
    tmdb_list = map(lambda job: job.result(), tmdb_list)
    for movie, tmdb_meta in izip(movies, tmdb_list):
        # A movie may carry several torrents (one per quality).
        for tor in movie.get("torrents"):
            if tor["quality"] == args.get("quality") or args.get("quality") == "all":
                if tmdb_meta:
                    item = tmdb.get_list_item(tmdb_meta)
                    if tor["quality"] != "720p":
                        # Make non-default qualities visible in the label.
                        item["label"] = "%s (%s)" % (item["label"], tor["quality"])
                    item.update({
                        "path": plugin.url_for("play", uri=tor["url"]),
                        "is_playable": True,
                    })
                    item.setdefault("info", {}).update({
                        "count": movie["id"],
                        "genre": "%s (%s S:%s P:%s)" % (item["info"]["genre"], tor["size"], tor["seeds"], tor["peers"]),
                        "plot_outline": tmdb_meta["overview"],
                        "video_codec": "h264",
                    })
                    # Default to FullHD dimensions; downgrade for 720p.
                    width = 1920
                    height = 1080
                    if tor["quality"] == "720p":
                        width = 1280
                        height = 720
                    item.setdefault("stream_info", {}).update({
                        "video": {
                            "codec": "h264",
                            "width": width,
                            "height": height,
                        },
                        "audio": {
                            "codec": "aac",
                        },
                    })
                    yield item
    if current_page < (int(search_result["data"]["movie_count"]) / limit):
        next_args = args.copy()
        next_args["page"] = int(next_args["page"]) + 1
        yield {
            "label": ">> Next page",
            "path": plugin.url_for(callback, **next_args),
        }