def piratebay_page(root, page):
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from streamajoker.utils import url_get

    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll("div", "detName")
    for node in nodes:
        seeds, peers = map(lambda x: x.text, node.parent.parent.findAll("td")[2:])
        magnet_node = node.parent.findAll("a")[1]
        desc_node = node.parent.findAll("font", "detDesc")[0]
        size = re.search("Size (.*?),", desc_node.text).group(1)
        # The Pirate Bay separates the size value with a non-breaking space
        # (u"\xa0"); normalize it to a plain space for the label.
        text = "%s (%s S:%s P:%s)" % (node.a.text, size.replace(u"\xa0", u" "), seeds, peers)
        yield {
            "label": text,
            "path": plugin.url_for("play", uri=magnet_node["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
def btdigg_page(query, sort, page):
    from bs4 import BeautifulSoup
    from streamajoker.utils import url_get

    html_data = url_get("%s/search" % BASE_URL, headers=HEADERS, params={
        "order": sort,
        "q": query,
        "p": page,
    })
    soup = BeautifulSoup(html_data, "html5lib")
    name_nodes = soup.findAll("td", "torrent_name")
    attr_nodes = soup.findAll("table", "torrent_name_tbl")[1::2]
    for name_node, attr_node in zip(name_nodes, attr_nodes):
        attrs = attr_node.findAll("span", "attr_val")
        title = "%s (%s, DLs:%s)" % (name_node.find("a").text, attrs[0].text, attrs[2].text)
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=attr_node.find("a")["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("btdigg_page", query=query, sort=sort, page=int(page) + 1),
        "is_playable": False,
    }
def _fn(*a, **kwds):
    # Wrapper around a scraper view; ``fn`` comes from the enclosing
    # decorator scope.  Every playable item the view yields gains
    # "Add to Movies" / "Add to TV" context-menu entries.
    items = fn(*a, **kwds)
    if items is not None:
        for item in items:
            if item.get("is_playable"):
                label = item["label"].encode("utf-8")
                item.setdefault("context_menu", []).extend([
                    ("Add to Movies", "XBMC.RunPlugin(%s)" % plugin.url_for("library_add", content_type="movies", label=label, href=item["path"])),
                    ("Add to TV", "XBMC.RunPlugin(%s)" % plugin.url_for("library_add", content_type="tvshows", label=label, href=item["path"])),
                ])
            yield item
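# A minimal sketch of the decorator that presumably wraps scraper views with
# the ``_fn`` generator above (the name ``library_context`` and the use of
# functools.wraps are assumptions; only the inner wrapper survives in this
# section):
import functools

def library_context(fn):
    @functools.wraps(fn)
    def _fn(*a, **kwds):
        items = fn(*a, **kwds)
        if items is not None:
            for item in items:
                # context-menu injection for playable items, as defined above
                yield item
    return _fn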
def extratorrent_index():
    cats = [
        {"label": "Search Movies", "path": plugin.url_for("extratorrent_search", cid="4", content_type="movies")},
        {"label": "Search TV Episodes", "path": plugin.url_for("extratorrent_search", cid="8")},
        {"label": "Search Anime", "path": plugin.url_for("extratorrent_search", cid="1")},
        {"label": "Popular Movies", "path": plugin.url_for("extratorrent_page", type_="popular", cid="4", content_type="movies")},
        {"label": "Popular TV Episodes", "path": plugin.url_for("extratorrent_page", type_="popular", cid="8")},
        {"label": "Popular Anime", "path": plugin.url_for("extratorrent_page", type_="popular", cid="1")},
        {"label": "Last Movies", "path": plugin.url_for("extratorrent_page", type_="last", cid="4", content_type="movies")},
        {"label": "Last TV Episodes", "path": plugin.url_for("extratorrent_page", type_="last", cid="8")},
        {"label": "Last Anime", "path": plugin.url_for("extratorrent_page", type_="last", cid="1")},
    ]
    return cats
def yify_genres():
    for genre in GENRES:
        yield {
            "label": genre,
            "path": plugin.url_for("yify_genre", genre=genre, sort="seeds", order="desc", quality="all", set=1, limit=MOVIES_PER_PAGE),
        }
def eztv_index():
    import string
    for letter in ["0-9"] + list(string.ascii_uppercase):
        yield {
            "label": letter,
            "path": plugin.url_for("eztv_shows_by_letter", letter=letter),
            "is_playable": False,
        }
def nyaa_search():
    query = plugin.request.args.get("query")
    if query:
        query = query[0]
    else:
        query = plugin.keyboard("", "streamajoker - NyaaTorrents - Search")
    if query:
        plugin.redirect(plugin.url_for("search_result_page", page="search", cats="0_0", filter=0, term=query, offset=1, sort=SORT_DATE, order=SORT_DESCENDING))
def make_cats(root, prefix=""): for cat in root: yield { "label": "%s%s" % (prefix, cat[0]), "path": plugin.url_for("piratebay_page", root="/browse/%d" % cat[1], page=0), } if len(cat) > 2: for entry in make_cats(cat[2], prefix="%s " % prefix): yield entry
def bitsnoop_search():
    import urllib

    query = plugin.request.args_dict.pop("query", None)
    if not query:
        query = plugin.keyboard("", "streamajoker - Bitsnoop - Search")
    if query:
        plugin.redirect(plugin.url_for("bitsnoop_page", root="/search/video/%s/c/d/" % urllib.quote("%s safe:no" % query, safe=""), page=1, **plugin.request.args_dict))
def make_cats(root, prefix=""): for cat in root: yield { "label": "%s%s" % (prefix, cat[0]), "path": plugin.url_for("default_nyaa_page", cats=cat[1], offset=0, sort=SORT_SEEDERS, order=SORT_DESCENDING), } if len(cat) > 2: for entry in make_cats(cat[2], prefix="%s " % prefix): yield entry
def index():
    if PLATFORM["os"] not in ["android", "linux", "windows", "darwin"]:
        plugin.notify("Your system \"%(os)s_%(arch)s\" is not supported." % PLATFORM, delay=15000)
    for module in MODULES:
        yield {
            "label": module["name"],
            "thumbnail": module["image"],
            "path": plugin.url_for(module["view"]),
        }
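# Illustrative sketch of the MODULES entries index() iterates over; the
# concrete names, images, and view endpoints below are assumptions, not the
# addon's actual registry:
MODULES_EXAMPLE = [
    {"name": "EZTV", "image": "eztv.png", "view": "eztv_index"},
    {"name": "YIFY", "image": "yify.png", "view": "yify_index"},
]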
def yify_search():
    query = plugin.request.args.get("query")
    if query:
        query = query[0]
    else:
        query = plugin.keyboard("", "streamajoker - YIFY - Search")
    if query:
        plugin.redirect(plugin.url_for("yify_search_query", keywords=query, quality="all", set=1, limit=MOVIES_PER_PAGE))
def piratebay_search():
    import urllib

    query = plugin.request.args.get("query")
    if query:
        query = query[0]
    else:
        query = plugin.keyboard("", "streamajoker - The Pirate Bay - Search")
    if query:
        plugin.redirect(plugin.url_for("piratebay_page", root="/search/%s" % urllib.quote(query, safe=""), page=0))
def eztv_get_show_seasons(show_id):
    import random
    from bs4 import BeautifulSoup
    from concurrent import futures
    from streamajoker.utils import first, url_get
    from streamajoker import tvdb

    plugin.set_content("seasons")
    tvdb_id = first(plugin.request.args.get("tvdb_id"))

    # Fetch the EZTV show page and the TVDB metadata concurrently.
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            plugin.log.info("Getting show")
            response = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
            plugin.log.info("Got show")
            return BeautifulSoup(response, "html5lib")
        soup = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])
        soup = soup.result()

    fanarts = []
    if tvdb_id:
        tvdb_show = tvdb_show.result()
        fanarts = [banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"]
        random.shuffle(fanarts)

    seasons = {}
    for node_episode in soup.findAll("a", "epinfo"):
        season, episode = get_episode_data_from_name(node_episode.text)
        seasons.setdefault(season, {})[episode] = True

    for i, season in enumerate(reversed(sorted(seasons.keys()))):
        item = tvdb_id and tvdb.get_season_list_item(tvdb_show, season) or {}
        item.update({
            "label": "Season %d [%d episodes]" % (season, len(seasons[season])),
            "path": plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=season, tvdb_id=tvdb_id),
        })
        if fanarts:
            item.setdefault("properties", {}).update({
                "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
            })
        yield item
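# ``get_episode_data_from_name`` is called above and below but not defined in
# this section.  A minimal sketch, assuming EZTV release names follow the
# usual "S01E02" / "1x02" patterns and that (-1, -1) signals no match (the
# episode >= 0 check below relies on that):
import re

def get_episode_data_from_name(name):
    match = re.search(r"S(\d+)E(\d+)|(\d+)x(\d+)", name, re.IGNORECASE)
    if not match:
        return -1, -1
    groups = [g for g in match.groups() if g is not None]
    return int(groups[0]), int(groups[1])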
def nyaa_page(cats, offset, sort, order, term=""): from streamajoker.scrapers import rss from streamajoker.utils import url_get offset = int(offset) rss_data = url_get(BASE_URL, headers=HEADERS, params={ "cats": cats, "offset": "%d" % offset, "sort": sort, "order": order, "term": term, "page": "rss", }) for item in rss.parse(rss_data): yield item yield { "label": ">> Next page", "path": plugin.url_for(term and "search_result_page" or "default_nyaa_page", cats=cats, sort=sort, order=order, term=term, offset=offset + 1), "is_playable": False, }
def btdigg_index():
    plugin.redirect(plugin.url_for("btdigg_search"))
def btdigg_search():
    query = plugin.request.args_dict.pop("query", None)
    if not query:
        query = plugin.keyboard("", "streamajoker - BTDigg - Search")
    if query:
        plugin.redirect(plugin.url_for("btdigg_page", query=query, sort=SORT_POPULARITY, page=0, **plugin.request.args_dict))
def eztv_shows_by_letter(letter):
    import re
    import xbmc
    from bs4 import BeautifulSoup
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from streamajoker.utils import url_get, SafeDialogProgress
    from streamajoker import tvdb

    # The show list is cached in a shelf and only scraped on first use.
    with shelf("it.eztv.shows") as eztv_shows:
        if not eztv_shows:
            response = url_get("%s/showlist/" % BASE_URL, headers=HEADERS)
            soup = BeautifulSoup(response, "html5lib")
            nodes = soup.findAll("a", "thread_link")
            for node in nodes:
                show_id, show_named_id = node["href"].split("/")[2:4]
                show_name = node.text
                show_first_letter = show_name[0].lower()
                if re.match(r"\d+", show_first_letter):
                    show_first_letter = "0-9"
                eztv_shows.setdefault(show_first_letter, {}).update({
                    show_id: {
                        "id": show_id,
                        "named_id": show_named_id,
                        "name": node.text,
                    }
                })
        shows_list = sorted(eztv_shows[letter.lower()].values(), key=lambda x: x["name"].lower())

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching series information...", line2="", line3="")

        state = {"done": 0}

        def on_serie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(shows_list)),
                line2=data and data["seriesname"] or "",
            )

        with futures.ThreadPoolExecutor(max_workers=5) as pool_tvdb:
            tvdb_list = [pool_tvdb.submit(tvdb.search, show["name"], True) for show in shows_list]
            [future.add_done_callback(on_serie) for future in tvdb_list]
            while not all(job.done() for job in tvdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    tvdb_list = [job.result() for job in tvdb_list]
    for i, (eztv_show, tvdb_show) in enumerate(izip(shows_list, tvdb_list)):
        if tvdb_show:
            item = tvdb.get_list_item(tvdb_show)
            item.update({
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
            })
            yield item
        else:
            yield {
                "label": eztv_show["name"],
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
            }
def yify_show_data(callback):
    import xbmc
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from streamajoker import tmdb
    from streamajoker.utils import url_get_json, SafeDialogProgress

    plugin.set_content("movies")
    args = dict((k, v[0]) for k, v in plugin.request.args.items())

    current_page = int(args["set"])
    limit = int(args["limit"])

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")

        try:
            search_result = url_get_json("%s/api/list.json" % BASE_URL, params=args, headers=HEADERS)
        except Exception:
            plugin.notify("Unable to connect to %s." % BASE_URL)
            raise
        movies = search_result.get("MovieList") or []

        if not movies:
            return

        state = {"done": 0}

        def on_movie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(movies)),
                line2=data.get("title") or data.get("MovieTitleClean") or "",
            )

        with futures.ThreadPoolExecutor(max_workers=2) as pool_tmdb:
            tmdb_list = [pool_tmdb.submit(tmdb.get, movie["ImdbCode"]) for movie in movies]
            [future.add_done_callback(on_movie) for future in tmdb_list]
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    tmdb_list = map(lambda job: job.result(), tmdb_list)
    for movie, tmdb_meta in izip(movies, tmdb_list):
        if tmdb_meta:
            item = tmdb.get_list_item(tmdb_meta)
            if args.get("quality") == "all" and movie["Quality"] != "720p":
                item["label"] = "%s (%s)" % (item["label"], movie["Quality"])
            item.update({
                "path": plugin.url_for("play", uri=movie["TorrentMagnetUrl"]),
                "is_playable": True,
            })
            item.setdefault("info", {}).update({
                "count": movie["MovieID"],
                "genre": "%s (%s S:%s P:%s)" % (item["info"]["genre"], movie["Size"], movie["TorrentSeeds"], movie["TorrentPeers"]),
                "plot_outline": tmdb_meta["overview"],
                "video_codec": "h264",
            })
            width, height = (1280, 720) if movie["Quality"] == "720p" else (1920, 1080)
            item.setdefault("stream_info", {}).update({
                "video": {"codec": "h264", "width": width, "height": height},
                "audio": {"codec": "aac"},
            })
            yield item

    if current_page < (int(search_result["MovieCount"]) / limit):
        next_args = args.copy()
        next_args["set"] = int(next_args["set"]) + 1
        yield {
            "label": ">> Next page",
            "path": plugin.url_for(callback, **next_args),
        }
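# A sketch of how a route view might delegate to yify_show_data(), passing
# its own endpoint name as ``callback`` so the ">> Next page" item links back
# to the same route.  The route path below is an assumption; only the
# endpoint names ("yify_movies", "yify_genre", "yify_search_query") appear
# elsewhere in this module.
@plugin.route("/yify/movies")
def yify_movies():
    return yify_show_data("yify_movies")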
def eztv_get_episodes_for_season(show_id, season):
    import copy
    import random
    from bs4 import BeautifulSoup
    from itertools import izip
    from concurrent import futures
    from streamajoker.utils import first, url_get
    from streamajoker import tvdb

    plugin.set_content("episodes")
    season = int(season)
    tvdb_id = first(plugin.request.args.get("tvdb_id"))

    # Fetch the EZTV show page and the TVDB metadata concurrently.
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            return BeautifulSoup(url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS), "html5lib")
        soup = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])
        soup = soup.result()

    items = []
    fanarts = []
    if tvdb_id:
        tvdb_show = tvdb_show.result()
        fanarts = [banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"]
        random.shuffle(fanarts)
        items = list(tvdb.build_episode_list_items(tvdb_show, int(season)))

    text_nodes = soup.findAll("a", "epinfo")
    href_nodes = soup.findAll("a", "magnet")
    season_nodes = izip(text_nodes, href_nodes)
    season_nodes = filter(lambda x: get_episode_data_from_name(x[0].text)[0] == season, season_nodes)

    for i, (node_text, node_magnet) in enumerate(season_nodes):
        season, episode = get_episode_data_from_name(node_text.text)
        if tvdb_id and episode >= 0:
            item = copy.deepcopy(items[int(episode) - 1])
            for pattern, suffix in (("720p", "(HD)"), ("1080p", "(FullHD)"), ("repack", "(REPACK)"), ("proper", "(PROPER)")):
                if pattern in node_text.text.lower():
                    item["label"] = "%s %s" % (item["label"], suffix)
        else:
            item = {
                "label": node_text.text,
            }
        item.setdefault("info", {}).update({
            "tvshowtitle": node_text.text,
            "title": item["label"],
        })
        stream_info = {}
        if "x264" in node_text.text:
            stream_info["codec"] = item["info"]["video_codec"] = "h264"
        if "xvid" in node_text.text.lower():
            stream_info["codec"] = item["info"]["video_codec"] = "xvid"
        if "720p" in node_text.text:
            stream_info["width"] = 1280
            stream_info["height"] = 720
        if "1080p" in node_text.text:
            stream_info["width"] = 1920
            stream_info["height"] = 1080
        item.update({
            "path": plugin.url_for("play", uri=node_magnet["href"]),
            "stream_info": {"video": stream_info},
            "is_playable": True,
        })
        if fanarts:
            item.setdefault("properties", {}).update({
                "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
            })
        yield item
def extratorrent_search():
    query = plugin.request.args_dict.pop("query", None)
    if not query:
        query = plugin.keyboard("", "streamajoker - ExtraTorrent - Search")
    if query:
        plugin.redirect(plugin.url_for("extratorrent_page_search", type="search", search=query, **plugin.request.args_dict))
def yify_index():
    return [
        {"label": "Search", "path": plugin.url_for("yify_search")},
        {"label": "Browse by Genre", "path": plugin.url_for("yify_genres")},

        {"label": "Most Popular", "path": plugin.url_for("yify_movies", sort="seeds", order="desc", quality="all", set=1, limit=MOVIES_PER_PAGE)},
        {"label": " in 720p", "path": plugin.url_for("yify_movies", sort="seeds", order="desc", quality="720p", set=1, limit=MOVIES_PER_PAGE)},
        {"label": " in 1080p", "path": plugin.url_for("yify_movies", sort="seeds", order="desc", quality="1080p", set=1, limit=MOVIES_PER_PAGE)},
        {"label": " in 3D", "path": plugin.url_for("yify_movies", sort="seeds", order="desc", quality="3D", set=1, limit=MOVIES_PER_PAGE)},

        {"label": "Best Rated", "path": plugin.url_for("yify_movies", sort="rating", order="desc", quality="all", set=1, limit=MOVIES_PER_PAGE)},
        {"label": " in 720p", "path": plugin.url_for("yify_movies", sort="rating", order="desc", quality="720p", set=1, limit=MOVIES_PER_PAGE)},
        {"label": " in 1080p", "path": plugin.url_for("yify_movies", sort="rating", order="desc", quality="1080p", set=1, limit=MOVIES_PER_PAGE)},
        {"label": " in 3D", "path": plugin.url_for("yify_movies", sort="rating", order="desc", quality="3D", set=1, limit=MOVIES_PER_PAGE)},

        {"label": "Most Recent", "path": plugin.url_for("yify_movies", sort="date", order="desc", quality="all", set=1, limit=MOVIES_PER_PAGE)},
        {"label": " in 720p", "path": plugin.url_for("yify_movies", sort="date", order="desc", quality="720p", set=1, limit=MOVIES_PER_PAGE)},
        {"label": " in 1080p", "path": plugin.url_for("yify_movies", sort="date", order="desc", quality="1080p", set=1, limit=MOVIES_PER_PAGE)},
        {"label": " in 3D", "path": plugin.url_for("yify_movies", sort="date", order="desc", quality="3D", set=1, limit=MOVIES_PER_PAGE)},
    ]
def parse(data, content_type=None):
    import xbmc
    import xml.etree.ElementTree as ET
    from itertools import izip_longest
    from concurrent import futures
    from contextlib import closing
    from streamajoker.utils import SafeDialogProgress, get_quality_from_name, normalize_release_tags
    from streamajoker import tmdb

    root = ET.fromstring(data)

    def _text(node, path):
        n = node.find(path)
        if n is not None:
            return n.text

    def _attr(node, path, attrib):
        n = node.find(path)
        if n is not None:
            return n.attrib.get(attrib)

    items = []
    for node in root.iter("item"):
        item = {
            "title": _text(node, "title"),
            "description": _text(node, "description"),
            "category": _text(node, "category"),
            "pub_date": _text(node, "pubDate"),
            "seeds": _text(node, ".//{%(torrent)s}seeds" % NSMAP) or _text(node, "numSeeders") or _text(node, "seeders"),
            "peers": _text(node, ".//{%(torrent)s}peers" % NSMAP) or _text(node, "numLeechers") or _text(node, "leechers"),
            "content_length": _text(node, ".//{%(torrent)s}contentLength" % NSMAP) or _attr(node, ".//enclosure", "length"),
            "href": _text(node, ".//{%(torrent)s}magnetURI" % NSMAP) or _attr(node, ".//enclosure", "url") or _text(node, "./link"),
            "filename": _text(node, ".//{%(torrent)s}fileName" % NSMAP),
        }
        find_image(item)
        check_imdb_id(item)
        items.append(item)

    tmdb_list = []
    if content_type:
        with closing(SafeDialogProgress(delay_close=0)) as dialog:
            dialog.create(plugin.name)
            dialog.update(percent=0, line1="Fetching torrent information...", line2="", line3="")

            with futures.ThreadPoolExecutor(max_workers=POOL_WORKERS) as pool:
                # ``jobs`` (renamed from the original ``futures`` list, which
                # shadowed the module) holds one future per item with an IMDb
                # id and None otherwise, keeping indexes aligned with ``items``.
                jobs = []
                for item in items:
                    if item.get("imdb_id"):
                        jobs.append(pool.submit(tmdb.get, item["imdb_id"]))
                    else:
                        jobs.append(None)

                state = {"done": 0}

                def on_item(future):
                    state["done"] += 1
                    dialog.update(
                        percent=int(state["done"] * 100.0 / len(filter(None, jobs))),
                    )
                [job.add_done_callback(on_item) for job in jobs if job]

                while not all(job.done() for job in jobs if job):
                    if dialog.iscanceled():
                        return
                    xbmc.sleep(100)
            tmdb_list = [job and job.result() or None for job in jobs]

    for item, tmdb_data in izip_longest(items, tmdb_list):
        if tmdb_data:
            list_item = tmdb.get_list_item(tmdb_data)
            release_tags = normalize_release_tags(item["title"], list_item["label"])
            if release_tags:
                list_item["label"] = "%s (%s)" % (list_item["label"], release_tags)
        else:
            list_item = {
                "label": item["title"],
                "icon": item.get("img") or "",
                "thumbnail": item.get("img") or "",
                "info": {
                    "genre": item["category"],
                },
            }
        list_item.update({
            "path": plugin.url_for("play", uri=item["href"]),
            "is_playable": True,
        })
        list_item.setdefault("info", {}).update({
            "genre": "%s (S:%s P:%s)" % (list_item.get("info", {}).get("genre") or "", item["seeds"], item["peers"]),
        })
        list_item.setdefault("stream_info", {}).update(get_quality_from_name(item["title"]))
        yield list_item
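# ``find_image`` and ``check_imdb_id`` are called by parse() but defined
# elsewhere in the module.  A minimal sketch of check_imdb_id, assuming feeds
# embed an imdb.com link in the item description; the regex and the
# "imdb_id" key are assumptions consistent with how parse() reads them:
import re

def check_imdb_id(item):
    match = re.search(r"imdb\.com/title/(tt\d+)", item.get("description") or "")
    if match:
        item["imdb_id"] = match.group(1)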
def bitsnoop_index():
    plugin.redirect(plugin.url_for("bitsnoop_search"))