def iplay_page(cat, page):
    """Yield one page of iPlay torrent listings, then a 'next page' entry.

    Cleanup: removed an unused ``import re`` and a commented-out debug print.
    """
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get
    page = int(page)
    url = urljoin(BASE_URL, "test/proxy.php")
    html_data = url_get(url, params={"cat": cat, "page": page}, headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll('a', 'torrent')
    next_page = {
        "label": "Next page...",
        "path": plugin.url_for("iplay_page", cat=cat, page=page + 1),
        "is_playable": False,
    }
    for node in nodes:
        text = "%s" % node.get('title')
        # The download link is the <a> wrapping the <img class="dld"> icon
        # that sits next to the title anchor.
        torrent_node = node.parent.find('img', {"class": "dld"}).parent
        yield {
            "label": text,
            "path": plugin.url_for("play", uri=torrent_node["href"]),
            # NOTE(review): is_playable=False although the path routes to
            # "play" -- sibling scrapers use True here; confirm intended.
            "is_playable": False,
        }
    yield next_page
def cpb_listAll(cat, page):
    """Yield every torrent on one cpasbien category page, then a next-page item.

    Cleanup: removed the unused ``tds`` local and made the ``re`` dependency
    explicit with a local import, matching the sibling scraper functions.
    """
    import re
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get
    # e.g. view_cat.php?categorie=films&page=1
    url_adr = "%s/view_cat.php?categorie=%s&page=%s" % (BASE_URL, cat, page)
    html_data = url_get(url_adr)
    soup = BeautifulSoup(html_data, "html5lib")
    # Result rows alternate between the CSS classes ligne0 / ligne1.
    name_nodes = soup.findAll('div', re.compile('ligne[0,1]'))
    for name_node in name_nodes:
        title = name_node.find("a").text
        seed = name_node.find("div", "up").text
        leech = name_node.find("div", "down").text
        color = getColor(seed, leech)
        title = title + " [COLOR %s][S:%s|L:%s][/COLOR]" % (color, seed, leech)
        html_uri = name_node.find("a")["href"]
        # The .torrent and artwork files share the detail page's basename.
        torrent_basename = os.path.basename(html_uri)
        uri_addr = BASE_URL + "/_torrents/" + torrent_basename.replace(".html", ".torrent")
        img_addr = BASE_URL + "/_pictures/" + torrent_basename.replace(".html", ".jpg")
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=uri_addr),
            "is_playable": True,
            "thumbnail": img_addr,
            "properties": {
                "fanart_image": img_addr,
            },
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("cpb_listAll", cat=cat, page=int(page) + 1),
        "is_playable": False,
    }
def piratebay_page(root, page):
    """Yield playable items for one Pirate Bay listing page plus pagination."""
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get
    page = int(page)
    # NOTE(review): the "/7/100,200,500" path suffix is a TPB sort/category
    # encoding -- inferred from the site's URL scheme, confirm.
    html_data = url_get(urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll("div", "detName")
    for node in nodes:
        # Seeders / leechers are the trailing <td> cells of the result row.
        seeds, peers = map(lambda x: x.text, node.parent.parent.findAll("td")[2:])
        # Second anchor in the cell is the magnet link.
        magnet_node = node.parent.findAll("a")[1]
        desc_node = node.parent.findAll("font", "detDesc")[0]
        size = re.search("Size (.*?),", desc_node.text).group(1)
        text = "%s (%s S:%s P:%s)" % (node.a.text, size.replace(" ", " "), seeds, peers)
        yield {
            "label": text,
            "path": plugin.url_for("play", uri=magnet_node["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
def kat_porn():
    """Adult categories on Kickass, all sorted by seeders (descending)."""
    entries = []
    for label, root in (("XXX", "xxx"), (" HD", "xxx-hd-video"), (" Hentai", "hentai")):
        entries.append({
            "label": label,
            "path": plugin.url_for("kat_page", root=root, page=1, sort_field="seeders", sort_order="desc"),
        })
    return entries
def cpb_index():
    """Root menu of the cpasbien scraper."""
    return [
        {"label": label, "path": plugin.url_for(view)}
        for label, view in (
            ("Recherche", "cpb_search"),
            ("Films", "cpb_movies"),
            ("Series", "cpb_series"),
        )
    ]
def kat_anime():
    """Anime menu for Kickass: search plus curated quality filters."""
    by_seeders = {"sort_field": "seeders", "sort_order": "desc"}
    items = [
        {"label": "Search Anime", "path": plugin.url_for("kat_search", query_suffix="category:anime", **by_seeders)},
        {"label": "All Anime", "path": plugin.url_for("kat_page", root="tv", page=1, **by_seeders)},
    ]
    for label, query in (
        (" HD", "(1080p OR 720p) category:anime"),
        (" 720p", "720p category:anime"),
        (" 1080p", "1080p category:anime"),
    ):
        items.append({"label": label, "path": plugin.url_for("kat_search", query=query, **by_seeders)})
    return items
def kat_index():
    """Kickass root menu; the XXX entry appears only when the p**n setting is on."""
    cats = [
        {"label": "Search", "path": plugin.url_for("kat_search", sort_field="seeders", sort_order="desc")},
        {"label": "Movies", "path": plugin.url_for("kat_movies")},
        {"label": "Series", "path": plugin.url_for("kat_series", sort_field="seeders", sort_order="desc")},
        {"label": "Anime", "path": plugin.url_for("kat_anime")},
    ]
    if plugin.get_setting("p**n", bool):
        cats.append({"label": "XXX", "path": plugin.url_for("kat_porn")})
    return cats
def btdigg_page(query, sort, page):
    """Yield BTDigg search results for one page, then a 'next page' item."""
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get
    html_data = url_get("%s/search" % BASE_URL, headers=HEADERS, params={
        "order": sort,
        "q": query,
        "p": page,
    })
    soup = BeautifulSoup(html_data, "html5lib")
    name_nodes = soup.findAll("td", "torrent_name")
    # Every other torrent_name_tbl table is the attributes row ([1::2]).
    attr_nodes = soup.findAll("table", "torrent_name_tbl")[1::2]
    for name_node, attr_node in zip(name_nodes, attr_nodes):
        attrs = attr_node.findAll("span", "attr_val")
        # attrs[0] = size, attrs[2] = download count -- NOTE(review): inferred
        # from the label format, confirm against the live page.
        title = "%s (%s, DLs:%s)" % (name_node.find("a").text, attrs[0].text, attrs[2].text)
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=attr_node.find("a")["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("btdigg_page", query=query, sort=sort, page=int(page) + 1),
        "is_playable": False,
    }
def _eztv_shows_by_letter(letter):
    """Resolve every EZTV show in `shows_list` against TVDB and yield menu
    items, enriched with TVDB metadata when a match is found.

    NOTE(review): depends on `shows_list`, `closing`, `terminating`,
    `ThreadPool`, `tvdb`, `izip`, `xbmc` and `xbmcgui` from the enclosing
    scope / module level -- confirm.
    """
    with closing(xbmcgui.DialogProgress()) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching serie information...", line2="", line3="")
        state = {"done": 0}
        def on_serie(data):
            # Progress callback: fires once per completed TVDB lookup.
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(shows_list)),
                line2=data and data["seriesname"] or "",
            )
        with terminating(ThreadPool(5)) as pool_tvdb:
            tvdb_list = [pool_tvdb.apply_async(tvdb.search, [show["name"], True], callback=on_serie) for show in shows_list]
            # Poll so the dialog stays responsive and cancellable.
            while not all(job.ready() for job in tvdb_list):
                if dialog.iscanceled():
                    dialog.close()
                    return
                xbmc.sleep(50)
        tvdb_list = [job.get() for job in tvdb_list]
    for i, (eztv_show, tvdb_show) in enumerate(izip(shows_list, tvdb_list)):
        if tvdb_show:
            item = tvdb.get_list_item(tvdb_show)
            item.update({
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
            })
            yield item
        else:
            yield {
                "label": eztv_show["name"],
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
            }
def ilcorsaronero_page(root, page):
    """Yield torrents from one ilCorSaRoNeRo listing page; paginate only for
    non-search listings."""
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get
    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "%s&page=%d" % (root, page)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll("a", class_="tab")
    for node in nodes:
        # Last five row cells: size, (unused), date, seeds, peers; the text
        # lives inside a <font> child when present.
        size, null, data, seeds, peers = map(lambda x: (x.font.text if x.font is not None else None), node.parent.parent.findAll("td")[-5:])
        text = "%s (%s S:%s P:%s %s)" % (node.text, size, seeds, peers, data)
        yield {
            "label": text,
            "path": plugin.url_for("ilcorsaronero_play", uri=node["href"]),
            "is_playable": True,
        }
    # Search result URLs get no pagination entry, only category listings do.
    if root.find('search') == -1:
        yield {
            "label": ">> Next page",
            "path": plugin.url_for("ilcorsaronero_page", root=root, page=page + 1),
            "is_playable": False,
        }
def piratebay_page(root, page):
    """Scrape one Pirate Bay result page and yield playable magnet items,
    followed by a pagination entry."""
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get
    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll("div", "detName")
    for node in nodes:
        # Grab seeders and leechers from the row's trailing cells.
        seeds, peers = map(lambda x: x.text, node.parent.parent.findAll("td")[2:])
        magnet_node = node.parent.findAll("a")[1]  # second anchor = magnet link
        desc_node = node.parent.findAll("font", "detDesc")[0]
        # Size is embedded in the description text, e.g. "... Size 1.4 GiB, ..."
        size = re.search("Size (.*?),", desc_node.text).group(1)
        text = "%s (%s S:%s P:%s)" % (node.a.text, size.replace(" ", " "), seeds, peers)
        yield {
            "label": text,
            "path": plugin.url_for("play", uri=magnet_node["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
def cpb_page(cat, query, sort, page):
    """Render one cpasbien result page (search or category browse) and a
    pagination entry."""
    log.debug("THE CAT = " + cat + " & THE Q = " + query)
    # Plain queries use the pretty /cat/query/page-N,sort URL; queries that
    # already mention "cpasbien" fall back to view_cat.php, whose trie param
    # drops a 5-char prefix from sort -- NOTE(review): confirm prefix meaning.
    if not "cpasbien" in query:
        url_adr = BASE_URL + "/" + cat + "/" + query.replace(" ", "+") + "/page-" + str(page) + "," + sort
    else:
        url_adr = BASE_URL + "/view_cat.php?categorie=" + cat + "&page=" + str(page) + "&trie=" + sort[5:]
    log.debug("THE URL = " + url_adr)
    html_data = url_get(url_adr)
    soup = BeautifulSoup(html_data, "html5lib")
    name_nodes = soup.findAll("td", "torrent-aff")
    for name_node in name_nodes:
        title = name_node.find("a").text
        html_uri = name_node.find("a")["href"]
        torrent_basename = os.path.basename(html_uri)
        # The .torrent and artwork share the detail page's basename.
        uri_addr = BASE_URL + "/_torrents/" + torrent_basename.replace(".html", ".torrent")
        img_addr = BASE_URL + "/_pictures/" + torrent_basename.replace(".html", ".jpg")
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=uri_addr),
            "is_playable": True,
            "thumbnail": img_addr,
            "properties": {"fanart_image": img_addr},
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("cpb_page", cat=cat, query=query, sort=sort, page=int(page) + 1),
        "is_playable": False,
    }
def kat_anime():
    """Menu of anime listings on Kickass, everything sorted by seeders."""
    def _search(q):
        return plugin.url_for("kat_search", query=q, sort_field="seeders", sort_order="desc")
    return [
        {"label": "Search Anime", "path": plugin.url_for("kat_search", query_suffix="category:anime", sort_field="seeders", sort_order="desc")},
        {"label": "All Anime", "path": plugin.url_for("kat_page", root="tv", page=1, sort_field="seeders", sort_order="desc")},
        {"label": " HD", "path": _search("(1080p OR 720p) category:anime")},
        {"label": " 720p", "path": _search("720p category:anime")},
        {"label": " 1080p", "path": _search("1080p category:anime")},
    ]
def lostfilm_all(page):
    """Paged list of all LostFilm shows (30 per page), items enriched from the
    local DB, with a progress dialog while building."""
    from json import loads
    from contextlib import closing
    from xbmctorrent.utils import SafeDialogProgress
    page = int(page)
    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации ...", line2="", line3="")
        # The server paginates via the "o" offset; keep fetching batches until
        # a short batch (< 10) signals the end or 30 shows are collected.
        params = {"act": "serial", "type": "search", "o": page * 30, "t": 0, "s": 2}
        data = []
        tvshows = []
        try:
            while True:
                json_data = url_get(AJAX_URL, params=params)
                data = loads(json_data).get("data", [])
                tvshows += data
                if len(data) < 10 or len(tvshows) >= 30:
                    break
                params["o"] += len(data)
        except Exception:
            plugin.log.error("Unexpected error: %s" % sys.exc_info()[0])
            xbmcgui.Dialog().ok(plugin.name, u"Не удалось получить данные от сервера")
            return
        done = 0
        for tvshow in tvshows:
            if dialog.iscanceled():
                return
            # Merge locally stored state (watched flags etc.) into the item --
            # NOTE(review): exact semantics live in _lostfilm_updateitem_from_db.
            item = _lostfilm_updateitem_from_db({
                "label": "[COLOR FFFFFFFF][B]%s[/B][/COLOR] [%s]" % (tvshow["title"], tvshow["title_orig"]),
                "path": plugin.url_for("lostfilm_tvshow", alias=tvshow["alias"]),
                "is_playable": False,
                "info": {
                    "title": "%s [%s]" % (tvshow["title"], tvshow["title_orig"])
                }
            }, tvshow)
            done += 1
            dialog.update(
                percent=int(done * 100.0 / len(tvshows)),
                line2=item.get("info", {}).get("tvshowtitle", "") or item.get("info", {}).get("title", "") or item["label"],
                line3=""
            )
            yield item
        _lostfilm_close_dbase()
        # A full final batch (exactly 10) implies more pages are available.
        if len(data) == 10:
            yield {
                "label": "[COLOR FF00FF00][Далее >][/COLOR]",
                "path": plugin.url_for("lostfilm_all", page=page + 1),
            }
def rutor_index():
    """Rutor root: a search entry followed by one entry per category."""
    search_item = {
        "label": u"[COLOR FFFFFF00][ Поиск ][/COLOR]",
        "path": plugin.url_for("rutor_search"),
    }
    yield search_item
    for cat in CATEGORIES:
        catind, title = cat[0], cat[1]
        yield {
            "label": title,
            "path": plugin.url_for("rutor_page", catind=catind, page=0, query=str(None)),
        }
def kat_index():
    """Root Kickass menu; the adult section is gated by the p**n setting."""
    entries = []
    for label, path in (
        ("Search", plugin.url_for("kat_search", sort_field="seeders", sort_order="desc")),
        ("Movies", plugin.url_for("kat_movies")),
        ("Series", plugin.url_for("kat_series", sort_field="seeders", sort_order="desc")),
        ("Anime", plugin.url_for("kat_anime")),
    ):
        entries.append({"label": label, "path": path})
    if plugin.get_setting("p**n", bool):
        entries.append({"label": "XXX", "path": plugin.url_for("kat_porn")})
    return entries
def _fn(*a, **kwds):
    # Decorator wrapper: `fn` is the wrapped view from the enclosing scope.
    # Adds "Add to Movies" / "Add to TV" context-menu entries to every
    # playable item the view yields; non-playable items pass through as-is.
    items = fn(*a, **kwds)
    if items is not None:
        for item in items:
            if item.get("is_playable"):
                # The label travels inside the plugin URL, so force bytes (py2).
                label = item["label"].encode("utf-8")
                item.setdefault("context_menu", []).extend([
                    ("Add to Movies", "XBMC.RunPlugin(%s)" % plugin.url_for("library_add", content_type="movies", label=label, href=item["path"])),
                    ("Add to TV", "XBMC.RunPlugin(%s)" % plugin.url_for("library_add", content_type="tvshows", label=label, href=item["path"])),
                ])
            yield item
def _fn(*a, **kwds):
    # Decorator wrapper: `fn` is the wrapped view from the enclosing scope.
    # Adds library context-menu entries to every playable item it yields.
    # Cleanup: removed the unused `ensure_magnet` import.
    items = fn(*a, **kwds)
    if items is not None:
        for item in items:
            if item.get("is_playable"):
                item.setdefault("context_menu", []).extend([
                    ("Add to Movies", "XBMC.RunPlugin(%s)" % plugin.url_for("library_add", content_type="movies", href=item["path"])),
                    ("Add to TV", "XBMC.RunPlugin(%s)" % plugin.url_for("library_add", content_type="tvshows", href=item["path"])),
                ])
            yield item
def rutor_play(tid, pulsar):
    """Download a rutor .torrent by id and start playback.

    Multi-file torrents show a file picker (auto-play when only one valid
    file remains); `pulsar` selects the pulsar player on the single-file path.
    """
    from contextlib import closing
    from bencode import bdecode
    from urlparse import urljoin
    from xbmctorrent.magnet import generate_magnet
    from xbmctorrent.utils import first, SafeDialogProgress
    from xbmctorrent.acestream import ace_supported
    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Получение информации о раздаче...")
        torrent_url = urljoin(BASE_URL, "download/%s" % tid)
        try:
            metadata = bdecode(url_get(torrent_url, headers=HEADERS))
        except Exception:
            import xbmcgui
            plugin.log.error("Unexpected error: %s" % sys.exc_info()[0])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return
        dialog.update(percent=100, line1="Готово")
        if "files" in metadata["info"]:  # and ace_supported():
            # Multi-file torrent: collect the playable files.
            items = []
            for index, info in enumerate(metadata["info"]["files"]):
                name = "/".join(info["path"])
                if not _rutor_valid_file(name):
                    continue
                # Engine 1 (AceStream) plays the torrent directly; otherwise
                # fall back to magnet-based playback of the selected file.
                if plugin.get_setting("torrent_engine", int) == 1 and ace_supported():
                    path = plugin.url_for("torrent_play", url=torrent_url, index=index, name=name)
                else:
                    path = plugin.url_for("play_file", uri=generate_magnet(metadata, name), index=index)
                items.append({"label": name, "path": path})
            items = sorted(items, key=lambda x: x["label"])
            if len(items) == 1:
                # start playback if torrent contains only one file
                plugin.redirect(items[0]["path"])
            else:
                # ask to select which file to play
                import xbmcgui
                select_items = [item["label"] for item in items]
                select = xbmcgui.Dialog().select("Выберите файл для проигрывания", select_items)
                if select >= 0:
                    plugin.redirect(items[select]["path"])
        else:
            # Single-file torrent. NOTE(review): the fallback reads
            # metadata["info"]["files"] although this branch means "files" is
            # absent -- only safe because "name" is normally present; confirm.
            name = metadata["info"].get("name") or " / ".join(first(metadata["info"]["files"])["path"]) or "rutor.org"
            if plugin.get_setting("torrent_engine", int) == 1 and ace_supported():
                path = plugin.url_for("torrent_play", url=torrent_url, index=0, name=name)
            else:
                path = plugin.url_for(["play", "play_with_pulsar"][int(pulsar)], uri=generate_magnet(metadata, name))
            plugin.redirect(path)
def extratorrent_index():
    """ExtraTorrent root menu: search / popular / last for movies, TV, anime."""
    entries = []
    categories = (
        ("Movies", "4", {"content_type": "movies"}),
        ("TV Episodes", "8", {}),
        ("Anime", "1", {}),
    )
    for label, cid, extra in categories:
        entries.append({
            "label": "Search %s" % label,
            "path": plugin.url_for("extratorrent_search", cid=cid, **extra),
        })
    for kind_label, type_ in (("Popular", "popular"), ("Last", "last")):
        for label, cid, extra in categories:
            entries.append({
                "label": "%s %s" % (kind_label, label),
                "path": plugin.url_for("extratorrent_page", type_=type_, cid=cid, **extra),
            })
    return entries
def rutracker_play(tid, pulsar):
    """Download a rutracker .torrent by topic id and start playback.

    Multi-file torrents present a file picker (auto-play when only one valid
    file remains); `pulsar` selects the pulsar player on the single-file path.
    """
    from contextlib import closing
    from bencode import bdecode
    from urlparse import urljoin
    from xbmctorrent.magnet import generate_magnet
    from xbmctorrent.utils import first, SafeDialogProgress
    from xbmctorrent.acestream import ace_supported
    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Получение информации о раздаче...")
        try:
            data = url_get(urljoin(BASE_URL, "dl.php?t=%s" % tid))
            metadata = bdecode(data)
        except Exception:
            plugin.log.error("Unexpected error: %s " % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return
        dialog.update(percent=100, line1="Готово")
        if "files" in metadata["info"]:  # and ace_supported():
            # Multi-file torrent: build the list of playable files.
            items = []
            for index, info in enumerate(metadata["info"]["files"]):
                name = "/".join(info["path"])
                # Names may arrive in a non-UTF8 encoding; decode, filter,
                # then re-encode for display.
                decname = _rutracker_decode_try(name)
                if not _rutracker_valid_file(decname):
                    continue
                name = uenc(decname)
                # Engine 1 (AceStream) gets the raw torrent bytes; otherwise
                # play the selected file through a generated magnet.
                if plugin.get_setting("torrent_engine", int) == 1 and ace_supported():
                    path = plugin.url_for("play_torrent_raw", raw=data, index=index, name=name)
                else:
                    path = plugin.url_for("play_file", uri=generate_magnet(metadata, name), index=index)
                items.append({"label": name, "path": path})
            if len(items) == 1:
                # start playback if torrent contains only one file
                plugin.redirect(items[0]["path"])
            else:
                select_items = [item["label"] for item in items]
                select = xbmcgui.Dialog().select("Выберите файл для проигрывания", select_items)
                if select >= 0:
                    plugin.redirect(items[select]["path"])
        else:
            # Single-file torrent. NOTE(review): the fallback indexes
            # metadata["info"]["files"] although this branch means "files" is
            # absent -- only safe because "name" is normally set; confirm.
            name = metadata["info"].get("name") or " / ".join(first(metadata["info"]["files"])["path"]) or "rutracker.org"
            if plugin.get_setting("torrent_engine", int) == 1 and ace_supported():
                path = plugin.url_for("play_torrent_raw", raw=data, index=0, name=name)
            else:
                path = plugin.url_for(["play", "play_with_pulsar"][int(pulsar)], uri=generate_magnet(metadata, name))
            plugin.redirect(path)
def kat_movies():
    """Movies menu for Kickass: search, genres, then Most Popular / Most
    Recent with 720p/1080p/3D refinements under each."""
    def _search(query, field):
        return plugin.url_for("kat_search", query=query, sort_field=field, sort_order="desc", content_type="movies")
    def _listing(field):
        return plugin.url_for("kat_page", root="movies", page=1, sort_field=field, sort_order="desc", content_type="movies")
    return [
        {"label": "Search Movies", "path": plugin.url_for("kat_search", query_suffix="category:movies", sort_field="seeders", sort_order="desc", content_type="movies")},
        {"label": "Genres", "path": plugin.url_for("kat_movies_genres")},
        {"label": "Most Popular", "path": _listing("seeders")},
        {"label": " in 720p", "path": _search("720p category:movies", "seeders")},
        {"label": " in 1080p", "path": _search("1080p category:movies", "seeders")},
        {"label": " in 3D", "path": _search("category:3d-movies", "seeders")},
        {"label": "Most Recent", "path": _listing("time_add")},
        {"label": " in 720p", "path": _search("720p category:movies", "time_add")},
        {"label": " in 1080p", "path": _search("1080p category:movies", "time_add")},
        {"label": " in 3D", "path": _search("category:3d-movies", "time_add")},
    ]
def eztv_get_show_seasons(show_id):
    """List the seasons of an EZTV show, enriched with TVDB art when a
    tvdb_id is supplied in the request args.

    Fixed: the ``if fanarts:`` guard had been commented out, so shows without
    TVDB fanart crashed with ZeroDivisionError on ``i % len(fanarts)``.
    Also removed dead commented-out code.
    """
    import random
    from bs4 import BeautifulSoup
    from itertools import groupby
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import first, terminating, url_get
    from xbmctorrent import tvdb
    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with terminating(ThreadPool(2)) as pool:
        def _eztv_get_show():
            plugin.log.info("Getting show")
            response = url_get("http://eztv.it/shows/%s/" % show_id, headers=HEADERS)
            plugin.log.info("Got show")
            return BeautifulSoup(response, "html5lib")
        # Fetch the show page and TVDB metadata concurrently.
        soup = pool.apply_async(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.apply_async(tvdb.get_all_meta, [plugin.request.args["tvdb_id"][0]])
        soup = soup.get()
    fanarts = []
    if tvdb_id:
        tvdb_show = tvdb_show.get()
        fanarts = list([banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"])
        random.shuffle(fanarts)
    # If any episode name fails to parse into a season, redirect to a flat
    # "all episodes" view instead of season folders.
    for node in soup.findAll("a", "epinfo"):
        if get_episode_data_from_name(node.text)[0] < 0:
            plugin.redirect(plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=-1, tvdb_id=tvdb_id))
            return
    # This is needed because we need to be able to redirect if no seasons are found.
    items = []
    for i, (season, episodes) in enumerate(groupby(soup.findAll("a", "epinfo"), lambda node: get_episode_data_from_name(node.text)[0])):
        if tvdb_id:
            item = tvdb.get_season_list_item(tvdb_show, season)
        else:
            item = {"label": "Season %d" % season}
        item.update({
            "path": plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=season, tvdb_id=tvdb_id),
        })
        if fanarts:  # guard restored: fanarts is empty without TVDB metadata
            item.setdefault("properties", {}).update({
                "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
            })
        items.append(item)
    return items
def kat_porn():
    """Adult categories on Kickass, most-seeded first."""
    seed_sort = dict(sort_field="seeders", sort_order="desc")
    return [
        {"label": "XXX", "path": plugin.url_for("kat_page", root="xxx", page=1, **seed_sort)},
        {"label": " HD", "path": plugin.url_for("kat_page", root="xxx-hd-video", page=1, **seed_sort)},
        {"label": " Hentai", "path": plugin.url_for("kat_page", root="hentai", page=1, **seed_sort)},
    ]
def cpb_serie_index():
    """Alphabetical buckets for browsing series on cpasbien, newest first."""
    buckets = ("0-a-b", "c-d-e", "f-g-h", "i-j-k", "l-m-n", "o-p-q", "r-s-t", "u-v-w", "x-y-z")
    items = []
    for bucket in buckets:
        # "0-a-b" renders as "0, A, B" in the menu.
        label = ", ".join(part.upper() for part in bucket.split("-"))
        items.append({
            "label": label,
            "path": plugin.url_for("cpb_page", action=ACTION_SERIES, query=bucket, sort=SORT_DATE + SORT_DIRECTION_DESC, page=0),
        })
    return items
def kat_page(root, page, sort_field, sort_order):
    """Yield Kickass items for one listing page via its RSS feed, plus a
    pagination entry."""
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get
    # content_type (e.g. "movies") rides along in the query string.
    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)
    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)), headers=HEADERS, params={
        "rss": "1",
        "field": sort_field,
        "sorder": sort_order
    })
    for item in rss.parse(page_data, content_type):
        yield item
    yield {
        "label": ">> Next page",
        # Re-forward any extra request args (content_type, ...) to the next page.
        "path": plugin.url_for("kat_page", root=root, page=page + 1, sort_field=sort_field, sort_order=sort_order, **plugin.request.args_dict),
    }
def cpb_series():
    """Menu of series sub-categories scraped from the cpasbien /series/ page."""
    url_adr = "%s/series/" % BASE_URL
    html_movies = url_get(url_adr)
    soup = BeautifulSoup(html_movies, "html5lib")
    s2 = soup.find("div", "affichage-sous-categorie").find_all("a")  # ,"affichage-sous-categorie")
    sous_cat = []
    for aa in s2:
        if aa.text != "":
            # Labels may contain non-ASCII; degrade to '?' rather than crash (py2).
            sous_cat_label = (aa.text).encode('ascii', 'replace')
            # NOTE(review): category hard-coded to "films" although this
            # scrapes /series/ -- looks copy/pasted from the movies variant;
            # confirm whether it should be "series".
            sous_cat_name = "films"
            # The sub-category slug is the second-to-last path segment.
            sous_cat_query = string.split(aa.get('href'), '/')[-2]
            sous_cat_sort = SORT_DATE + SORT_DIRECTION_DESC
            sous_cat_page = 0
            sous_cat += [
                {
                    "label": sous_cat_label,
                    "path": plugin.url_for("cpb_page", cat=sous_cat_name, query=sous_cat_query, sort=sous_cat_sort, page=sous_cat_page)
                },
            ]
    return sous_cat
def index():
    """Top-level menu: one entry per registered scraper module."""
    for module in MODULES:
        entry = {
            "label": module["name"],
            "thumbnail": module["image"],
            "path": plugin.url_for(module["view"]),
        }
        yield entry
def cpb_last(type):
    """Yield the latest cpasbien torrents, optionally filtered by `type`.

    Cleanup: removed the unused ``tds`` local and made the ``re`` dependency
    explicit with a local import, matching the sibling scraper functions.
    """
    import re
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get
    url_adr = "%s/derniers-torrents.php" % BASE_URL
    # TYPE_A means "all": no filter parameter in that case.
    if type != TYPE_A:
        url_adr = url_adr + "?filtre=%s" % type
    html_data = url_get(url_adr)
    soup = BeautifulSoup(html_data, "html5lib")
    # Result rows alternate between the CSS classes ligne0 / ligne1.
    name_nodes = soup.findAll('div', re.compile('ligne[0,1]'))
    for name_node in name_nodes:
        title = name_node.find("a").text
        seed = name_node.find("div", "up").text
        leech = name_node.find("div", "down").text
        color = getColor(seed, leech)
        title = title + " [COLOR %s][S:%s|L:%s][/COLOR]" % (color, seed, leech)
        html_uri = name_node.find("a")["href"]
        # The .torrent and artwork files share the detail page's basename.
        torrent_basename = os.path.basename(html_uri)
        uri_addr = BASE_URL + "/_torrents/" + torrent_basename.replace(".html", ".torrent")
        img_addr = BASE_URL + "/_pictures/" + torrent_basename.replace(".html", ".jpg")
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=uri_addr),
            "is_playable": True,
            "thumbnail": img_addr,
            "properties": {
                "fanart_image": img_addr,
            },
        }
def kat_page(root, page, sort_field, sort_order):
    """Render one RSS-backed Kickass listing page and append a pagination item."""
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get
    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)
    page = int(page)
    query = {"rss": "1", "field": sort_field, "sorder": sort_order}
    feed = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)), headers=HEADERS, params=query)
    for entry in rss.parse(feed, content_type):
        yield entry
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("kat_page", root=root, page=page + 1, sort_field=sort_field, sort_order=sort_order, **plugin.request.args_dict),
    }
def kickass_index():
    """Kickass root menu."""
    movies = {
        "label": "Movies",
        "path": plugin.url_for("kickass_page", root="movies", page=1, content_type="movies"),
    }
    search = {
        "label": "Search",
        "path": plugin.url_for("kickass_search"),
    }
    return [movies, search]
def eztv_get_show_seasons(show_id):
    """Yield the seasons of an EZTV show (newest first), with TVDB metadata
    and fanart when a tvdb_id is supplied in the request args."""
    import random
    import re
    from itertools import groupby
    from concurrent import futures
    from xbmctorrent.utils import first, terminating, url_get
    from xbmctorrent import tvdb
    plugin.set_content("seasons")
    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            plugin.log.info("Getting show")
            response = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
            plugin.log.info("Got show")
            return response
        # Fetch the show page and the TVDB metadata concurrently.
        seasons_html = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])
        seasons_html = seasons_html.result()
    fanarts = []
    seasons = {}
    # Episode anchors are extracted with a regex instead of a full HTML parse;
    # each episode is bucketed under its parsed season number.
    iter_list = re.findall(r'<a .*?class="epinfo".*?>(.*?)</a.*?>', seasons_html)
    for node_episode in iter_list:
        season, episode = get_episode_data_from_name(node_episode)
        seasons.setdefault(season, {})[episode] = True
    if tvdb_id:
        tvdb_show = tvdb_show.result()
        fanarts = list([banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"])
        random.shuffle(fanarts)
    # Newest season first.
    for i, season in enumerate(reversed(sorted(seasons.keys()))):
        item = tvdb_id and tvdb.get_season_list_item(tvdb_show, season) or {}
        item.update({
            "label": "Season %d [%d episodes]" % (season, len(seasons[season])),
            "path": plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=season, tvdb_id=tvdb_id),
        })
        if fanarts:
            # Spread the shuffled fanarts across seasons round-robin.
            item.setdefault("properties", {}).update({
                "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
            })
        yield item
def cpb_index():
    """cpasbien root menu."""
    entries = []
    entries.append({"label": "Recherche", "path": plugin.url_for("cpb_search")})
    entries.append({"label": "Films", "path": plugin.url_for("cpb_movies")})
    entries.append({"label": "Series", "path": plugin.url_for("cpb_series")})
    return entries
def nyaa_page(cats, offset, sort, order, term=""): from xbmctorrent.scrapers import rss from xbmctorrent.utils import url_get offset = int(offset) rss_data = url_get(BASE_URL, headers=HEADERS, params={ "cats": cats, "offset": "%d" % offset, "sort": sort, "order": order, "term": term, "page": "rss", }) for item in rss.parse(rss_data): yield item yield { "label": ">> Next page", "path": plugin.url_for(term and "search_result_page" or "default_nyaa_page", cats=cats, sort=sort, order=order, term=term, offset=offset + 1), "is_playable": False, }
def torrents3d_resuls(query, page):
    """Yield Torrents3D search results for `query` (one page) plus pagination."""
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get
    page = int(page)
    params = {"nm": query, "start": page * PAGE_SIZE}
    html_data = url_get(urljoin(BASE_URL, "tracker.php"), params=params, headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll("tr", id=re.compile("tor_(\d+)"))
    for node in nodes:
        download_node = node.find("a", class_="tr-dl")
        # skip closed
        if not download_node:
            continue
        article_href = node.find("a", class_="genmed")["href"]
        size = download_node.text.replace(" ", " ").strip()
        title, _, _, seeds, peers, _ = map(lambda x: x.text.strip(), node.findAll("td")[3:])
        # _torrents3d_cleantitle returns a (stereo, title) tuple; concatenated
        # with (size, seeds, peers) it feeds all five %s placeholders.
        text = "[COLOR FFFFFFCC][%s][/COLOR] %s (%s S:%s P:%s)" % (_torrents3d_cleantitle(title) + (size, seeds, peers))
        yield {
            "label": text,
            # Article id is the numeric segment of the detail URL.
            "path": plugin.url_for("torrents3d_play", article=re.search(".*\/(\d+)\/", article_href).group(1)),
            "is_playable": True,
        }
    # A full page implies there may be more results.
    if len(nodes) == PAGE_SIZE:
        next_page = {
            "label": u"[COLOR FFFFFF00][Далее >>][/COLOR]",
            "path": plugin.url_for("torrents3d_resuls", query=query, page=page + 1),
            "is_playable": False,
        }
        yield next_page
def extratorrent_index():
    """ExtraTorrent root menu (search, popular and last listings per category)."""
    def _search(cid, **kw):
        return plugin.url_for("extratorrent_search", cid=cid, **kw)
    def _page(type_, cid, **kw):
        return plugin.url_for("extratorrent_page", type_=type_, cid=cid, **kw)
    return [
        {"label": "Search Movies", "path": _search("4", content_type="movies")},
        {"label": "Search TV Episodes", "path": _search("8")},
        {"label": "Search Anime", "path": _search("1")},
        {"label": "Popular Movies", "path": _page("popular", "4", content_type="movies")},
        {"label": "Popular TV Episodes", "path": _page("popular", "8")},
        {"label": "Popular Anime", "path": _page("popular", "1")},
        {"label": "Last Movies", "path": _page("last", "4", content_type="movies")},
        {"label": "Last TV Episodes", "path": _page("last", "8")},
        {"label": "Last Anime", "path": _page("last", "1")},
    ]
def yify_search():
    """Reuse the query from the request args, or prompt for one, then redirect
    to the YIFY search results."""
    args = plugin.request.args.get("query")
    if args:
        query = args[0]
    else:
        query = plugin.keyboard("", "XBMCtorrent - YIFY - Search")
    if query:
        target = plugin.url_for("yify_search_query", keywords=query, quality="all", set=1, limit=MOVIES_PER_PAGE)
        plugin.redirect(target)
def nyaa_search():
    """Reuse the query from the request args, or prompt for one, then redirect
    to the NyaaTorrents search results."""
    args = plugin.request.args.get("query")
    if args:
        query = args[0]
    else:
        query = plugin.keyboard("", "XBMCtorrent - NyaaTorrents - Search")
    if query:
        target = plugin.url_for("search_result_page", page="search", cats="0_0", filter=0, term=query, offset=1, sort=SORT_DATE, order=SORT_DESCENDING)
        plugin.redirect(target)
def yify_genres():
    """One menu entry per YIFY genre, seed-sorted by default."""
    common = dict(sort="seeds", order="desc", quality="all", set=1, limit=MOVIES_PER_PAGE)
    for genre in GENRES:
        yield {
            "label": genre,
            "path": plugin.url_for("yify_genre", genre=genre, **common),
        }
def kat_search():
    """Prompt for a query (unless one was passed) and redirect to a Kickass
    usearch page restricted to verified torrents."""
    import urllib
    query = plugin.request.args_dict.pop("query", None)
    suffix = plugin.request.args_dict.pop("query_suffix", "")
    if not query:
        query = plugin.keyboard("", "XBMCtorrent - Kickass Torrent - Search")
    if query:
        quoted = urllib.quote("%s %s verified:1" % (query, suffix), safe="")
        plugin.redirect(plugin.url_for("kat_page", root="usearch/%s" % quoted, page=1, **plugin.request.args_dict))
def torrents3d_page(section, page):
    """Yield one Torrents3D section page plus a next-page item.

    Fixed: removed a leftover ``print len(nodes)`` debug statement.
    """
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "section/%s/%s/" % (section, PAGE_SIZE * page)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    regexp = re.compile("tr-(\d+)")
    nodes = soup.findAll("tr", id=regexp)
    for node in nodes:
        tds = node.findAll("td")
        # Rows with fewer than 5 cells are separators, not torrents.
        if len(tds) < 5:
            continue
        size, seeds = map(lambda x: x.text.strip(), tds[1:3])
        article_node = tds[0]
        article_href = article_node.find("a", class_="tt-text")
        stereo, title = _torrents3d_cleantitle(article_href.text)
        text = "[%s] %s (%s S:%s)" % (stereo, title, size.replace(" ", " "), seeds)
        yield {
            "label": text,
            # Article id = anchor id minus its "tt-" prefix.
            "path": plugin.url_for("torrents3d_play", article=article_href["id"][3:]),
            "is_playable": True,
            "info": {"title": title}
        }
    # A full page suggests more pages exist.
    if len(nodes) == PAGE_SIZE:
        next_page = {
            "label": u"[COLOR FFFFFF00][Далее >>][/COLOR]",
            "path": plugin.url_for("torrents3d_page", section=section, page=page + 1),
            "is_playable": False,
        }
        yield next_page
def torrents3d_search():
    """Resolve a Torrents3D.ru query (request arg or keyboard) and redirect
    to the results route."""
    arg = plugin.request.args.get("query")
    query = arg[0] if arg else plugin.keyboard("", "Torrents3D.ru - Search")
    if query:
        # NOTE: "torrents3d_resuls" matches the route name registered elsewhere.
        plugin.redirect(plugin.url_for("torrents3d_resuls", query=query, page=0))
def eztv_index():
    """Yield the EZTV A-Z index: a "0-9" bucket followed by each uppercase
    ASCII letter, each linking to the shows starting with that letter."""
    import string
    buckets = ["0-9"]
    buckets.extend(string.ascii_uppercase)
    for bucket in buckets:
        yield {
            "label": bucket,
            "path": plugin.url_for("eztv_shows_by_letter", letter=bucket),
            "is_playable": False,
        }
def make_cats(root, prefix=""):
    """Recursively flatten a Nyaa category tree into menu entries.

    Each node is a tuple of (label, category-id[, children]); child labels
    are indented via *prefix*.
    """
    for entry in root:
        yield {
            "label": "%s%s" % (prefix, entry[0]),
            "path": plugin.url_for("default_nyaa_page", cats=entry[1], offset=0, sort=SORT_SEEDERS, order=SORT_DESCENDING),
        }
        if len(entry) > 2:
            # Recurse into sub-categories with a deeper prefix.
            for child in make_cats(entry[2], prefix="%s " % prefix):
                yield child
def make_cats(root, prefix=""):
    """Recursively flatten The Pirate Bay category tree into menu entries.

    Each node is a tuple of (label, numeric-category-id[, children]); child
    labels are indented via *prefix*.
    """
    for entry in root:
        yield {
            "label": "%s%s" % (prefix, entry[0]),
            "path": plugin.url_for("piratebay_page", root="/browse/%d" % entry[1], page=0),
        }
        if len(entry) > 2:
            # Recurse into sub-categories with a deeper prefix.
            for child in make_cats(entry[2], prefix="%s " % prefix):
                yield child
def cpb_page(cat, query, sort, page):
    """Yield Cpasbien torrent entries for *query* in *cat*, then a
    next-page item.

    Builds either a keyword-search URL or a category-browse URL depending
    on whether *query* already refers to the site itself.
    """
    log.debug("THE CAT = " + cat + " & THE Q = " + query)
    if "cpasbien" not in query:
        # Keyword search: spaces become "+" in the path segment.
        url_adr = "%s/%s/%s/page-%s,%s" % (BASE_URL, cat, query.replace(" ", "+"), page, sort)
    else:
        # Category browse; sort[5:] drops the first five characters of the
        # sort key — presumably a fixed prefix; confirm against callers.
        url_adr = "%s/view_cat.php?categorie=%s&page=%s&trie=%s" % (BASE_URL, cat, page, sort[5:])
    log.debug("THE URL = " + url_adr)
    soup = BeautifulSoup(url_get(url_adr), "html5lib")
    for name_node in soup.findAll("td", "torrent-aff"):
        link = name_node.find("a")
        torrent_basename = os.path.basename(link["href"])
        # The .torrent file and cover image share the page's basename.
        uri_addr = BASE_URL + "/_torrents/" + torrent_basename.replace(".html", ".torrent")
        img_addr = BASE_URL + "/_pictures/" + torrent_basename.replace(".html", ".jpg")
        yield {
            "label": link.text,
            "path": plugin.url_for("play", uri=uri_addr),
            "is_playable": True,
            "thumbnail": img_addr,
            "properties": {
                "fanart_image": img_addr,
            },
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("cpb_page", cat=cat, query=query, sort=sort, page=int(page) + 1),
        "is_playable": False,
    }
def piratebay_search():
    """Resolve a Pirate Bay query (request arg or keyboard) and redirect
    to the first result page."""
    import urllib
    arg = plugin.request.args.get("query")
    query = arg[0] if arg else plugin.keyboard("", "XBMCtorrent - The Pirate Bay - Search")
    if query:
        plugin.redirect(plugin.url_for("piratebay_page", root="/search/%s" % urllib.quote(query), page=0))
def extratorrent_search():
    """Resolve an ExtraTorrent query and redirect to the search results,
    forwarding any remaining request args."""
    term = plugin.request.args_dict.pop("query", None)
    if not term:
        term = plugin.keyboard("", "XBMCtorrent - ExtraTorrent - Search")
    if term:
        plugin.redirect(plugin.url_for(
            "extratorrent_page_search",
            type="search",
            search=term,
            **plugin.request.args_dict
        ))
def make_cats(root, prefix=""):
    """Recursively flatten the Torrents3D category tree into menu entries.

    Each node is a tuple of (label, section-slug[, children]); child labels
    are indented via *prefix*.
    """
    for entry in root:
        yield {
            "label": "%s%s" % (prefix, entry[0]),
            "path": plugin.url_for("torrents3d_page", section=entry[1], page=0),
        }
        if len(entry) > 2:
            # Recurse into sub-categories with a deeper prefix.
            for child in make_cats(entry[2], prefix="%s " % prefix):
                yield child
def btdigg_search():
    """Resolve a BTDigg query and redirect to the popularity-sorted first
    result page, forwarding any remaining request args."""
    term = plugin.request.args_dict.pop("query", None)
    if not term:
        term = plugin.keyboard("", "XBMCtorrent - BTDigg - Search")
    if term:
        plugin.redirect(plugin.url_for(
            "btdigg_page",
            query=term,
            sort=SORT_POPULARITY,
            page=0,
            **plugin.request.args_dict
        ))
def torrents3d_search():
    """Resolve a Torrents3D.ru query (request arg or keyboard) and redirect
    to the results route.

    Fix: removed the unused local ``import urllib`` — nothing in this
    function quotes or encodes the query.
    """
    query = plugin.request.args.get("query")
    if query:
        query = query[0]
    else:
        query = plugin.keyboard("", "Torrents3D.ru - Search")
    if query:
        # NOTE: "torrents3d_resuls" matches the route name registered elsewhere.
        plugin.redirect(plugin.url_for("torrents3d_resuls", query=query, page=0))
def rutracker_index():
    """Yield the top-level RuTracker category menu.

    Each CATEGORIES entry supplies (category-index, label, ...); the index
    doubles as the initial query value.
    """
    for entry in CATEGORIES:
        catind = entry[0]
        yield {
            "label": entry[1],
            "path": plugin.url_for("rutracker_page", catind=catind, page=0, query=catind),
        }