def piratebay_page(root, page):
    # Scrape a Pirate Bay listing page and yield one playable list item per
    # torrent, fetching the per-torrent metadata concurrently.
    import xbmc
    import xbmcgui
    from concurrent import futures
    from contextlib import closing
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrentV2.utils import url_get, SafeDialogProgress

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching index page...", line2="", line3="")

        plugin.set_content("movies")
        page = int(page)

        try:
            html_data = url_get(urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page)), headers=HEADERS)
        except Exception:
            xbmcgui.Dialog().ok("Piratebay", "Timeout")
            return

        soup = BeautifulSoup(html_data, "html5lib")
        nodes = soup.findAll("div", "detName")

        # Progress callback: update the dialog as each lookup completes.
        state = {"done": 0}
        def on_movie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(nodes)),
                line2=data.get("label") or "",
            )

        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")
        with futures.ThreadPoolExecutor(max_workers=10) as pool_tmdb:
            tmdb_list = [pool_tmdb.submit(piratebay_record, node) for node in nodes]
            for future in tmdb_list:
                future.add_done_callback(on_movie)
            # Poll so the progress dialog stays responsive and cancellable.
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(200)

        for job in tmdb_list:
            yield job.result()

        yield {
            "label": ">> Next page",
            "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
            "is_playable": False,
        }
# Caching decorator for route functions. The enclosing scope was missing around
# _fn; it is presumed to be a decorator that receives the wrapped route `fn`
# plus options (`ttl`, `content_type`) in `kwargs`.
def cached_route(fn, **kwargs):
    def _fn(*a, **kwds):
        import hashlib
        import types
        # Cache the route result in a shelf keyed by the request path.
        basename = "xbmctorrentV2.route.%s" % hashlib.sha1(plugin.request.path).hexdigest()
        with shelf(basename, ttl=kwargs.get("ttl") or 0) as result:
            if not result.get("value"):
                ret = fn(*a, **kwds)
                # Generators must be materialized before they can be stored.
                if isinstance(ret, types.GeneratorType):
                    ret = list(ret)
                result["value"] = ret
            if kwargs.get("content_type"):
                plugin.set_content(kwargs.get("content_type"))
            return result["value"]
    return _fn
def bitsnoop_page(root, page):
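    # Fetch a BitSnoop listing page as RSS and return the parsed torrent items.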
    from urlparse import urljoin
    from xbmctorrentV2.scrapers import rss
    from xbmctorrentV2.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d/" % (root, page)), headers=HEADERS, params={
        "fmt": "rss",
        "sort": "n_s",
        "dir": "desc",
    })
    return rss.parse(page_data)
def extratorrent_page(type_="", cid="", search="", page=1):
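    # Query the ExtraTorrent RSS feed for the given type, category and search terms.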
    from urlparse import urljoin
    from xbmctorrentV2.scrapers import rss
    from xbmctorrentV2.utils import url_get

    content_type = plugin.request.args_dict.pop("content_type", None)
    if content_type:
        plugin.set_content(content_type)

    params = {
        "type": type_,
        "search": search,
        "cid": cid,
    }
    params.update(plugin.request.args_dict)
    page_data = url_get(urljoin(BASE_URL, "/rss.xml"), headers=HEADERS, params=params)
    return rss.parse(page_data, content_type)
def eztv_get_show_seasons(show_id):
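    # List the seasons available for an EZTV show, with TVDB artwork when a tvdb_id is supplied.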
    import random
    from bs4 import BeautifulSoup
    from itertools import groupby
    from concurrent import futures
    from xbmctorrentV2.utils import first, url_get
    from xbmctorrentV2 import tvdb

    plugin.set_content("seasons")

    tvdb_id = first(plugin.request.args.get("tvdb_id"))
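    # Fetch the EZTV show page and, when a tvdb_id is given, the TVDB metadata in parallel.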
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            plugin.log.info("Getting show")
            response = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
            plugin.log.info("Got show")
            return BeautifulSoup(response, "html5lib")
        show_future = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_future = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])

        soup = show_future.result()
        fanarts = []
        if tvdb_id:
            tvdb_show = tvdb_future.result()
            fanarts = [banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"]
            random.shuffle(fanarts)

        seasons = {}
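        # Group episodes by season number parsed from each release name.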
        for node_episode in soup.findAll("a", "epinfo"):
            season, episode = get_episode_data_from_name(node_episode.text)
            seasons.setdefault(season, {})[episode] = True

        for i, season in enumerate(reversed(sorted(seasons.keys()))):
            item = tvdb_id and tvdb.get_season_list_item(tvdb_show, season) or {}
            item.update({
                "label": "Season %d [%d episodes]" % (season, len(seasons[season])),
                "path": plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=season, tvdb_id=tvdb_id),
            })
            if fanarts:
                item.setdefault("properties", {}).update({
                    "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
                })
            yield item
def kat_page(root, page, sort_field, sort_order):
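    # Fetch a KAT listing page as RSS and append a next-page entry.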
    from urlparse import urljoin
    from xbmctorrentV2.scrapers import rss
    from xbmctorrentV2.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)), headers=HEADERS, params={
        "rss": "1",
        "field": sort_field,
        "sorder": sort_order
    })
    for item in rss.parse(page_data, content_type):
        yield item
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("kat_page", root=root, page=page + 1, sort_field=sort_field, sort_order=sort_order, **plugin.request.args_dict),
    }
def eztv_get_episodes_for_season(show_id, season):
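    # List the episodes of one EZTV season, pairing each release with its magnet link and TVDB metadata when available.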
    import copy
    import random
    from bs4 import BeautifulSoup
    from itertools import izip
    from concurrent import futures
    from xbmctorrentV2.utils import first, url_get
    from xbmctorrentV2 import tvdb

    plugin.set_content("episodes")

    season = int(season)
    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            return BeautifulSoup(url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS), "html5lib")
        show_future = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_future = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])

        soup = show_future.result()
        items = []
        fanarts = []
        if tvdb_id:
            tvdb_show = tvdb_future.result()
            fanarts = [banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"]
            random.shuffle(fanarts)
            items = list(tvdb.build_episode_list_items(tvdb_show, int(season)))
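        # Pair each episode name with its magnet link and keep only the requested season.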
        text_nodes = soup.findAll("a", "epinfo")
        href_nodes = soup.findAll("a", "magnet")
        season_nodes = izip(text_nodes, href_nodes)
        season_nodes = filter(lambda x: get_episode_data_from_name(x[0].text)[0] == season, season_nodes)

        for i, (node_text, node_magnet) in enumerate(season_nodes):
            season, episode = get_episode_data_from_name(node_text.text)
            if tvdb_id and 0 < int(episode) <= len(items):
                item = copy.deepcopy(items[int(episode) - 1])
                for pattern, suffix in (("720p", "(HD)"), ("1080p", "(FullHD)"), ("repack", "(REPACK)"), ("proper", "(PROPER)")):
                    if pattern in node_text.text.lower():
                        item["label"] = "%s %s" % (item["label"], suffix)
            else:
                item = {
                    "label": node_text.text,
                }
            item.setdefault("info", {}).update({
                "tvshowtitle": node_text.text,
                "title": item["label"],
            })
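            # Derive codec and resolution hints from the release name.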
            stream_info = {}
            if "x264" in node_text.text.lower():
                stream_info["codec"] = item["info"]["video_codec"] = "h264"
            if "xvid" in node_text.text.lower():
                stream_info["codec"] = item["info"]["video_codec"] = "xvid"
            if "720p" in node_text.text:
                stream_info["width"] = 1280
                stream_info["height"] = 720
            if "1080p" in node_text.text:
                stream_info["width"] = 1920
                stream_info["height"] = 1080
            item.update({
                "path": plugin.url_for("play", uri=node_magnet["href"]),
                "stream_info": {"video": stream_info},
                "is_playable": True,
            })
            if fanarts:
                item.setdefault("properties", {}).update({
                    "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
                })
            yield item
def yify_show_data(callback):
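    # Browse a YIFY listing and yield movie items enriched with TMDb metadata fetched concurrently.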
    import xbmc
    import xbmcgui
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from xbmctorrentV2 import tmdb
    from xbmctorrentV2.utils import url_get_json, terminating, SafeDialogProgress

    plugin.set_content("movies")
    args = dict((k, v[0]) for k, v in plugin.request.args.items())

    current_page = int(args["set"])
    limit = int(args["limit"])

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")

        try:
            search_result = url_get_json("%s/api/list.json" % BASE_URL, params=args, headers=HEADERS)
        except Exception:
            plugin.notify("Unable to connect to %s." % BASE_URL)
            raise
        movies = search_result.get("MovieList") or []

        if not movies:
            return

        state = {"done": 0}
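        # Progress callback: update the dialog as each TMDb lookup completes.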
        def on_movie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(movies)),
                line2=data.get("title") or data.get("MovieTitleClean") or "",
            )

        with futures.ThreadPoolExecutor(max_workers=2) as pool_tmdb:
            tmdb_list = [pool_tmdb.submit(tmdb.get, movie["ImdbCode"]) for movie in movies]
            for future in tmdb_list:
                future.add_done_callback(on_movie)
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        tmdb_list = [job.result() for job in tmdb_list]
        for movie, tmdb_meta in izip(movies, tmdb_list):
            if tmdb_meta:
                item = tmdb.get_list_item(tmdb_meta)
                if args.get("quality") == "all" and movie["Quality"] != "720p":
                    item["label"] = "%s (%s)" % (item["label"], movie["Quality"])
                item.update({
                    "path": plugin.url_for("play", uri=movie["TorrentMagnetUrl"]),
                    "is_playable": True,
                })
                item.setdefault("info", {}).update({
                    "count": movie["MovieID"],
                    "genre": "%s (%s S:%s P:%s)" % (item["info"]["genre"], movie["Size"], movie["TorrentSeeds"], movie["TorrentPeers"]),
                    "plot_outline": tmdb_meta["overview"],
                    "video_codec": "h264",
                })
                width = 1920
                height = 1080
                if movie["Quality"] == "720p":
                    width = 1280
                    height = 720
                item.setdefault("stream_info", {}).update({
                    "video": {
                        "codec": "h264",
                        "width": width,
                        "height": height,
                    },
                    "audio": {
                        "codec": "aac",
                    },
                })
                yield item

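        # Offer a next-page entry while more results remain.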
        if current_page < (int(search_result["MovieCount"]) / limit):
            next_args = args.copy()
            next_args["set"] = int(next_args["set"]) + 1
            yield {
                "label": ">> Next page",
                "path": plugin.url_for(callback, **next_args),
            }