Example #1
def kat_page(root, page, sort_field, sort_order):
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(
        urljoin(BASE_URL, "%s/%d" % (root, page)),
        headers=HEADERS,
        params={"rss": "1", "field": sort_field, "sorder": sort_order},
    )
    for item in rss.parse(page_data, content_type):
        yield item
    yield {
        "label": ">> Next page",
        "path": plugin.url_for(
            "kat_page",
            root=root,
            page=page + 1,
            sort_field=sort_field,
            sort_order=sort_order,
            **plugin.request.args_dict
        ),
    }
Example #2
def piratebay_page(root, page):
    import xbmc
    import xbmcgui
    from concurrent import futures
    from contextlib import closing
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get, SafeDialogProgress

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching index page...", line2="", line3="")

        plugin.set_content("movies")
        page = int(page)

        try:
            html_data = url_get(urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page)), headers=HEADERS)
        except Exception:
            # Don't clobber the progress dialog variable; show a plain dialog instead.
            xbmcgui.Dialog().ok("Piratebay", "Timeout")
            return

        soup = BeautifulSoup(html_data, "html5lib")
        nodes = soup.findAll("div", "detName")

        state = {"done": 0}
        def on_movie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(nodes)),
                line2=data.get("label") or "",
            )

        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")
        with futures.ThreadPoolExecutor(max_workers=10) as pool_tmdb:
            tmdb_list = [pool_tmdb.submit(piratebay_record, node) for node in nodes]
            for future in tmdb_list:
                future.add_done_callback(on_movie)
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(200)

        for job in tmdb_list:
            yield job.result()

        yield {
            "label": ">> Next page",
            "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
            "is_playable": False,
        }
Example #3
def _fn(*a, **kwds):
    # Inner wrapper of a caching decorator: "fn" and "kwargs" are closed over
    # from the enclosing scope, and "shelf" is the add-on's storage helper.
    import hashlib
    basename = "xbmctorrent.route.%s" % hashlib.sha1(plugin.request.path).hexdigest()
    with shelf(basename, ttl=kwargs.get("ttl") or 0) as result:
        if not result.get("value"):
            ret = fn(*a, **kwds)
            import types
            if isinstance(ret, types.GeneratorType):
                ret = list(ret)
            result["value"] = ret
        if kwargs.get("content_type"):
            plugin.set_content(kwargs.get("content_type"))
        return result["value"]
Example #4
def bitsnoop_page(root, page):
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d/" % (root, page)), headers=HEADERS, params={
        "fmt": "rss",
        "sort": "n_s",
        "dir": "desc",
    })
    return rss.parse(page_data)
Example #5
def extratorrent_page(type_="", cid="", search="", page=1):
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.pop("content_type", None)
    if content_type:
        plugin.set_content(content_type)

    params = {
        "type": type_,
        "search": search,
        "cid": cid,
    }
    params.update(plugin.request.args_dict)
    page_data = url_get(urljoin(BASE_URL, "/rss.xml"), headers=HEADERS, params=params)
    return rss.parse(page_data, content_type)
Example #6
def eztv_get_show_seasons(show_id):
    import random
    import re
    from concurrent import futures
    from xbmctorrent.utils import first, url_get
    from xbmctorrent import tvdb

    plugin.set_content("seasons")

    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            plugin.log.info("Getting show")
            response = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
            plugin.log.info("Got show")
            return response
        seasons_html = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])

        seasons_html = seasons_html.result()
        fanarts = []
        seasons = {}
        iter_list = re.findall(r'<a .*?class="epinfo".*?>(.*?)</a.*?>', seasons_html)
        for node_episode in iter_list:
            season, episode = get_episode_data_from_name(node_episode)
            seasons.setdefault(season, {})[episode] = True

        if tvdb_id:
            tvdb_show = tvdb_show.result()
            fanarts = [banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"]
            random.shuffle(fanarts)

        for i, season in enumerate(reversed(sorted(seasons.keys()))):
            item = tvdb_id and tvdb.get_season_list_item(tvdb_show, season) or {}
            item.update({
                "label": "Season %d [%d episodes]" % (season, len(seasons[season])),
                "path": plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=season, tvdb_id=tvdb_id),
            })
            if fanarts:
                item.setdefault("properties", {}).update({
                    "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
                })
            yield item
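
Note: Examples #6 and #9 both rely on a get_episode_data_from_name helper whose body is not shown. A minimal sketch of what it plausibly does, assuming release names such as "Show S01E02 720p" or "Show 1x02", with a (-1, -1) fallback that keeps the episode >= 0 check in Example #9 meaningful (hypothetical reconstruction):

import re

def get_episode_data_from_name(name):
    # Parse (season, episode) out of a release name; the exact patterns
    # are an assumption, not the add-on's actual ones.
    match = re.search(r"S(\d+)E(\d+)", name, re.IGNORECASE) or re.search(r"(\d+)x(\d+)", name)
    if match:
        return int(match.group(1)), int(match.group(2))
    return -1, -1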
Example #7
def extratorrent_page(type_="", cid="", search="", page=1):
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.pop("content_type", None)
    if content_type:
        plugin.set_content(content_type)

    params = {
        "type": type_,
        "search": search,
        "cid": cid,
    }
    params.update(plugin.request.args_dict)
    page_data = url_get(urljoin(BASE_URL, "/rss.xml"),
                        headers=HEADERS,
                        params=params)
    return rss.parse(page_data, content_type)
Example #8
def kat_page(root, page, sort_field, sort_order):
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)), headers=HEADERS, params={
        "rss": "1",
        "field": sort_field,
        "sorder": sort_order
    })
    for item in rss.parse(page_data, content_type):
        yield item
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("kat_page", root=root, page=page + 1, sort_field=sort_field, sort_order=sort_order, **plugin.request.args_dict),
    }
Example #9
def eztv_get_episodes_for_season(show_id, season):
    import copy
    import random
    from bs4 import BeautifulSoup
    from itertools import izip
    from concurrent import futures
    from xbmctorrent.utils import first, url_get
    from xbmctorrent import tvdb

    plugin.set_content("episodes")

    season = int(season)
    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        def _eztv_get_show():
            return BeautifulSoup(url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS), "html5lib")
        soup = pool.submit(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.submit(tvdb.get_all_meta, plugin.request.args["tvdb_id"][0])

        soup = soup.result()
        items = []
        fanarts = []
        if tvdb_id:
            tvdb_show = tvdb_show.result()
            fanarts = [banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"]
            random.shuffle(fanarts)
            items = list(tvdb.build_episode_list_items(tvdb_show, int(season)))
        text_nodes = soup.findAll("a", "epinfo")
        href_nodes = soup.findAll("a", "magnet")
        season_nodes = izip(text_nodes, href_nodes)
        season_nodes = filter(lambda x: get_episode_data_from_name(x[0].text)[0] == season, season_nodes)

        for i, (node_text, node_magnet) in enumerate(season_nodes):
            season, episode = get_episode_data_from_name(node_text.text)
            if tvdb_id and episode >= 0:
                item = copy.deepcopy(items[int(episode) - 1])
                for pattern, suffix in (("720p", "(HD)"), ("1080p", "(FullHD)"), ("repack", "(REPACK)"), ("proper", "(PROPER)")):
                    if pattern in node_text.text.lower():
                        item["label"] = "%s %s" % (item["label"], suffix)
            else:
                item = {
                    "label": node_text.text,
                }
            item.setdefault("info", {}).update({
                "tvshowtitle": node_text.text,
                "title": item["label"],
                "season": season,
                "episode": episode,
            })
            stream_info = {}
            if "x264" in node_text.text:
                stream_info["codec"] = item["info"]["video_codec"] = "h264"
            if "xvid" in node_text.text.lower():
                stream_info["codec"] = item["info"]["video_codec"] = "xvid"
            if "720p" in node_text.text:
                stream_info["width"] = 1280
                stream_info["height"] = 720
            if "1080p" in node_text.text:
                stream_info["width"] = 1920
                stream_info["height"] = 1080
            item.update({
                "path": plugin.url_for("play", uri=node_magnet["href"]),
                "stream_info": {"video": stream_info},
                "is_playable": True,
            })
            if fanarts:
                item.setdefault("properties", {}).update({
                    "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
                })
            yield item
Example #10
def rutracker_search_page(catind, page, search=None, search_id=None):
    import xbmc
    import xbmcgui
    from traceback import format_exc
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import url_get, terminating, SafeDialogProgress

    scraper_name = ""
    category = ([cat for cat in CATEGORIES if cat[0] == catind]
                or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    if plugin.request.args.get("search_id"):
        search_id = plugin.request.args.get("search_id")[0]
    page = int(page)
    catind = int(catind)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1=u"Получение информации о раздачах...",
                      line2="",
                      line3="")

        items = []
        try:
            url = urljoin(BASE_URL, "search.php?")

            if search_id:
                params = {}
                params["nm"] = search
                if int(page) > 0:
                    params["start"] = int(page) * 50
                params["id"] = search_id
                html_data = url_get(url, headers=HEADERS, params=params)
            else:
                post_body = {"nm": search, "fsf": catind}
                html_data = url_get(url, headers=HEADERS, post=post_body)

            soup = BeautifulSoup(html_data, "html5lib")

            node = soup.find("a", class_=["pg"])
            if node:
                r = search_id_parser.search(node['href'])
                if r:
                    plugin.log.debug("Search id found: " + str(r.group(1)))
                    search_id = str(r.group(1))

            nodes = soup.findAll("a", class_=["topictitle"])

            for link in nodes:
                try:
                    title = _rutracker_cleantitle(link.text)
                    r = topic_id_parser.search(link['href'])
                    if r:
                        id = r.group(1)
                        item = {
                            "label": title,
                            "path": plugin.url_for("rutracker_play", tid=id),
                            "info": {"title": title},
                            "is_playable": False,
                            "context_menu": [
                                ("Play with Pulsar", actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=id))),
                            ],
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " + title.encode('utf-8'))
                except Exception:
                    plugin.log.error("Unexpected error: %s \r Skipping item" % format_exc().split('\n')[-2])
        except Exception:
            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name,
                                "Не удалось получить данные от сервера")
            return

        def _get_torrent_info(item):

            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            try:
                scrapers = search.Scrapers()
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)
                plugin.log.debug("RUTRACKER: Meta information received")
                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(
                    get_quality_from_name(meta['label']))
                plugin.log.debug("RUTRACKER: Meta path updated")
                return meta
            except Exception:
                plugin.log.error(
                    "RUTRACKER: Unexpected error: %s parsing item [%s]" %
                    (format_exc().split('\n')[-2], str(item)))
                return scrapers.default(item)

        state = {"done": 0}

        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            jobs = [
                pool.apply_async(_get_torrent_info, [item], callback=on_done)
                for item in items
            ]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            try:
                item = job.get()
                del item["search"]
                del item["subdir"]
                yield item
            except Exception:
                plugin.log.error(
                    "RUTRACKER: Unexpected error: %s parsing item [%s]" %
                    (format_exc().split('\n')[-2], str(item)))
        if search_id:
            next_page = {
                "label": u"[Далее >]",
                "path": plugin.url_for("rutracker_search_page", catind=catind, page=page + 1, search=search, search_id=search_id),
                "is_playable": False,
            }
            yield next_page
Example #11
def eztv_shows_by_letter(letter):
    import re
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from itertools import izip
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.scrapers import ungenerate
    from xbmctorrent.utils import terminating, url_get
    from xbmctorrent import tvdb

    plugin.set_content("tvshows")
    shows_cache = plugin.get_storage("eztv_shows") # 1 day cache
    if not len(shows_cache.keys()):
        response = url_get("http://eztv.it/showlist/", headers=HEADERS)
        soup = BeautifulSoup(response, "html5lib")
        nodes = soup.findAll("a", "thread_link")
        for node in nodes:
            show_id, show_named_id = node["href"].split("/")[2:4]
            show_name = node.text
            show_first_letter = show_name[0].lower()
            if re.match(r"\d+", show_first_letter):
                show_first_letter = "0-9"
            shows_cache.setdefault(show_first_letter, {}).update({
                show_id: {
                    "id": show_id,
                    "named_id": show_named_id,
                    "name": node.text,
                }
            })

    shows_list = sorted(shows_cache[letter.lower()].values(), key=lambda x: x["name"].lower())

    @plugin.cached()
    @ungenerate
    def _eztv_shows_by_letter(letter):
        with closing(xbmcgui.DialogProgress()) as dialog:
            dialog.create(plugin.name)
            dialog.update(percent=0, line1="Fetching serie information...", line2="", line3="")

            state = {"done": 0}
            def on_serie(data):
                state["done"] += 1
                dialog.update(
                    percent=int(state["done"] * 100.0 / len(shows_list)),
                    line2=data and data["seriesname"] or "",
                )

            with terminating(ThreadPool(5)) as pool_tvdb:
                tvdb_list = [pool_tvdb.apply_async(tvdb.search, [show["name"], True], callback=on_serie) for show in shows_list]
                while not all(job.ready() for job in tvdb_list):
                    if dialog.iscanceled():
                        dialog.close()
                        return
                    xbmc.sleep(50)

        tvdb_list = [job.get() for job in tvdb_list]
        for i, (eztv_show, tvdb_show) in enumerate(izip(shows_list, tvdb_list)):
            if tvdb_show:
                item = tvdb.get_list_item(tvdb_show)
                item.update({
                    "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
                })
                yield item
            else:
                yield {
                    "label": eztv_show["name"],
                    "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
                }
    return _eztv_shows_by_letter(letter)
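
Note: Examples #10, #11, #12, #18 and #20 wrap ThreadPool in a terminating context manager from xbmctorrent.utils, which is not shown. Judging by its name and call sites, a plausible sketch (hypothetical reconstruction) is:

from contextlib import contextmanager

@contextmanager
def terminating(pool):
    # Yield the pool, then terminate it on exit so worker threads do not
    # outlive the routine that started them, even on early return or error.
    try:
        yield pool
    finally:
        pool.terminate()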
Example #12
def rutor_page(catind, page, query=None):
    import sys
    import xbmc
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import url_get, terminating, SafeDialogProgress

    scraper_name = ""
    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])

    page = int(page)
    catind = int(catind)

    mode = "browse"
    if query and query != str(None):
        mode = "search"

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        nodes = []
        try:
            html_data = url_get(urljoin(BASE_URL, "%s/%d/%d/0/0/%s" % (mode, page, catind, query)), headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            nodes = soup.findAll("tr", class_=["gai","tum"])
        except Exception:
            import xbmcgui
            plugin.log.error("Unexpected error: %s" % sys.exc_info()[0])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        if not nodes:
            yield {
                "label": u"[ Не найдено ]",
                "path" : plugin.url_for("rutor_index"),
                "is_playable": False,
            }
            return

        # store length before filter
        nodeslen = len(nodes)
        nodes = [node for node in _rutor_filter_nodes(nodes)]

        items = []
        for node in nodes:
            cells = node.findAll("td")
            seeds, peers = map(lambda x: x.text.strip(), cells[-1].findAll("span"))
            links = cells[1].findAll("a")
            magnet_node, title_node = links[-2], links[-1]
            size = cells[-2].text
            tid = int(title_node["href"][9:title_node["href"].find(u"/", 9)])
            title = _rutor_cleantitle(title_node.text)
            label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
            item = {
                "label": label,
                "path": plugin.url_for("rutor_details", catind=catind, tid=tid),
                "info": {"title": title},
                "is_playable": False,
            }
            items.append(item)

        def _get_torrent_info(item):
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            scrapers = search.Scrapers()

            if not plugin.get_setting("rutor_usesearch", bool):
                meta = scrapers.default(item)
            else:
                meta = scrapers.scraper(scraper_name, item)

            meta["path"] = item["path"]
            meta["is_playable"] = item["is_playable"]
            meta.setdefault("stream_info", {}).update(get_quality_from_name(meta['label']))
            return meta

        state = {"done": 0}
        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(nodes)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool: 
            jobs = [pool.apply_async(_get_torrent_info, [item], callback=on_done) for item in items]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        import hashlib

        passed = {}
        for job in jobs: 
            item = job.get()
            key = item.get("subdir") or item.get("search") or item["label"]
            sha1 = hashlib.sha1(uenc(key)).hexdigest()
            if not passed.get(sha1):
                passed[sha1] = True
                del item["search"]
                del item["subdir"]
                yield item
                
        if nodeslen == 100:
            next_page = {
                "label": u"[Далее >]",
                "path" : plugin.url_for("rutor_page", catind=catind, page=page + 1, query=query),
                "is_playable": False,
            }
            yield next_page
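
Note: Examples #10, #12, #17, #19 and #20 pass item labels through get_quality_from_name from xbmctorrent.utils, also not shown. The inline 720p/1080p/x264/xvid checks in Examples #9 and #14 suggest something like the sketch below; the call sites disagree on shape (Example #14 uses the result as the "video" value, while Examples #10 and #12 update "stream_info" with it directly), so this flat-dict version is only one plausible reading:

def get_quality_from_name(name):
    # Hypothetical reconstruction: guess video properties from a release
    # name, mirroring the inline checks in Examples #9 and #14.
    lower_name = name.lower()
    quality = {}
    if "x264" in lower_name:
        quality["codec"] = "h264"
    if "xvid" in lower_name:
        quality["codec"] = "xvid"
    if "720p" in lower_name:
        quality.update({"width": 1280, "height": 720})
    if "1080p" in lower_name:
        quality.update({"width": 1920, "height": 1080})
    return quality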
Example #13
def rutracker_page(catind, page, query=None):
    import xbmc
    import xbmcgui
    from traceback import format_exc
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from concurrent import futures
    from contextlib import closing
    from xbmctorrent.utils import url_get, SafeDialogProgress

    scraper_name = ""
    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    page = int(page)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        items = []
        try:
            url = urljoin(BASE_URL, "viewforum.php?f=%s&start=%s" % (query, str(page * 50)))
            html_data = url_get(url, headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            # find subforums
            nodes = soup.findAll("h4", class_=["forumlink"])

            # Add search item on topmost page:
            if catind == query:
                yield {
                    "label": u"[COLOR FFFFFF00][ Поиск ][/COLOR]",
                    "path": plugin.url_for("rutracker_search", catind=catind),
                }

            for node in nodes:
                link = node.find("a")
                link_href = link["href"]
                # find forum id in href:
                forumid = int(link_href[link_href.find(u"=") + 1:])
                item = {
                    "label": link.text,
                    "path": plugin.url_for("rutracker_page", catind=catind, page=0, query=forumid),
                    "info": {"title": link.text},
                    "is_playable": False,
                }
                yield item

            nodes = soup.findAll("a", class_="torTopic")

            for node in nodes:
                id = node["id"].replace("tt-", "")
                title = _rutracker_cleantitle(node.text)
                parent_node = node.parent.parent.parent
                size_node = parent_node.find("a", class_=["dl-stub"])

                if size_node:
                    size = _rutracker_cleantitle(size_node.text)
                    seeds = parent_node.find("span", class_=["seedmed"]).b.text
                    peers = parent_node.find("span", class_=["leechmed"]).b.text
                    label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
                    item = {
                        "label": label,
                        "path": plugin.url_for("rutracker_play", tid=id),
                        "info": {"title": title},
                        "is_playable": True,
                        "context_menu": [
                            ("Play with Pulsar", actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=id)))
                        ]
                    }
                    items.append(item)
        except Exception:
            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        with futures.ThreadPoolExecutor(max_workers=5) as pool:
            from xbmctorrent.utils import get_item_info

            usesearch = plugin.get_setting("rutracker_usesearch", bool)
            jobs = [pool.submit(get_item_info, i, usesearch and scraper_name or None) for i in items]
            state = {"done": 0}

            def on_done(future):
                if not future.cancelled():
                    state["done"] += 1
                    data = future.result()
                    dialog.update(
                        percent=int(state["done"] * 100.0 / len(jobs)),
                        line2=data["info"].get("title") or data.get("label") or "",
                    )

            for job in jobs:
                job.add_done_callback(on_done)
            while not all(job.done() for job in jobs):
                if dialog.iscanceled():
                    for job in jobs:
                        job.cancel()
                    return
                xbmc.sleep(100)
            items = [job.result() for job in jobs if not job.cancelled()]

        for item in items:
            del item["search"]
            del item["subdir"]
            yield item

        next_page = {
            "label": u"[COLOR FF00FF00][Далее >][/COLOR]",
            "path": plugin.url_for("rutracker_page", catind=catind, page=page + 1, query=query),
            "is_playable": False,
        }
        yield next_page
Example #14
def kickass_page(root, page):
    import re
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from concurrent import futures
    from urlparse import urljoin
    from xbmctorrent import tmdb
    from xbmctorrent.utils import first, url_get, get_quality_from_name

    content_type = first(plugin.request.args.get("content_type")) or None
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)))
    soup = BeautifulSoup(page_data, "html5lib")
    torrent_nodes = [node.parent for node in soup.findAll("td", "torrentnameCell")]

    def _get_torrent_info_with_meta(url):
        torrent_info = get_torrent_info(url)
        if torrent_info["movie"] and torrent_info["imdb_id"]:
            torrent_info["tmdb"] = tmdb.get(torrent_info["imdb_id"])
        return torrent_info


    with closing(xbmcgui.DialogProgress()) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching torrent information...", line2="", line3="")

        state = {"done": 0}
        def on_torrent(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(torrent_nodes)),
                line2=data.get("title") or "",
            )

        with futures.ThreadPoolExecutor(max_workers=3) as pool:
            movies = []
            for node in torrent_nodes:
                info_nodes = node.findAll("td")
                a_node = first(info_nodes[0].select("div.torrentname a.plain"))
                movies.append(pool.submit(_get_torrent_info_with_meta, urljoin(BASE_URL, a_node["href"])))
            for future in movies:
                future.add_done_callback(on_torrent)
            while not all(job.done() for job in movies):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    movies = [movie.result() for movie in movies]
    for i, movie in enumerate(movies):
        if movie.get("tmdb"):
            item = tmdb.get_list_item(movie["tmdb"])
            proper_kat_title = re.sub(r"\s+", " ", re.sub(r"[().-]", " ", movie["title"]))
            item["label"] = "%s (%s)" % (item["label"], re.sub(r"(?i)%s" % item["label"], "", proper_kat_title).strip())
        else:
            item = {
                "label": movie["title"],
                "info": {
                    "genre": ", ".join(movie["genres"]),
                }
            }
        lower_title = movie["title"].lower()
        if "x264" in lower_title:
            item.setdefault("stream_info", {})["codec"] = item.setdefault("info", {})["video_codec"] = "h264"
        if "xvid" in lower_title:
            item.setdefault("stream_info", {})["codec"] = item.setdefault("info", {})["video_codec"] = "xvid"
        if "720p" in lower_title:
            item.setdefault("stream_info", {}).update({
                "width": 1280,
                "height": 720,
            })
        if "1080p" in lower_title:
            item.setdefault("stream_info", {}).update({
                "width": 1920,
                "height": 1080,
            })
        item.update({
            "path": plugin.url_for("play", magnet=movie["magnet_url"]),
            "is_playable": True,
        })
        item.setdefault("info", {}).update({
            "count": i,
            "genre": "%s (S:%s P:%s)" % (item.get("info", {}).get("genre") or "", movie["seeders"], movie["leechers"]),
        })
        item.setdefault("stream_info", {}).update({
            "video": get_quality_from_name(movie["title"]),
        })
        yield item
    yield {
        "label": "Next page...",
        "path": plugin.url_for("kickass_page", root=root, page=page + 1, content_type=content_type),
    }
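
Note: first from xbmctorrent.utils (Examples #6, #9, #14, #15) is another helper that is not shown; its call sites pass a possibly-None list such as plugin.request.args.get("content_type"), which suggests a sketch like this (hypothetical reconstruction):

def first(iterable, default=None):
    # Return the first element of an iterable, or "default" if the
    # iterable is None or empty.
    for item in iterable or []:
        return item
    return default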
Example #15
def kickass_page(root, page):
    import re
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from concurrent import futures
    from urlparse import urljoin
    from xbmctorrent import tmdb
    from xbmctorrent.utils import first, url_get, get_quality_from_name

    content_type = first(plugin.request.args.get("content_type")) or None
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)))
    soup = BeautifulSoup(page_data, "html5lib")
    torrent_nodes = [
        node.parent for node in soup.findAll("td", "torrentnameCell")
    ]

    def _get_torrent_info_with_meta(url):
        torrent_info = get_torrent_info(url)
        if torrent_info["movie"] and torrent_info["imdb_id"]:
            torrent_info["tmdb"] = tmdb.get(torrent_info["imdb_id"])
        return torrent_info

    with closing(xbmcgui.DialogProgress()) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1="Fetching torrent information...",
                      line2="",
                      line3="")

        state = {"done": 0}

        def on_torrent(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(torrent_nodes)),
                line2=data.get("title") or "",
            )

        with futures.ThreadPoolExecutor(max_workers=3) as pool:
            movies = []
            for node in torrent_nodes:
                info_nodes = node.findAll("td")
                a_node = first(info_nodes[0].select("div.torrentname a.plain"))
                movies.append(
                    pool.submit(_get_torrent_info_with_meta,
                                urljoin(BASE_URL, a_node["href"])))
            for future in movies:
                future.add_done_callback(on_torrent)
            while not all(job.done() for job in movies):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    movies = [movie.result() for movie in movies]
    for i, movie in enumerate(movies):
        if movie.get("tmdb"):
            item = tmdb.get_list_item(movie["tmdb"])
            proper_kat_title = re.sub(r"\s+", " ", re.sub(r"[().-]", " ", movie["title"]))
            item["label"] = "%s (%s)" % (item["label"], re.sub(r"(?i)%s" % item["label"], "", proper_kat_title).strip())
        else:
            item = {
                "label": movie["title"],
                "info": {
                    "genre": ", ".join(movie["genres"]),
                }
            }
        lower_title = movie["title"].lower()
        if "x264" in lower_title:
            item.setdefault("stream_info", {})["codec"] = item.setdefault(
                "info", {})["video_codec"] = "h264"
        if "xvid" in lower_title:
            item.setdefault("stream_info", {})["codec"] = item.setdefault(
                "info", {})["video_codec"] = "xvid"
        if "720p" in lower_title:
            item.setdefault("stream_info", {}).update({
                "width": 1280,
                "height": 720,
            })
        if "1080p" in lower_title:
            item.setdefault("stream_info", {}).update({
                "width": 1920,
                "height": 1080,
            })
        item.update({
            "path": plugin.url_for("play", magnet=movie["magnet_url"]),
            "is_playable": True,
        })
        item.setdefault("info", {}).update({
            "count":
            i,
            "genre":
            "%s (S:%s P:%s)" % (item.get("info", {}).get("genre")
                                or "", movie["seeders"], movie["leechers"]),
        })
        item.setdefault("stream_info", {}).update({
            "video":
            get_quality_from_name(movie["title"]),
        })
        yield item
    yield {
        "label": "Next page...",
        "path": plugin.url_for("kickass_page", root=root, page=page + 1, content_type=content_type),
    }
Example #16
def yify_show_data(callback):
    import xbmc
    import xbmcgui
    from contextlib import nested, closing
    from itertools import izip, chain
    from multiprocessing.pool import ThreadPool
    from xbmctorrent import tmdb
    from xbmctorrent.utils import url_get_json, terminating

    plugin.set_content("movies")
    args = dict((k, v[0]) for k, v in plugin.request.args.items())

    current_page = int(args["set"])
    limit = int(args["limit"])

    with closing(xbmcgui.DialogProgress()) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")

        search_result = url_get_json("%s/api/list.json" % BASE_URL, params=args, headers=HEADERS)
        movies = search_result["MovieList"]

        state = {"done": 0}
        def on_movie(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / (len(movies) * 2)),
                line2=data.get("title") or data.get("MovieTitleClean") or "",
            )

        with nested(terminating(ThreadPool(2)), terminating(ThreadPool(2))) as (pool_yify, pool_tmdb):
            yify_list = [pool_yify.apply_async(yify_get, [movie["MovieID"]], callback=on_movie) for movie in movies]
            tmdb_list = [pool_tmdb.apply_async(tmdb.get, [movie["ImdbCode"]], callback=on_movie) for movie in movies]
            while not all(job.ready() for job in chain(yify_list, tmdb_list)):
                if dialog.iscanceled():
                    dialog.close()
                    return
                xbmc.sleep(50)

        yify_list = map(lambda job: job.get(), yify_list)
        tmdb_list = map(lambda job: job.get(), tmdb_list)
        for movie, tmdb_meta in izip(yify_list, tmdb_list):
            if tmdb_meta:
                item = tmdb.get_list_item(tmdb_meta)
                if args.get("quality") == "all" and movie["Quality"] != "720p":
                    item["label"] = "%s (%s)" % (item["label"], movie["Quality"])
                item.update({
                    "path": plugin.url_for("play", magnet=movie["TorrentMagnetUrl"]),
                    "is_playable": True,
                })
                item.setdefault("info", {}).update({
                    "count": movie["MovieID"],
                    "genre": "%s (%s S:%s P:%s)" % (item["info"]["genre"], movie["Size"], movie["TorrentSeeds"], movie["TorrentPeers"]),
                    "trailer": YOUTUBE_ACTION % movie["YoutubeTrailerID"],
                    "plot_outline": movie["ShortDescription"],
                    "video_codec": "h264",
                    "mpaa": movie["AgeRating"],
                })
                width, height = map(int, movie["Resolution"].split("*"))
                item.setdefault("stream_info", {}).update({
                    "video": {
                        "codec": "h264",
                        "width": width,
                        "height": height,
                        "aspect": float(width) / float(height),
                    },
                    "audio": {
                        "codec": "aac",
                        "language": movie["Language"],
                    },
                })
                yield item

        if current_page < (int(search_result["MovieCount"]) / limit):
            next_args = args.copy()
            next_args["set"] = int(next_args["set"]) + 1
            yield {
                "label": ">> Next page",
                "path": plugin.url_for(callback, **next_args),
            }
Example #17
def rutor_details(catind, tid):
    import xbmcgui
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get, get_quality_from_name
    from xbmctorrent.search import scrapers as search

    scraper_name = ""
    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])

    tid = int(tid)
    try:
        html_data = url_get(urljoin(BASE_URL, "torrent/%d/" % tid), headers=HEADERS)
    except Exception:
        xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
        return

    soup = BeautifulSoup(html_data, "html5lib")
    if len(soup.select("div#download")) == 0:
        # it may occur if site redirects a request to his mirror
        xbmcgui.Dialog().ok(plugin.name, "Раздачи не существует. Обновите список")
        return

    div_index = soup.select("div#index")

    scrapers = search.Scrapers()

    details = soup.select("table#details")[0].findAll("tr")
    shift = 1 if len(details[1].select("a")) > 0 else 0
    seeds = details[3 + shift].contents[1].text
    peers = details[4 + shift].contents[1].text
    size = details[7 + shift].contents[1].text
    size = size[:size.find(" ")]
    title = _rutor_cleantitle(soup.h1.text)
    label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
    item = {
        "label": label,
        "info": {"title": title}
    }

    if plugin.get_setting("rutor_usesearch", bool):
        meta = scrapers.scraper(scraper_name, item)
    else:
        meta = scrapers.default(item)

    meta["path"] = plugin.url_for("rutor_play", tid=tid)
    meta["is_playable"] = True
    meta.setdefault("stream_info", {}).update(get_quality_from_name(meta['label']))
    meta["context_menu"] = [("Play with Pulsar", actions.update_view(plugin.url_for("rutor_play_pulsar", tid=tid)))]

    del meta["search"]
    del meta["subdir"]
    yield meta

    if len(div_index) > 0:
        nodes = div_index[0].findAll("tr", class_=["gai", "tum"])
        nodes = [node for node in _rutor_filter_nodes(nodes)]

        for node in nodes:
            cells = node.findAll("td")
            seeds, peers = map(lambda x: x.text.strip(), cells[-1].findAll("span"))
            download_node, magnet_node, title_node = cells[1].findAll("a")
            size = cells[-2].text
            title = _rutor_cleantitle(title_node.text)
            tid = int(title_node["href"][9:title_node["href"].find(u"/", 9)])
            label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)

            item = scrapers.default({
                "label": label,
                "info": {"title": title}
            })
            item.update({
                "path": plugin.url_for("rutor_play", tid=tid),
                "is_playable": True,
                "thumbnail": meta["thumbnail"],
                "icon": meta["icon"]
            })
            item["info"].update(meta["info"])
            item["properties"].update(meta["properties"])
            item.setdefault("stream_info", {}).update(get_quality_from_name(item['label']))
            item["context_menu"] = [("Play with Pulsar", actions.update_view(plugin.url_for("rutor_play_pulsar", tid=tid)))]

            del item["search"]
            del item["subdir"]
            yield item
Example #18
def rutracker_page(catind, page, query=None):
    import xbmc
    import xbmcgui
    from traceback import format_exc
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import url_get, terminating, SafeDialogProgress

    scraper_name = ""
    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    page = int(page)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        items = []
        try:
            url = urljoin(BASE_URL, "viewforum.php?f=%s&start=%s" % (query, str(page * 50)))
            html_data = url_get(url, headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            # find subforums
            nodes = soup.findAll("h4", class_=["forumlink"])

            #Add search item on topmost page:
            if catind == query:
                yield {
                    "label": u"[COLOR FFFFFF00][ Поиск ][/COLOR]",
                    "path": plugin.url_for("rutracker_search", catind=catind),
                }

            plugin.log.debug("Loading forum nodes")
            for node in nodes:
                link = node.find("a")
                plugin.log.debug("Forum link: " + str(link))
                link_href = link["href"]
                # find forum id in href:
                forumid = int(link_href[link_href.find(u"=") + 1:])
                item = {
                    "label": link.text,
                    "path": plugin.url_for("rutracker_page", catind=catind, page=0, query=forumid),
                    "info": {"title": link.text},
                    "is_playable": False,
                }
                yield item

            nodes = soup.findAll("td", class_=["topic_id"])

            for node in nodes:
                id = node["id"]
                title_node = node.parent.find(id='tt-%s' % str(id))
                title = _rutracker_cleantitle(title_node.text)
                row_node = node.parent
                # find "size" table cell - it should have the download link
                size_td = row_node.find_all("td")[2]

                # check if the size cell has a download link to the torrent file:
                if size_td:
                    size_link = size_td.find("a", class_=["small"])
                    if size_link:
                        size = size_link.text
                        seeds = size_td.find("span", class_=["seedmed"]).b.text
                        peers = size_td.find("span", class_=["leechmed"]).b.text
                        label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
                        item = {
                            "label": label,
                            "path": plugin.url_for("rutracker_play", tid=id),
                            "info": {"title": title},
                            "is_playable": False,
                            "context_menu": [
                                ("Play with Pulsar", actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=id)))
                            ]
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " + title.encode('utf-8'))
        except Exception:
            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        def _get_torrent_info(item):

            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            try:
                scrapers = search.Scrapers()
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)

                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(get_quality_from_name(meta['label']))
                return meta
            except Exception:
                plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
                return scrapers.default(item)

        state = {"done": 0}

        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            jobs = [pool.apply_async(_get_torrent_info, [item], callback=on_done) for item in items]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            item = job.get()
            del item["search"]
            del item["subdir"]
            yield item

        next_page = {
            "label": u"[Далее >]",
            "path": plugin.url_for("rutracker_page", catind=catind, page=page + 1, query=query),
            "is_playable": False,
        }
        yield next_page
Example #19
def rutor_details(catind, tid):
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get, get_quality_from_name
    from xbmctorrent.search import scrapers as search

    scraper_name = ""
    category = ([cat for cat in CATEGORIES if cat[0] == catind]
                or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])

    tid = int(tid)
    try:
        html_data = url_get(urljoin(BASE_URL, "torrent/%d/" % tid),
                            headers=HEADERS)
    except Exception:
        import xbmcgui
        xbmcgui.Dialog().ok(plugin.name,
                            "Не удалось получить данные от сервера")
        return

    soup = BeautifulSoup(html_data, "html5lib")
    div_download = soup.select("div#download")[0]
    div_index = soup.select("div#index")

    if len(div_index) == 0:
        plugin.redirect(plugin.url_for("rutor_play", tid=tid))
        return

    scrapers = search.Scrapers()

    details = soup.select("table#details")[0].findAll("tr")
    seeds = details[4].contents[1].text
    peers = details[5].contents[1].text
    size = details[8].contents[1].text
    size = size[:size.find(" ")]
    title = _rutor_cleantitle(soup.h1.text)
    label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
    item = {"label": label, "info": {"title": title}}

    if plugin.get_setting("rutor_usesearch", bool):
        meta = scrapers.scraper(scraper_name, item)
    else:
        meta = scrapers.default(item)

    meta["path"] = plugin.url_for("rutor_play", tid=tid)
    meta["is_playable"] = False
    meta.setdefault("stream_info", {}).update(get_quality_from_name(meta['label']))
    meta["context_menu"] = [("Play with Pulsar", actions.update_view(plugin.url_for("rutor_play_pulsar", tid=tid)))]

    del meta["search"]
    del meta["subdir"]
    yield meta

    if len(div_index) > 0:
        nodes = div_index[0].findAll("tr", class_=["gai", "tum"])
        nodes = [node for node in _rutor_filter_nodes(nodes)]

        for node in nodes:
            cells = node.findAll("td")
            seeds, peers = map(lambda x: x.text.strip(), cells[-1].findAll("span"))
            download_node, magnet_node, title_node = cells[1].findAll("a")
            size = cells[-2].text
            title = _rutor_cleantitle(title_node.text)
            tid = int(title_node["href"][9:title_node["href"].find(u"/", 9)])
            label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)

            item = scrapers.default({"label": label, "info": {"title": title}})
            item.update({
                "path": plugin.url_for("rutor_play", tid=tid),
                "is_playable": False,
                "thumbnail": meta["thumbnail"],
                "icon": meta["icon"]
            })
            item["info"].update(meta["info"])
            item["properties"].update(meta["properties"])
            item.setdefault("stream_info",
                            {}).update(get_quality_from_name(item['label']))
            item["context_menu"] = [("Play with Pulsar",
                                     actions.update_view(
                                         plugin.url_for("rutor_play_pulsar",
                                                        tid=tid)))]

            del item["search"]
            del item["subdir"]
            yield item
Example #20
def rutor_page(catind, page, query=None):
    import sys
    import xbmc
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import url_get, terminating, SafeDialogProgress

    scraper_name = ""
    category = ([cat for cat in CATEGORIES if cat[0] == catind]
                or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])

    page = int(page)
    catind = int(catind)

    mode = "browse"
    if query and query != str(None):
        mode = "search"

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1=u"Получение информации о раздачах...",
                      line2="",
                      line3="")

        nodes = []
        try:
            html_data = url_get(urljoin(
                BASE_URL, "%s/%d/%d/0/0/%s" % (mode, page, catind, query)),
                                headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            nodes = soup.findAll("tr", class_=["gai", "tum"])
        except Exception:
            import xbmcgui
            plugin.log.error("Unexpected error: %s" % sys.exc_info()[0])
            xbmcgui.Dialog().ok(plugin.name,
                                "Не удалось получить данные от сервера")
            return

        if not nodes:
            yield {
                "label": u"[ Не найдено ]",
                "path": plugin.url_for("rutor_index"),
                "is_playable": False,
            }
            return

        # store length before filter
        nodeslen = len(nodes)
        nodes = [node for node in _rutor_filter_nodes(nodes)]

        items = []
        for node in nodes:
            cells = node.findAll("td")
            seeds, peers = map(lambda x: x.text.strip(), cells[-1].findAll("span"))
            links = cells[1].findAll("a")
            magnet_node, title_node = links[-2], links[-1]
            size = cells[-2].text
            tid = int(title_node["href"][9:title_node["href"].find(u"/", 9)])
            title = _rutor_cleantitle(title_node.text)
            label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
            item = {
                "label": label,
                "path": plugin.url_for("rutor_details", catind=catind, tid=tid),
                "info": {"title": title},
                "is_playable": False,
            }
            items.append(item)

        def _get_torrent_info(item):
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            scrapers = search.Scrapers()

            if not plugin.get_setting("rutor_usesearch", bool):
                meta = scrapers.default(item)
            else:
                meta = scrapers.scraper(scraper_name, item)

            meta["path"] = item["path"]
            meta["is_playable"] = item["is_playable"]
            meta.setdefault("stream_info",
                            {}).update(get_quality_from_name(meta['label']))
            return meta

        state = {"done": 0}

        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(nodes)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            jobs = [
                pool.apply_async(_get_torrent_info, [item], callback=on_done)
                for item in items
            ]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        import hashlib

        passed = set()
        for job in jobs:
            item = job.get()
            # deduplicate on the most specific name available
            key = item.get("subdir") or item.get("search") or item["label"]
            sha1 = hashlib.sha1(uenc(key)).hexdigest()
            if sha1 not in passed:
                passed.add(sha1)
                del item["search"]
                del item["subdir"]
                yield item

        if nodeslen == 100:
            yield {
                "label": u"[Далее >]",
                "path": plugin.url_for("rutor_page",
                                       catind=catind,
                                       page=page + 1,
                                       query=query),
                "is_playable": False,
            }
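
Note: the duplicate filter at the end of this example keys each result on a
SHA-1 of its most specific name (subdir, then search, then label). A minimal
standalone sketch of the same idea, assuming uenc simply UTF-8-encodes its
argument as it does in xbmctorrent.utils:

import hashlib


def dedupe(items):
    """Yield each item whose best available key has not been seen yet."""
    seen = set()
    for item in items:
        # prefer the most specific name, mirroring rutor_page above
        key = item.get("subdir") or item.get("search") or item["label"]
        digest = hashlib.sha1(key.encode("utf-8")).hexdigest()
        if digest not in seen:
            seen.add(digest)
            yield item


items = [
    {"label": "Movie 1080p", "search": "Movie"},
    {"label": "Movie 720p", "search": "Movie"},  # dropped: same search key
    {"label": "Other"},
]
assert [i["label"] for i in dedupe(items)] == ["Movie 1080p", "Other"]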
Example #21
0
def eztv_shows_by_letter(letter):
    import re
    import xbmc
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from xbmctorrent.utils import url_get, SafeDialogProgress
    from xbmctorrent import tvdb

    plugin.set_content("tvshows")

    with shelf("it.eztv.shows") as eztv_shows:
        if not eztv_shows:
            showlist = url_get("%s/showlist/" % BASE_URL, headers=HEADERS)
            nodes_text = re.findall(r'<a .*?class="thread_link".*?>(.*?)</a.*?>', showlist)
            nodes_href = re.findall(r'<a .*?class="thread_link".*?>', showlist)
            nodes_href = map(lambda x: re.findall(r'href="(.*?)"', x)[0], nodes_href)
            for node_text, node_href in izip(nodes_text, nodes_href):
                show_id, show_named_id = node_href.split("/")[2:4]
                show_name = node_text
                show_first_letter = show_name[0].lower()
                if re.match(r"\d", show_first_letter):
                    show_first_letter = "0-9"
                eztv_shows.setdefault(show_first_letter, {}).update({
                    show_id: {
                        "id": show_id,
                        "named_id": show_named_id,
                        "name": node_text,
                    }
                })

    shows_list = sorted(eztv_shows[letter.lower()].values(), key=lambda x: x["name"].lower())

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching serie information...", line2="", line3="")

        state = {"done": 0}
        def on_serie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(shows_list)),
                line2=data and data["seriesname"] or "",
            )

        with futures.ThreadPoolExecutor(max_workers=5) as pool_tvdb:
            tvdb_list = [pool_tvdb.submit(tvdb.search, show["name"], True) for show in shows_list]
            for future in tvdb_list:
                future.add_done_callback(on_serie)
            while not all(job.done() for job in tvdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    tvdb_list = [job.result() for job in tvdb_list]
    for i, (eztv_show, tvdb_show) in enumerate(izip(shows_list, tvdb_list)):
        if tvdb_show:
            item = tvdb.get_list_item(tvdb_show)
            item.update({
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
            })
            yield item
        else:
            yield {
                "label": eztv_show["name"],
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
            }
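
Note: the shelf cache in this example buckets shows by first letter, with all
digit-leading names sharing a single "0-9" bucket. A small sketch of just that
grouping rule, with made-up show names:

import re


def bucket_for(name):
    # digits share one "0-9" bucket; everything else keys on its first letter
    first = name[0].lower()
    return "0-9" if re.match(r"\d", first) else first


buckets = {}
for name in ["Archer", "24", "arrow", "90210"]:
    buckets.setdefault(bucket_for(name), []).append(name)

assert buckets == {"a": ["Archer", "arrow"], "0-9": ["24", "90210"]}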
Example #22
0
def rutracker_page(catind, page, query=None):
    import xbmc
    import xbmcgui
    from traceback import format_exc
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import terminating, SafeDialogProgress

    category = ([cat for cat in CATEGORIES if cat[0] == catind]
                or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    page = int(page)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1=u"Получение информации о раздачах...",
                      line2="",
                      line3="")

        items = []
        try:
            url = urljoin(
                BASE_URL,
                "viewforum.php?f=%s&start=%d" % (query, page * 50))
            html_data = url_get(url, headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            # find subforums
            nodes = soup.findAll("h4", class_=["forumlink"])

            # Add a search item on the topmost page:
            if catind == query:
                yield {
                    "label": u"[COLOR FFFFFF00][ Поиск ][/COLOR]",
                    "path": plugin.url_for("rutracker_search", catind=catind),
                }

            plugin.log.debug("Loading forum nodes")
            for node in nodes:
                link = node.find("a")
                plugin.log.debug("Forum link: " + str(link))
                link_href = link["href"]
                # the forum id follows the "=" in the href
                forumid = int(link_href[link_href.find(u"=") + 1:])
                item = {
                    "label": link.text,
                    "path": plugin.url_for("rutracker_page",
                                           catind=catind,
                                           page=0,
                                           query=forumid),
                    "info": {"title": link.text},
                    "is_playable": False,
                }
                yield item

            nodes = soup.findAll("td", class_=["topic_id"])

            for node in nodes:
                tid = node["id"]
                title_node = node.parent.find(id="tt-%s" % tid)
                title = _rutracker_cleantitle(title_node.text)
                row_node = node.parent

                # the "size" cell should carry the torrent download link
                size_td = row_node.find_all("td")[2]
                if size_td:
                    size_link = size_td.find("a", class_=["small"])
                    if size_link:
                        size = size_link.text
                        seeds = size_td.find("span", class_=["seedmed"]).b.text
                        peers = size_td.find("span",
                                             class_=["leechmed"]).b.text
                        label = "%s | %s (S:%s P:%s)" % (title, size, seeds,
                                                         peers)
                        item = {
                            "label": label,
                            "path": plugin.url_for("rutracker_play", tid=tid),
                            "info": {"title": title},
                            "is_playable": False,
                            "context_menu": [
                                ("Play with Pulsar",
                                 actions.update_view(
                                     plugin.url_for("rutracker_play_pulsar",
                                                    tid=tid)))
                            ],
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " +
                                         title.encode("utf-8"))
        except Exception:
            plugin.log.error("Unexpected error: %s" %
                             format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name,
                                "Не удалось получить данные от сервера")
            return

        def _get_torrent_info(item):
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            scrapers = search.Scrapers()
            try:
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)

                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(
                    get_quality_from_name(meta['label']))
                return meta
            except Exception:
                plugin.log.error("Unexpected error: %s" %
                                 format_exc().split('\n')[-2])
                # fall back to plain metadata if the scraper lookup failed
                return scrapers.default(item)

        state = {"done": 0}

        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            jobs = [
                pool.apply_async(_get_torrent_info, [item], callback=on_done)
                for item in items
            ]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            item = job.get()
            del item["search"]
            del item["subdir"]
            yield item

        yield {
            "label": u"[Далее >]",
            "path": plugin.url_for("rutracker_page",
                                   catind=catind,
                                   page=page + 1,
                                   query=query),
            "is_playable": False,
        }
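
Note: this example and Example #20 share the same fan-out shape: submit one
job per item to a small ThreadPool, bump a progress counter from the
callback, and poll job.ready() so the dialog's cancel button stays
responsive. A stripped-down sketch of that loop, with time.sleep standing in
for both the scraper call and xbmc.sleep:

import time
from multiprocessing.pool import ThreadPool


def fetch(item):
    time.sleep(0.01)  # stand-in for the per-item scraper lookup
    return item


state = {"done": 0}


def on_done(result):
    state["done"] += 1  # runs on the pool's result thread, like dialog.update


pool = ThreadPool(5)
try:
    jobs = [pool.apply_async(fetch, [i], callback=on_done) for i in range(20)]
    while not all(job.ready() for job in jobs):
        time.sleep(0.1)  # the addon checks dialog.iscanceled() here
    results = [job.get() for job in jobs]
finally:
    pool.terminate()  # what utils.terminating() does on exit
    pool.join()

assert state["done"] == len(results) == 20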
Example #23
0
def rutracker_search_page(catind, page, search=None, search_id=None):
    import xbmc
    import xbmcgui
    from traceback import format_exc
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from concurrent import futures
    from contextlib import closing
    from xbmctorrent.utils import SafeDialogProgress

    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    if plugin.request.args.get("search_id"):
        search_id = plugin.request.args.get("search_id")[0]
    page = int(page)
    catind = int(catind)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        items = []
        try:
            url = urljoin(BASE_URL, "search.php?")

            if search_id:
                params = {"nm": search, "id": search_id}
                if page > 0:
                    params["start"] = page * 50
                html_data = url_get(url, headers=HEADERS, params=params)
            else:
                post_body = {"nm": search, "fsf": catind}
                html_data = url_get(url, headers=HEADERS, post=post_body)

            soup = BeautifulSoup(html_data, "html5lib")

            node = soup.find("a", class_=["pg"])
            if node:
                r = search_id_parser.search(node['href'])
                if r:
                    plugin.log.debug("Search id found: " + str(r.group(1)))
                    search_id = str(r.group(1))

            for link in soup.findAll("a", class_=["topictitle"]):
                try:
                    title = _rutracker_cleantitle(link.text)
                    r = topic_id_parser.search(link['href'])
                    if r:
                        id = r.group(1)
                        label = "%s" % (title)
                        item = {
                            "label": label,
                            "path": plugin.url_for("rutracker_play", tid=id),
                            "info": {"title": title},
                            "is_playable": True,
                            "context_menu": [
                                ("Play with Pulsar", actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=id)))
                            ]
                        }
                        items.append(item)
                except Exception:
                    plugin.log.error("Unexpected error: %s \r Skipping item" % format_exc().split('\n')[-2])
        except Exception:
            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        with futures.ThreadPoolExecutor(max_workers=5) as pool:
            from xbmctorrent.utils import get_item_info

            usesearch = plugin.get_setting("rutracker_usesearch", bool)
            jobs = [pool.submit(get_item_info, i, usesearch and scraper_name or None) for i in items]
            state = {"done": 0}

            def on_done(future):
                if not future.cancelled():
                    state["done"] += 1
                    data = future.result()
                    dialog.update(
                        percent=int(state["done"] * 100.0 / len(jobs)),
                        line2=data["info"].get("title") or data.get("label") or "",
                    )

            for job in jobs:
                job.add_done_callback(on_done)
            while not all(job.done() for job in jobs):
                if dialog.iscanceled():
                    for job in jobs:
                        job.cancel()
                    return
                xbmc.sleep(100)
            items = [job.result() for job in jobs if not job.cancelled()]

        for item in items:
            del item["search"]
            del item["subdir"]
            yield item

        if search_id:
            next_page = {
                "label": u"[COLOR FF00FF00][Далее >][/COLOR]",
                "path": plugin.url_for("rutracker_search_page", catind=catind, page=page + 1, search=search, search_id=search_id),
                "is_playable": False,
            }
            yield next_page
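
Note: this futures-based variant replaces ThreadPool polling with
add_done_callback plus an explicit cancel pass when the dialog is dismissed.
The same shape in miniature, with the work function as a placeholder for
get_item_info:

import time
from concurrent import futures  # the "futures" backport on Python 2


def get_info(item):
    time.sleep(0.01)  # placeholder for the scraper lookup
    return item


state = {"done": 0}


def on_done(future):
    if not future.cancelled():  # cancelled futures carry no result
        state["done"] += 1


with futures.ThreadPoolExecutor(max_workers=5) as pool:
    jobs = [pool.submit(get_info, i) for i in range(20)]
    for job in jobs:
        job.add_done_callback(on_done)
    while not all(job.done() for job in jobs):
        time.sleep(0.1)  # poll so a cancel button could be honoured
    items = [job.result() for job in jobs if not job.cancelled()]

assert state["done"] == len(items) == 20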
Example #24
0
def rutor_page(catind, page, query=None):
    import hashlib
    import sys
    import xbmc
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from concurrent import futures
    from contextlib import closing
    from xbmctorrent.utils import SafeDialogProgress

    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])

    page = int(page)
    catind = int(catind)

    mode = "browse"
    if query and query != str(None):  # query may arrive as the literal string "None"
        mode = "search"

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        nodes = []
        try:
            html_data = url_get(urljoin(BASE_URL, "%s/%d/%d/0/0/%s" % (mode, page, catind, query)), headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            nodes = soup.findAll("tr", class_=["gai", "tum"])
        except Exception:
            import xbmcgui
            plugin.log.error("Unexpected error: %s" % sys.exc_info()[0])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        if not nodes:
            yield {
                "label": u"[ Не найдено ]",
                "path": plugin.url_for("rutor_index"),
                "is_playable": False,
            }
            return

        # store length before filtering, to decide on pagination later
        nodeslen = len(nodes)
        nodes = list(_rutor_filter_nodes(nodes))

        items = []
        for node in nodes:
            cells = node.findAll("td")
            seeds, peers = [span.text.strip() for span in cells[-1].findAll("span")]
            links = cells[1].findAll("a")
            title_node = links[-1]
            size = cells[-2].text
            # the numeric torrent id follows the 9-character href prefix
            tid = int(title_node["href"][9:title_node["href"].find(u"/", 9)])
            title = _rutor_cleantitle(title_node.text)
            label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
            item = {
                "label": label,
                "path": plugin.url_for("rutor_details", catind=catind, tid=tid),
                "info": {"title": title},
                "is_playable": False,
            }
            items.append(item)

        with futures.ThreadPoolExecutor(max_workers=5) as pool:
            from xbmctorrent.utils import get_item_info

            usesearch = plugin.get_setting("rutor_usesearch", bool)
            jobs = [pool.submit(get_item_info, i, usesearch and scraper_name or None) for i in items]
            state = {"done": 0}

            def on_done(future):
                if not future.cancelled():
                    state["done"] += 1
                    data = future.result()
                    dialog.update(
                        percent=int(state["done"] * 100.0 / len(jobs)),
                        line2=data["info"].get("title") or data.get("label") or "",
                    )

            for job in jobs:
                job.add_done_callback(on_done)
            while not all(job.done() for job in jobs):
                if dialog.iscanceled():
                    for job in jobs:
                        job.cancel()
                    return
                xbmc.sleep(100)
            items = [job.result() for job in jobs if not job.cancelled()]

        passed = set()
        for item in items:
            # deduplicate on the most specific name available
            key = item.get("subdir") or item.get("search") or item["label"]
            sha1 = hashlib.sha1(uenc(key)).hexdigest()
            if mode == "search" or sha1 not in passed:
                passed.add(sha1)
                del item["search"]
                del item["subdir"]
                yield item

        if nodeslen == 100:
            next_page = {
                "label": u"[COLOR FF00FF00][Далее >][/COLOR]",
                "path": plugin.url_for("rutor_page", catind=catind, page=page + 1, query=query),
                "is_playable": False,
            }
            yield next_page
Example #25
0
def yify_show_data(callback):
    import urllib
    import xbmc
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from xbmctorrent import tmdb
    from xbmctorrent.utils import url_get_json, SafeDialogProgress

    plugin.set_content("movies")
    args = dict((k, v[0]) for k, v in plugin.request.args.items())

    current_page = int(args["set"])
    limit = int(args["limit"])

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1="Fetching movie information...",
                      line2="",
                      line3="")

        try:
            search_result = url_get_json("%s/api/list.json" % BASE_URL,
                                         params=args,
                                         headers=HEADERS)
        except Exception:
            plugin.notify("Unable to connect to %s." % BASE_URL)
            raise
        movies = search_result.get("MovieList") or []

        if not movies:
            return

        state = {"done": 0}

        def on_movie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(movies)),
                line2=data.get("title") or data.get("MovieTitleClean") or "",
            )

        with futures.ThreadPoolExecutor(max_workers=2) as pool_tmdb:
            tmdb_list = [
                pool_tmdb.submit(tmdb.get, movie["ImdbCode"])
                for movie in movies
            ]
            for future in tmdb_list:
                future.add_done_callback(on_movie)
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        tmdb_list = [job.result() for job in tmdb_list]
        for movie, tmdb_meta in izip(movies, tmdb_list):
            if tmdb_meta:
                magnet_link = urllib.quote_plus(
                    movie["TorrentMagnetUrl"].encode("utf-8"))
                item = tmdb.get_list_item(tmdb_meta)
                if args.get("quality") == "all" and movie["Quality"] != "720p":
                    item["label"] = "%s (%s)" % (item["label"],
                                                 movie["Quality"])
                item.update({
                    "path": "plugin://plugin.video.pulsar/play?uri=" + magnet_link,
                    # "path": plugin.url_for("play", uri=movie["TorrentMagnetUrl"]),
                    "is_playable": True,
                })
                item.setdefault("info", {}).update({
                    "count": movie["MovieID"],
                    "genre": "%s (%s S:%s P:%s)" % (
                        item["info"]["genre"], movie["Size"],
                        movie["TorrentSeeds"], movie["TorrentPeers"]),
                    "plot_outline": tmdb_meta["overview"],
                    "video_codec": "h264",
                })
                width = 1920
                height = 1080
                if movie["Quality"] == "720p":
                    width = 1280
                    height = 720
                item.setdefault("stream_info", {}).update({
                    "video": {
                        "codec": "h264",
                        "width": width,
                        "height": height,
                    },
                    "audio": {
                        "codec": "aac",
                    },
                })
                yield item

        if current_page < (int(search_result["MovieCount"]) / limit):
            next_args = args.copy()
            next_args["set"] = int(next_args["set"]) + 1
            yield {
                "label": ">> Next page",
                "path": plugin.url_for(callback, **next_args),
            }
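
Note: yify_show_data derives the advertised resolution in stream_info from
the release quality string. A sketch of just that mapping, assuming the two
qualities handled above:

def stream_info_for(quality):
    # 720p releases get 1280x720; everything else defaults to 1080p,
    # mirroring the width/height fallbacks in yify_show_data
    width, height = (1280, 720) if quality == "720p" else (1920, 1080)
    return {
        "video": {"codec": "h264", "width": width, "height": height},
        "audio": {"codec": "aac"},
    }


assert stream_info_for("720p")["video"]["height"] == 720
assert stream_info_for("1080p")["video"]["width"] == 1920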
Example #26
0
def yify_show_data(callback):
    import xbmc
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from xbmctorrent import tmdb
    from xbmctorrent.utils import url_get_json, SafeDialogProgress

    plugin.set_content("movies")
    args = dict((k, v[0]) for k, v in plugin.request.args.items())

    current_page = int(args["set"])
    limit = int(args["limit"])

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")

        try:
            search_result = url_get_json("%s/api/list.json" % BASE_URL, params=args, headers=HEADERS)
        except Exception:
            plugin.notify("Unable to connect to %s." % BASE_URL)
            raise
        movies = search_result.get("MovieList") or []

        if not movies:
            return

        state = {"done": 0}
        def on_movie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(movies)),
                line2=data.get("title") or data.get("MovieTitleClean") or "",
            )

        with futures.ThreadPoolExecutor(max_workers=2) as pool_tmdb:
            tmdb_list = [pool_tmdb.submit(tmdb.get, movie["ImdbCode"]) for movie in movies]
            for future in tmdb_list:
                future.add_done_callback(on_movie)
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        tmdb_list = [job.result() for job in tmdb_list]
        for movie, tmdb_meta in izip(movies, tmdb_list):
            if tmdb_meta:
                item = tmdb.get_list_item(tmdb_meta)
                if args.get("quality") == "all" and movie["Quality"] != "720p":
                    item["label"] = "%s (%s)" % (item["label"], movie["Quality"])
                item.update({
                    "path": plugin.url_for("play", uri=movie["TorrentMagnetUrl"]),
                    "is_playable": True,
                })
                item.setdefault("info", {}).update({
                    "count": movie["MovieID"],
                    "genre": "%s (%s S:%s P:%s)" % (item["info"]["genre"], movie["Size"], movie["TorrentSeeds"], movie["TorrentPeers"]),
                    "plot_outline": tmdb_meta["overview"],
                    "video_codec": "h264",
                })
                width = 1920
                height = 1080
                if movie["Quality"] == "720p":
                    width = 1280
                    height = 720
                item.setdefault("stream_info", {}).update({
                    "video": {
                        "codec": "h264",
                        "width": width,
                        "height": height,
                    },
                    "audio": {
                        "codec": "aac",
                    },
                })
                yield item

        if current_page < (int(search_result["MovieCount"]) / limit):
            next_args = args.copy()
            next_args["set"] = int(next_args["set"]) + 1
            yield {
                "label": ">> Next page",
                "path": plugin.url_for(callback, **next_args),
            }
Example #27
0
def rutracker_search_page(catind, page, search=None, search_id=None):
    import xbmc
    import xbmcgui
    from traceback import format_exc
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import terminating, SafeDialogProgress

    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    if plugin.request.args.get("search_id"):
        search_id = plugin.request.args.get("search_id")[0]
    page = int(page)
    catind = int(catind)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        items = []
        try:
            url = urljoin(BASE_URL, "search.php?")

            if search_id:
                params = {"nm": search, "id": search_id}
                if page > 0:
                    params["start"] = page * 50
                html_data = url_get(url, headers=HEADERS, params=params)
            else:
                post_body = {"nm": search, "fsf": catind}
                html_data = url_get(url, headers=HEADERS, post=post_body)

            soup = BeautifulSoup(html_data, "html5lib")

            node = soup.find("a", class_=["pg"])
            if node:
                r = search_id_parser.search(node['href'])
                if r:
                    plugin.log.debug("Search id found: " + str(r.group(1)))
                    search_id = str(r.group(1))

            for link in soup.findAll("a", class_=["topictitle"]):
                try:
                    title = _rutracker_cleantitle(link.text)
                    r = topic_id_parser.search(link['href'])
                    if r:
                        tid = r.group(1)
                        item = {
                            "label": title,
                            "path": plugin.url_for("rutracker_play", tid=tid),
                            "info": {"title": title},
                            "is_playable": False,
                            "context_menu": [
                                ("Play with Pulsar", actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=tid)))
                            ]
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " + title.encode('utf-8'))
                except Exception:
                    plugin.log.error("Unexpected error: %s; skipping item" % format_exc().split('\n')[-2])
        except Exception:
            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        def _get_torrent_info(item):
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            scrapers = search.Scrapers()
            try:
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)
                plugin.log.debug("RUTRACKER: Meta information received")
                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(get_quality_from_name(meta['label']))
                plugin.log.debug("RUTRACKER: Meta path updated")
                return meta
            except Exception:
                plugin.log.error("RUTRACKER: Unexpected error: %s parsing item [%s]" % (format_exc().split('\n')[-2], str(item)))
                # fall back to plain metadata if the scraper lookup failed
                return scrapers.default(item)

        state = {"done": 0}

        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            jobs = [pool.apply_async(_get_torrent_info, [item], callback=on_done) for item in items]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            try:
                item = job.get()
            except Exception:
                plugin.log.error("RUTRACKER: Unexpected error: %s" % format_exc().split('\n')[-2])
                continue
            del item["search"]
            del item["subdir"]
            yield item
        if search_id:
            yield {
                "label": u"[Далее >]",
                "path": plugin.url_for("rutracker_search_page", catind=catind, page=page + 1, search=search, search_id=search_id),
                "is_playable": False,
            }