Example #1
    # Nested helper from the EZTV letter-listing route: shows_list, plugin,
    # tvdb and the imports (closing, xbmcgui, xbmc, ThreadPool, terminating,
    # izip) come from the enclosing scope and module.
    def _eztv_shows_by_letter(letter):
        with closing(xbmcgui.DialogProgress()) as dialog:
            dialog.create(plugin.name)
            dialog.update(percent=0, line1="Fetching series information...", line2="", line3="")

            state = {"done": 0}
            def on_serie(data):
                state["done"] += 1
                dialog.update(
                    percent=int(state["done"] * 100.0 / len(shows_list)),
                    line2=data and data["seriesname"] or "",
                )

            with terminating(ThreadPool(5)) as pool_tvdb:
                tvdb_list = [pool_tvdb.apply_async(tvdb.search, [show["name"], True], callback=on_serie) for show in shows_list]
                while not all(job.ready() for job in tvdb_list):
                    if dialog.iscanceled():
                        dialog.close()
                        return
                    xbmc.sleep(50)

        tvdb_list = [job.get() for job in tvdb_list]
        for i, (eztv_show, tvdb_show) in enumerate(izip(shows_list, tvdb_list)):
            if tvdb_show:
                item = tvdb.get_list_item(tvdb_show)
                item.update({
                    "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
                })
                yield item
            else:
                yield {
                    "label": eztv_show["name"],
                    "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
                }
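The terminating() helper used above is not shown in these examples. A minimal sketch consistent with its usage (a context manager that terminates the ThreadPool on exit, so worker threads cannot outlive a cancelled listing) could look like this; the real xbmctorrent.utils implementation may differ:

from contextlib import contextmanager

@contextmanager
def terminating(pool):
    # Hand the pool to the with-block, then tear it down unconditionally.
    try:
        yield pool
    finally:
        pool.terminate()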
Example #2
def eztv_get_show_seasons(show_id):
    import random
    from bs4 import BeautifulSoup
    from itertools import groupby
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import first, terminating, url_get
    from xbmctorrent import tvdb

    # plugin.set_content("seasons")

    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with terminating(ThreadPool(2)) as pool:
        def _eztv_get_show():
            plugin.log.info("Getting show")
            response = url_get("http://eztv.it/shows/%s/" % show_id, headers=HEADERS)
            plugin.log.info("Got show")
            return BeautifulSoup(response, "html5lib")
        soup = pool.apply_async(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.apply_async(tvdb.get_all_meta, [plugin.request.args["tvdb_id"][0]])

        soup = soup.get()
        fanarts = []
        if tvdb_id:
            tvdb_show = tvdb_show.get()
            fanarts = [banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"]
            random.shuffle(fanarts)

        for node in soup.findAll("a", "epinfo"):
            if get_episode_data_from_name(node.text)[0] < 0:
                plugin.redirect(plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=-1, tvdb_id=tvdb_id))
                return

        # Build a list (rather than yield lazily) so the redirect above can
        # still fire when no seasons are found.
        items = []
        for i, (season, episodes) in enumerate(groupby(soup.findAll("a", "epinfo"), lambda node: get_episode_data_from_name(node.text)[0])):
            if tvdb_id:
                item = tvdb.get_season_list_item(tvdb_show, season)
            else:
                item = {
                    "label": "Season %d" % season
                }
            item.update({
                "path": plugin.url_for("eztv_get_episodes_for_season", show_id=show_id, season=season, tvdb_id=tvdb_id),
            })
            # Guard against an empty fanart list (no tvdb_id): indexing
            # fanarts[i % len(fanarts)] would otherwise divide by zero.
            if fanarts:
                item.setdefault("properties", {}).update({
                    "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
                })
            items.append(item)
        return items
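get_episode_data_from_name() is defined elsewhere in the module. The call sites above only rely on it returning a (season, episode) pair with negative values when the name carries no season tag, so a hypothetical sketch would be:

import re

def get_episode_data_from_name(name):
    # Hypothetical: parse "S01E02"-style tags; (-1, -1) means "no season
    # info", which is what the "< 0" checks above test for.
    match = re.search(r"S(\d{1,2})E(\d{1,2})", name, re.IGNORECASE)
    if match:
        return int(match.group(1)), int(match.group(2))
    return -1, -1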
Example #3
def rutracker_page(catind, page, query=None):
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from traceback import format_exc
    from urlparse import urljoin
    from xbmctorrent.utils import terminating, SafeDialogProgress
    # BASE_URL, HEADERS, CATEGORIES, url_get, actions, plugin and
    # _rutracker_cleantitle come from the module scope.

    category = ([cat for cat in CATEGORIES if cat[0] == catind]
                or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    page = int(page)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1=u"Получение информации о раздачах...",  # "Fetching torrent information..."
                      line2="",
                      line3="")

        items = []
        try:
            url = urljoin(BASE_URL,
                          "viewforum.php?f=%s&start=%s" % (query, str(page * 50)))
            html_data = url_get(url, headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            # find subforums
            nodes = soup.findAll("h4", class_=["forumlink"])

            # Add a search item on the topmost page:
            if catind == query:
                yield {
                    "label": u"[COLOR FFFFFF00][ Поиск ][/COLOR]",  # "[ Search ]"
                    "path": plugin.url_for("rutracker_search", catind=catind),
                }

            plugin.log.debug("Loading forum nodes")
            for node in nodes:
                link = node.find("a")
                plugin.log.debug("Forum link: " + str(link))
                link_href = link["href"]
                # find the forum id in the href:
                forumid = int(link_href[link_href.find(u"=") + 1:])
                item = {
                    "label": link.text,
                    "path": plugin.url_for("rutracker_page", catind=catind, page=0, query=forumid),
                    "info": {"title": link.text},
                    "is_playable": False,
                }
                yield item

            nodes = soup.findAll("td", class_=["topic_id"])

            for node in nodes:
                id = node["id"]
                title_node = node.parent.find(id='tt-%s' % str(id))
                title = _rutracker_cleantitle(title_node.text)
                # find the "size" table cell - it should hold the download link
                row_node = node.parent
                size_td = row_node.find_all("td")[2]

                # check whether the size cell has a download link to the torrent file:
                if size_td:
                    size_link = size_td.find("a", class_=["small"])
                    if size_link:
                        size = size_link.text
                        seeds = size_td.find("span", class_=["seedmed"]).b.text
                        peers = size_td.find("span", class_=["leechmed"]).b.text
                        label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
                        item = {
                            "label": label,
                            "path": plugin.url_for("rutracker_play", tid=id),
                            "info": {"title": title},
                            "is_playable": False,
                            "context_menu": [
                                ("Play with Pulsar",
                                 actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=id))),
                            ],
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " + title.encode('utf-8'))
        except Exception:
            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name,
                                "Не удалось получить данные от сервера")  # "Could not get data from the server"
            return

        def _get_torrent_info(item):
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            # Build the scraper registry outside the try block so the
            # fallback in the except clause can always reach it.
            scrapers = search.Scrapers()
            try:
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)

                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(
                    get_quality_from_name(meta['label']))
                return meta
            except Exception:
                plugin.log.error("Unexpected error: %s" %
                                 format_exc().split('\n')[-2])
                return scrapers.default(item)

        state = {"done": 0}

        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            jobs = [
                pool.apply_async(_get_torrent_info, [item], callback=on_done)
                for item in items
            ]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            item = job.get()
            del item["search"]
            del item["subdir"]
            yield item

        next_page = {
            "label": u"[Далее >]",  # "[Next >]"
            "path": plugin.url_for("rutracker_page", catind=catind, page=page + 1, query=query),
            "is_playable": False,
        }
        yield next_page
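Every example repeats the same poll-until-ready loop around dialog cancellation. A small helper could factor it out; this is a sketch, not part of the original module:

import xbmc

def wait_for_jobs(jobs, dialog, interval=100):
    # Poll the AsyncResult list until every job is ready; bail out early
    # (returning False) if the user cancels the progress dialog.
    while not all(job.ready() for job in jobs):
        if dialog.iscanceled():
            return False
        xbmc.sleep(interval)
    return True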
Example #4
def rutracker_search_page(catind, page, search=None, search_id=None):
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from traceback import format_exc
    from urlparse import urljoin
    from xbmctorrent.utils import terminating, SafeDialogProgress
    # BASE_URL, HEADERS, CATEGORIES, url_get, actions, plugin,
    # search_id_parser, topic_id_parser and _rutracker_cleantitle come
    # from the module scope.

    category = ([cat for cat in CATEGORIES if cat[0] == catind]
                or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    if plugin.request.args.get("search_id"):
        search_id = plugin.request.args.get("search_id")[0]
    page = int(page)
    catind = int(catind)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1=u"Получение информации о раздачах...",
                      line2="",
                      line3="")

        items = []
        try:
            url = urljoin(BASE_URL, "search.php?")

            if search_id:
                params = {}
                params["nm"] = search
                if int(page) > 0:
                    params["start"] = int(page) * 50
                params["id"] = search_id
                html_data = url_get(url, headers=HEADERS, params=params)
            else:
                post_body = {"nm": search, "fsf": catind}
                html_data = url_get(url, headers=HEADERS, post=post_body)

            soup = BeautifulSoup(html_data, "html5lib")

            node = soup.find("a", class_=["pg"])
            if node:
                r = search_id_parser.search(node['href'])
                if r:
                    plugin.log.debug("Search id found: " + str(r.group(1)))
                    search_id = str(r.group(1))

            nodes = soup.findAll("a", class_=["topictitle"])

            for link in nodes:
                try:
                    title = _rutracker_cleantitle(link.text)
                    r = topic_id_parser.search(link['href'])
                    if r:
                        id = r.group(1)
                        item = {
                            "label": title,
                            "path": plugin.url_for("rutracker_play", tid=id),
                            "info": {"title": title},
                            "is_playable": False,
                            "context_menu": [
                                ("Play with Pulsar",
                                 actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=id))),
                            ],
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " + title.encode('utf-8'))
                except Exception:
                    plugin.log.error("Unexpected error: %s; skipping item" %
                                     format_exc().split('\n')[-2])
        except Exception:
            plugin.log.error("Unexpected error: %s" %
                             format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name,
                                "Не удалось получить данные от сервера")
            return

        def _get_torrent_info(item):
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            # Build the scraper registry outside the try block so the
            # fallback in the except clause can always reach it.
            scrapers = search.Scrapers()
            try:
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)
                plugin.log.debug("RUTRACKER: Meta information received")
                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(
                    get_quality_from_name(meta['label']))
                plugin.log.debug("RUTRACKER: Meta path updated")
                return meta
            except Exception:
                plugin.log.error(
                    "RUTRACKER: Unexpected error: %s parsing item [%s]" %
                    (format_exc().split('\n')[-2], str(item)))
                return scrapers.default(item)

        state = {"done": 0}

        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            jobs = [
                pool.apply_async(_get_torrent_info, [item], callback=on_done)
                for item in items
            ]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            try:
                item = job.get()
                del item["search"]
                del item["subdir"]
                yield item
            except Exception:
                plugin.log.error(
                    "RUTRACKER: Unexpected error: %s while yielding item" %
                    format_exc().split('\n')[-2])
        if search_id:
            next_page = {
                "label": u"[Далее >]",
                "path": plugin.url_for("rutracker_search_page",
                                       catind=catind,
                                       page=page + 1,
                                       search=search,
                                       search_id=search_id),
                "is_playable": False,
            }
            yield next_page
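search_id_parser and topic_id_parser are module-level regexes not shown here. Hypothetical definitions consistent with how group(1) is used above (an id pulled out of pagination and topic hrefs) might be:

import re

# Hypothetical shapes; the real patterns in the module may differ.
search_id_parser = re.compile(r"search_id=([0-9A-Za-z]+)")
topic_id_parser = re.compile(r"t=(\d+)")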
Example #5
def rutor_page(catind, page, query=None):
    import sys
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from urlparse import urljoin
    from xbmctorrent.utils import terminating, SafeDialogProgress
    # BASE_URL, HEADERS, CATEGORIES, url_get, uenc, plugin,
    # _rutor_filter_nodes and _rutor_cleantitle come from the module scope.

    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])

    page = int(page)
    catind = int(catind)

    mode = "browse"
    # The route passes the literal string "None" when no query is given.
    if query and query != str(None):
        mode = "search"

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        nodes = []
        try:
            html_data = url_get(urljoin(BASE_URL, "%s/%d/%d/0/0/%s" % (mode, page, catind, query)), headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            nodes = soup.findAll("tr", class_=["gai","tum"])
        except Exception:
            plugin.log.error("Unexpected error: %s" % sys.exc_info()[0])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        if not nodes:
            yield {
                "label": u"[ Не найдено ]",
                "path" : plugin.url_for("rutor_index"),
                "is_playable": False,
            }
            return

        # store the node count before filtering (100 rows means a full page)
        nodeslen = len(nodes)
        nodes = list(_rutor_filter_nodes(nodes))

        items = []
        for node in nodes:
            cells = node.findAll("td")
            seeds, peers = [span.text.strip() for span in cells[-1].findAll("span")]
            links = cells[1].findAll("a")
            magnet_node, title_node = links[-2], links[-1]
            size = cells[-2].text
            tid = int(title_node["href"][9:title_node["href"].find(u"/", 9)])
            title = _rutor_cleantitle(title_node.text)
            label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
            item = {
                "label": label,
                "path": plugin.url_for("rutor_details", catind=catind, tid=tid),
                "info": {"title": title},
                "is_playable": False,
            }
            items.append(item)

        def _get_torrent_info(item):
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            scrapers = search.Scrapers()

            if not plugin.get_setting("rutor_usesearch", bool):
                meta = scrapers.default(item)
            else:
                meta = scrapers.scraper(scraper_name, item)

            meta["path"] = item["path"]
            meta["is_playable"] = item["is_playable"]
            meta.setdefault("stream_info", {}).update(get_quality_from_name(meta['label']))
            return meta

        state = {"done": 0}
        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(nodes)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool: 
            jobs = [pool.apply_async(_get_torrent_info, [item], callback=on_done) for item in items]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        import hashlib

        passed = {}
        for job in jobs:
            item = job.get()
            # Deduplicate on subdir, then the search key, then the label.
            key = item.get("subdir") or item.get("search") or item["label"]
            sha1 = hashlib.sha1(uenc(key)).hexdigest()
            if not passed.get(sha1):
                passed[sha1] = True
                del item["search"]
                del item["subdir"]
                yield item
                
        if nodeslen == 100:
            next_page = {
                "label": u"[Далее >]",
                "path": plugin.url_for("rutor_page", catind=catind, page=page + 1, query=query),
                "is_playable": False,
            }
            yield next_page
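uenc is assumed here to be a utils helper that UTF-8-encodes unicode before hashing. Under that assumption, the duplicate-filter key could be isolated like this (a sketch):

import hashlib

def dedup_key(item):
    # Prefer the torrent's subdir, then its search string, then the label,
    # and hash UTF-8 bytes so unicode titles compare consistently.
    text = item.get("subdir") or item.get("search") or item["label"]
    if isinstance(text, unicode):  # the assumed uenc() encoding step
        text = text.encode("utf-8")
    return hashlib.sha1(text).hexdigest()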
Example #6
def kickass_page(root, page):
    import re
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from urlparse import urljoin
    from xbmctorrent import tmdb
    from xbmctorrent.utils import first, terminating, url_get, get_quality_from_name

    content_type = first(plugin.request.args.get("content_type")) or None
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)))
    soup = BeautifulSoup(page_data, "html5lib")
    torrent_nodes = [node.parent for node in soup.findAll("td", "torrentnameCell")]

    def _get_torrent_info_with_meta(url):
        torrent_info = get_torrent_info(url)
        if torrent_info["movie"] and torrent_info["imdb_id"]:
            torrent_info["tmdb"] = tmdb.get(torrent_info["imdb_id"])
        return torrent_info


    with closing(xbmcgui.DialogProgress()) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching torrent information...", line2="", line3="")

        state = {"done": 0}
        def on_torrent(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(torrent_nodes)),
                line2=data.get("title") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            movies = []
            for node in torrent_nodes:
                info_nodes = node.findAll("td")
                a_node = first(info_nodes[0].select("div.torrentname a.plain"))
                movies.append(pool.apply_async(_get_torrent_info_with_meta, [urljoin(BASE_URL, a_node["href"])], callback=on_torrent))
            while not all(job.ready() for job in movies):
                if dialog.iscanceled():
                    dialog.close()
                    return
                xbmc.sleep(50)
            movies = [movie.get() for movie in movies]

        for i, movie in enumerate(movies):
            if movie.get("tmdb"):
                item = tmdb.get_list_item(movie["tmdb"])
                # Escape the label before interpolating it into the regex;
                # titles may contain metacharacters like parentheses.
                proper_kat_title = re.sub(r"\s+", " ", re.sub(r"[().-]", " ", movie["title"]))
                item["label"] = "%s (%s)" % (item["label"], re.sub("(?i)" + re.escape(item["label"]), "", proper_kat_title).strip())
            else:
                item = {
                    "label": movie["title"],
                    "info": {
                        "genre": ", ".join(movie["genres"]),
                    }
                }
            lower_title = movie["title"].lower()
            if "x264" in lower_title:
                item.setdefault("stream_info", {})["codec"] = item.setdefault("info", {})["video_codec"] = "h264"
            if "xvid" in lower_title:
                item.setdefault("stream_info", {})["codec"] = item.setdefault("info", {})["video_codec"] = "xvid"
            if "720p" in lower_title:
                item.setdefault("stream_info", {}).update({
                    "width": 1280,
                    "height": 720,
                })
            if "1080p" in lower_title:
                item.setdefault("stream_info", {}).update({
                    "width": 1920,
                    "height": 1080,
                })
            item.update({
                "path": plugin.url_for("play", magnet=movie["magnet_url"]),
                "is_playable": True,
            })
            item.setdefault("info", {}).update({
                "count": i,
                "genre": "%s (S:%s P:%s)" % (item.get("info", {}).get("genre") or "", movie["seeders"], movie["leechers"]),
            })
            item.setdefault("stream_info", {}).update({
                "video": get_quality_from_name(movie["title"]),
            })
            yield item
    yield {
        "label": "Next page...",
        "path": plugin.url_for("kickass_page", root=root, page=page + 1, content_type=content_type),
    }
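get_quality_from_name() comes from xbmctorrent.utils and is not shown. At this call site it supplies the video stream mapping, so one plausible sketch, mirroring the inline keyword checks above (the real helper may return a richer structure), is:

def get_quality_from_name(name):
    # Hypothetical: derive video stream hints from release-name keywords.
    lower_name = name.lower()
    video = {}
    if "x264" in lower_name:
        video["codec"] = "h264"
    if "xvid" in lower_name:
        video["codec"] = "xvid"
    if "720p" in lower_name:
        video.update({"width": 1280, "height": 720})
    if "1080p" in lower_name:
        video.update({"width": 1920, "height": 1080})
    return video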
Example #7
def rutor_page(catind, page, query=None):
    import sys
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from urlparse import urljoin
    from xbmctorrent.utils import terminating, SafeDialogProgress

    category = ([cat for cat in CATEGORIES if cat[0] == catind]
                or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])

    page = int(page)
    catind = int(catind)

    mode = "browse"
    if query and query != str(None):
        mode = "search"

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1=u"Получение информации о раздачах...",
                      line2="",
                      line3="")

        nodes = []
        try:
            html_data = url_get(urljoin(
                BASE_URL, "%s/%d/%d/0/0/%s" % (mode, page, catind, query)),
                                headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            nodes = soup.findAll("tr", class_=["gai", "tum"])
        except Exception:
            plugin.log.error("Unexpected error: %s" % sys.exc_info()[0])
            xbmcgui.Dialog().ok(plugin.name,
                                "Не удалось получить данные от сервера")
            return

        if not nodes:
            yield {
                "label": u"[ Не найдено ]",
                "path": plugin.url_for("rutor_index"),
                "is_playable": False,
            }
            return

        # store the node count before filtering (100 rows means a full page)
        nodeslen = len(nodes)
        nodes = list(_rutor_filter_nodes(nodes))

        items = []
        for node in nodes:
            cells = node.findAll("td")
            seeds, peers = [span.text.strip() for span in cells[-1].findAll("span")]
            links = cells[1].findAll("a")
            magnet_node, title_node = links[-2], links[-1]
            size = cells[-2].text
            tid = int(title_node["href"][9:title_node["href"].find(u"/", 9)])
            title = _rutor_cleantitle(title_node.text)
            label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
            item = {
                "label": label,
                "path": plugin.url_for("rutor_details", catind=catind, tid=tid),
                "info": {"title": title},
                "is_playable": False,
            }
            items.append(item)

        def _get_torrent_info(item):
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            scrapers = search.Scrapers()

            if not plugin.get_setting("rutor_usesearch", bool):
                meta = scrapers.default(item)
            else:
                meta = scrapers.scraper(scraper_name, item)

            meta["path"] = item["path"]
            meta["is_playable"] = item["is_playable"]
            meta.setdefault("stream_info",
                            {}).update(get_quality_from_name(meta['label']))
            return meta

        state = {"done": 0}

        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(nodes)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            jobs = [
                pool.apply_async(_get_torrent_info, [item], callback=on_done)
                for item in items
            ]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        import hashlib

        passed = {}
        for job in jobs:
            item = job.get()
            # Deduplicate on subdir, then the search key, then the label.
            key = item.get("subdir") or item.get("search") or item["label"]
            sha1 = hashlib.sha1(uenc(key)).hexdigest()
            if not passed.get(sha1):
                passed[sha1] = True
                del item["search"]
                del item["subdir"]
                yield item

        if nodeslen == 100:
            next_page = {
                "label": u"[Далее >]",
                "path": plugin.url_for("rutor_page",
                                       catind=catind,
                                       page=page + 1,
                                       query=query),
                "is_playable": False,
            }
            yield next_page
Example #8
def yify_show_data(callback):
    import xbmc
    import xbmcgui
    from contextlib import nested, closing
    from itertools import izip, chain
    from multiprocessing.pool import ThreadPool
    from xbmctorrent import tmdb
    from xbmctorrent.utils import url_get_json, terminating

    plugin.set_content("movies")
    args = dict((k, v[0]) for k, v in plugin.request.args.items())

    current_page = int(args["set"])
    limit = int(args["limit"])

    with closing(xbmcgui.DialogProgress()) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")

        search_result = url_get_json("%s/api/list.json" % BASE_URL, params=args, headers=HEADERS)
        movies = search_result["MovieList"]

        state = {"done": 0}
        def on_movie(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / (len(movies) * 2)),
                line2=data.get("title") or data.get("MovieTitleClean") or "",
            )

        with nested(terminating(ThreadPool(2)), terminating(ThreadPool(2))) as (pool_yify, pool_tmdb):
            yify_list = [pool_yify.apply_async(yify_get, [movie["MovieID"]], callback=on_movie) for movie in movies]
            tmdb_list = [pool_tmdb.apply_async(tmdb.get, [movie["ImdbCode"]], callback=on_movie) for movie in movies]
            while not all(job.ready() for job in chain(yify_list, tmdb_list)):
                if dialog.iscanceled():
                    dialog.close()
                    return
                xbmc.sleep(50)

        yify_list = [job.get() for job in yify_list]
        tmdb_list = [job.get() for job in tmdb_list]
        for movie, tmdb_meta in izip(yify_list, tmdb_list):
            if tmdb_meta:
                item = tmdb.get_list_item(tmdb_meta)
                if args.get("quality") == "all" and movie["Quality"] != "720p":
                    item["label"] = "%s (%s)" % (item["label"], movie["Quality"])
                item.update({
                    "path": plugin.url_for("play", magnet=movie["TorrentMagnetUrl"]),
                    "is_playable": True,
                })
                item.setdefault("info", {}).update({
                    "count": movie["MovieID"],
                    "genre": "%s (%s S:%s P:%s)" % (item["info"]["genre"], movie["Size"], movie["TorrentSeeds"], movie["TorrentPeers"]),
                    "trailer": YOUTUBE_ACTION % movie["YoutubeTrailerID"],
                    "plot_outline": movie["ShortDescription"],
                    "video_codec": "h264",
                    "mpaa": movie["AgeRating"],
                })
                width, height = map(int, movie["Resolution"].split("*"))
                item.setdefault("stream_info", {}).update({
                    "video": {
                        "codec": "h264",
                        "width": width,
                        "height": height,
                        "aspect": float(width) / float(height),
                    },
                    "audio": {
                        "codec": "aac",
                        "language": movie["Language"],
                    },
                })
                yield item

        if current_page < (int(search_result["MovieCount"]) / limit):
            next_args = args.copy()
            next_args["set"] = int(next_args["set"]) + 1
            yield {
                "label": ">> Next page",
                "path": plugin.url_for(callback, **next_args),
            }
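contextlib.nested is Python 2 only (it was removed in Python 3). On Python 3 the same two-pool setup would use contextlib.ExitStack, sketched here with the terminating() helper assumed from the earlier examples:

from contextlib import ExitStack
from multiprocessing.pool import ThreadPool

with ExitStack() as stack:
    pool_yify = stack.enter_context(terminating(ThreadPool(2)))
    pool_tmdb = stack.enter_context(terminating(ThreadPool(2)))
    # ... submit the yify/tmdb jobs exactly as in the Python 2 version.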
Example #9
def eztv_get_episodes_for_season(show_id, season):
    import copy
    import random
    from bs4 import BeautifulSoup
    from itertools import izip
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import first, terminating, url_get
    from xbmctorrent import tvdb

    plugin.set_content("episodes")
    season = int(season)
    tvdb_id = first(plugin.request.args.get("tvdb_id"))
    with terminating(ThreadPool(2)) as pool:
        def _eztv_get_show():
            return BeautifulSoup(url_get("http://eztv.it/shows/%s/" % show_id), "html5lib")
        soup = pool.apply_async(_eztv_get_show)
        if tvdb_id:
            tvdb_show = pool.apply_async(tvdb.get_all_meta, [plugin.request.args["tvdb_id"][0]])

        soup = soup.get()
        items = []
        fanarts = []
        if tvdb_id:
            tvdb_show = tvdb_show.get()
            fanarts = [banner for banner in tvdb_show["banners"] if banner["bannertype"] == "fanart"]
            random.shuffle(fanarts)
            items = list(tvdb.build_episode_list_items(tvdb_show, int(season)))
        text_nodes = soup.findAll("a", "epinfo")
        href_nodes = soup.findAll("a", "magnet")
        season_nodes = izip(text_nodes, href_nodes)
        if season >= 0:
            season_nodes = [pair for pair in season_nodes if ("S%02d" % season) in pair[0].text]

        for i, (node_text, node_magnet) in enumerate(season_nodes):
            season, episode = get_episode_data_from_name(node_text.text)
            if tvdb_id and episode >= 0:
                item = copy.deepcopy(items[int(episode) - 1])
                for pattern, suffix in (("720p", "(HD)"), ("1080p", "(FullHD)"), ("repack", "(REPACK)"), ("proper", "(PROPER)")):
                    if pattern in node_text.text.lower():
                        item["label"] = "%s %s" % (item["label"], suffix)
            else:
                item = {
                    "label": node_text.text,
                }
            item.setdefault("info", {}).update({
                "tvshowtitle": node_text.text,
                "title": item["label"],
            })
            stream_info = {}
            if "x264" in node_text.text:
                stream_info["codec"] = item["info"]["video_codec"] = "h264"
            if "xvid" in node_text.text.lower():
                stream_info["codec"] = item["info"]["video_codec"] = "xvid"
            if "720p" in node_text.text:
                stream_info["width"] = 1280
                stream_info["height"] = 720
            if "1080p" in node_text.text:
                stream_info["width"] = 1920
                stream_info["height"] = 1080
            item.update({
                "path": plugin.url_for("play", magnet=node_magnet["href"]),
                "stream_info": {"video": stream_info},
                "is_playable": True,
            })
            if fanarts:
                item.setdefault("properties", {}).update({
                    "fanart_image": fanarts[i % len(fanarts)]["bannerpath"],
                })
            yield item
Example #10
def rutracker_page(catind, page, query=None):
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from traceback import format_exc
    from urlparse import urljoin
    from xbmctorrent.utils import terminating, SafeDialogProgress

    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    page = int(page)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        items = []
        try:
            url = urljoin(BASE_URL, "viewforum.php?f=%s&start=%s" % (query, str(page * 50)))
            html_data = url_get(url, headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            # find subforums
            nodes = soup.findAll("h4", class_=["forumlink"])

            # Add a search item on the topmost page:
            if catind == query:
                yield {
                    "label": u"[COLOR FFFFFF00][ Поиск ][/COLOR]",
                    "path": plugin.url_for("rutracker_search", catind=catind),
                }

            plugin.log.debug("Loading forum nodes")
            for node in nodes:
                link = node.find("a")
                plugin.log.debug("Forum link: " + str(link))
                link_href = link["href"]
                # find the forum id in the href:
                forumid = int(link_href[link_href.find(u"=") + 1:])
                item = {
                    "label": link.text,
                    "path": plugin.url_for("rutracker_page", catind=catind, page=0, query=forumid),
                    "info": {"title": link.text},
                    "is_playable": False,
                }
                yield item

            nodes = soup.findAll("td", class_=["topic_id"])

            for node in nodes:
                id = node["id"]
                title_node = node.parent.find(id='tt-%s' % str(id))
                title = _rutracker_cleantitle(title_node.text)
                # find the "size" table cell - it should hold the download link
                row_node = node.parent
                size_td = row_node.find_all("td")[2]

                # check whether the size cell has a download link to the torrent file:
                if size_td:
                    size_link = size_td.find("a", class_=["small"])
                    if size_link:
                        size = size_link.text
                        seeds = size_td.find("span", class_=["seedmed"]).b.text
                        peers = size_td.find("span", class_=["leechmed"]).b.text
                        label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
                        item = {
                            "label": label,
                            "path": plugin.url_for("rutracker_play", tid=id),
                            "info": {"title": title},
                            "is_playable": False,
                            "context_menu": [
                                ("Play with Pulsar", actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=id)))
                            ]
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " + title.encode('utf-8'))
        except Exception:
            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        def _get_torrent_info(item):
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            # Build the scraper registry outside the try block so the
            # fallback in the except clause can always reach it.
            scrapers = search.Scrapers()
            try:
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)

                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(get_quality_from_name(meta['label']))
                return meta
            except Exception:
                plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
                return scrapers.default(item)

        state = {"done": 0}

        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            jobs = [pool.apply_async(_get_torrent_info, [item], callback=on_done) for item in items]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            item = job.get()
            del item["search"]
            del item["subdir"]
            yield item

        next_page = {
            "label": u"[Далее >]",
            "path": plugin.url_for("rutracker_page", catind=catind, page=page + 1, query=query),
            "is_playable": False,
        }
        yield next_page
Example #11
def rutracker_search_page(catind, page, search=None, search_id=None):
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from traceback import format_exc
    from urlparse import urljoin
    from xbmctorrent.utils import terminating, SafeDialogProgress

    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    if plugin.request.args.get("search_id"):
        search_id = plugin.request.args.get("search_id")[0]
    page = int(page)
    catind = int(catind)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        items = []
        try:
            url = urljoin(BASE_URL, "search.php?")

            if search_id:
                params = {}
                params["nm"] = search
                if int(page) > 0:
                    params["start"] = int(page) * 50
                params["id"] = search_id
                html_data = url_get(url, headers=HEADERS, params=params)
            else:
                post_body = {"nm": search, "fsf": catind}
                html_data = url_get(url, headers=HEADERS, post=post_body)

            soup = BeautifulSoup(html_data, "html5lib")

            node = soup.find("a", class_=["pg"])
            if node:
                r = search_id_parser.search(node['href'])
                if r:
                    plugin.log.debug("Search id found: " + str(r.group(1)))
                    search_id = str(r.group(1))

            nodes = soup.findAll("a", class_=["topictitle"])

            for link in nodes:
                try:
                    title = _rutracker_cleantitle(link.text)
                    r = topic_id_parser.search(link['href'])
                    if r:
                        id = r.group(1)
                        item = {
                            "label": title,
                            "path": plugin.url_for("rutracker_play", tid=id),
                            "info": {"title": title},
                            "is_playable": False,
                            "context_menu": [
                                ("Play with Pulsar", actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=id)))
                            ]
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " + title.encode('utf-8'))
                except Exception:
                    plugin.log.error("Unexpected error: %s; skipping item" % format_exc().split('\n')[-2])
        except Exception:
            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        def _get_torrent_info(item):
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            # Build the scraper registry outside the try block so the
            # fallback in the except clause can always reach it.
            scrapers = search.Scrapers()
            try:
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)
                plugin.log.debug("RUTRACKER: Meta information received")
                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(get_quality_from_name(meta['label']))
                plugin.log.debug("RUTRACKER: Meta path updated")
                return meta
            except Exception:
                plugin.log.error("RUTRACKER: Unexpected error: %s parsing item [%s]" % (format_exc().split('\n')[-2], str(item)))
                return scrapers.default(item)

        state = {"done": 0}

        def on_done(data):
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            jobs = [pool.apply_async(_get_torrent_info, [item], callback=on_done) for item in items]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            try:
                item = job.get()
                del item["search"]
                del item["subdir"]
                yield item
            except Exception:
                plugin.log.error("RUTRACKER: Unexpected error: %s while yielding item" % format_exc().split('\n')[-2])
        if search_id:
            next_page = {
                "label": u"[Далее >]",
                "path": plugin.url_for("rutracker_search_page", catind=catind, page=page + 1, search=search,search_id = search_id),
                "is_playable": False,
            }
            yield next_page