def btdigg_page(query, sort, page):
    """Yield list items for one BTDigg search results page, plus a next-page link.

    :param query: search terms
    :param sort: BTDigg "order" parameter
    :param page: page number (string or int)
    """
    from bs4 import BeautifulSoup
    from xbmctorrentV2.utils import url_get

    html_data = url_get("%s/search" % BASE_URL, headers=HEADERS, params={
        "order": sort,
        "q": query,
        "p": page,
    })
    soup = BeautifulSoup(html_data, "html5lib")
    name_cells = soup.findAll("td", "torrent_name")
    # Attribute tables alternate with other tables in the markup;
    # every second one carries the torrent attributes.
    attr_tables = soup.findAll("table", "torrent_name_tbl")[1::2]

    for name_cell, attr_table in zip(name_cells, attr_tables):
        attr_spans = attr_table.findAll("span", "attr_val")
        label = "%s (%s, DLs:%s)" % (name_cell.find("a").text, attr_spans[0].text, attr_spans[2].text)
        item = {
            "label": label,
            "path": plugin.url_for("play", uri=attr_table.find("a")["href"]),
            "is_playable": True,
        }
        yield item
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("btdigg_page", query=query, sort=sort, page=int(page) + 1),
        "is_playable": False,
    }
def search(name, complete=False):
    """Search TheTVDB for a series by name, caching results per query.

    :param name: series name to search for
    :param complete: when True, fetch and cache the full show record via
        ``get()``; when False, return only the lightweight search metadata.
    :returns: series metadata dict, or None when nothing matches.
    """
    from xbmctorrentV2.caching import shelf
    import hashlib

    # Cache key is the SHA-1 of the query string (py2: str is bytes).
    search_hash = hashlib.sha1(name).hexdigest()
    with shelf("com.thetvdb.search.%s" % search_hash) as show:
        if not show:
            import xml.etree.ElementTree as ET
            from xbmctorrentV2.utils import url_get

            dom = ET.fromstring(
                url_get(
                    "%s/api/GetSeries.php" % BASE_URL,
                    params={"seriesname": name},
                    headers=HEADERS,
                    with_immunicity=False,
                )
            )
            if not len(dom):
                return
            meta = dom2dict(dom[0])
            if not complete:
                return update_image_urls(meta)
            show.update(get(meta["id"]))
        # Copy before the shelf context exits so callers never hold a live
        # (soon-to-be-closed) shelf object; mirrors get()'s behavior.
        return dict(show)
def get_banners(show_id):
    """Return the banner records from TVDB's banners.xml for a show, or None."""
    import xml.etree.ElementTree as ET
    from xbmctorrentV2.utils import url_get

    xml_data = url_get("%s/banners.xml" % show_base_url(show_id), headers=HEADERS, with_immunicity=False)
    root = ET.fromstring(xml_data)
    if len(root) == 0:
        return None
    return update_image_urls(dom2dict(root))["banner"]
def piratebay_page(root, page):
    """Yield list items for one Pirate Bay index page, plus a next-page link.

    Fetches the HTML index, resolves metadata for each entry via
    ``piratebay_record`` in a thread pool while showing a progress dialog,
    and finally yields a pagination item.

    :param root: listing root path (e.g. a browse/search route)
    :param page: page number (string or int)
    """
    import xbmc
    import xbmcgui
    from concurrent import futures
    from contextlib import closing
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrentV2.utils import url_get, SafeDialogProgress

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching index page...", line2="", line3="")

        plugin.set_content("movies")
        page = int(page)

        try:
            html_data = url_get(urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page)), headers=HEADERS)
        except Exception:
            # Was a bare except (also caught SystemExit/KeyboardInterrupt);
            # use a fresh Dialog so the progress dialog binding isn't shadowed.
            xbmcgui.Dialog().ok("Piratebay", "Timeout")
            return

        soup = BeautifulSoup(html_data, "html5lib")
        nodes = soup.findAll("div", "detName")

        state = {"done": 0}

        def on_movie(future):
            # Done-callback (runs on worker threads): advance the progress bar.
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(nodes)),
                line2=data.get("label") or "",
            )

        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")
        with futures.ThreadPoolExecutor(max_workers=10) as pool_tmdb:
            jobs = [pool_tmdb.submit(piratebay_record, node) for node in nodes]
            for job in jobs:
                job.add_done_callback(on_movie)
            # Poll instead of blocking so the user can cancel the dialog.
            while not all(job.done() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(200)

    for job in jobs:
        yield job.result()

    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
def bitsnoop_page(root, page):
    """Return parsed RSS items for one BitSnoop listing page."""
    from urlparse import urljoin
    from xbmctorrentV2.scrapers import rss
    from xbmctorrentV2.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    listing_url = urljoin(BASE_URL, "%s/%d/" % (root, int(page)))
    # Ask BitSnoop for its RSS rendering, newest first by name sort key.
    query = {
        "fmt": "rss",
        "sort": "n_s",
        "dir": "desc",
    }
    return rss.parse(url_get(listing_url, headers=HEADERS, params=query))
def from_torrent_url(url):
    """Download a .torrent file and convert it into an equivalent magnet URI."""
    import base64
    import bencode
    import hashlib
    import urllib
    from xbmctorrentV2.utils import url_get

    decoded = bencode.bdecode(url_get(url))
    # The magnet info-hash is the base32-encoded SHA-1 of the bencoded info dict.
    info_digest = hashlib.sha1(bencode.bencode(decoded["info"])).digest()
    xt = "xt=urn:btih:%s" % base64.b32encode(info_digest)
    params = {"dn": decoded["info"]["name"], "tr": decoded["announce"]}
    plugin.log.info(params)
    return "magnet:?%s&%s" % (xt, urllib.urlencode(params))
def get(show_id):
    """Return full TVDB metadata for a show, caching the record in a shelf."""
    from xbmctorrentV2.caching import shelf

    with shelf("com.thetvdb.show.%s" % show_id) as show:
        if show:
            return dict(show)
        import xml.etree.ElementTree as ET
        from xbmctorrentV2.utils import url_get

        dom = ET.fromstring(url_get(show_url(show_id), headers=HEADERS, with_immunicity=False))
        if not len(dom):
            return
        # Normalize multi-valued fields and rewrite image paths to full URLs.
        meta = split_keys(dom2dict(dom[0]), "actors", "genre", "writer")
        update_image_urls(meta)
        show.update(meta)
        return dict(show)
def extratorrent_page(type_="", cid="", search="", page=1):
    """Return parsed RSS items for an ExtraTorrent listing or search page."""
    from urlparse import urljoin
    from xbmctorrentV2.scrapers import rss
    from xbmctorrentV2.utils import url_get

    content_type = plugin.request.args_dict.pop("content_type", None)
    if content_type:
        plugin.set_content(content_type)

    # Extra request args override the defaults built from our parameters.
    query = dict(type=type_, search=search, cid=cid)
    query.update(plugin.request.args_dict)
    feed = url_get(urljoin(BASE_URL, "/rss.xml"), headers=HEADERS, params=query)
    return rss.parse(feed, content_type)
def nyaa_page(cats, offset, sort, order, term=""):
    """Yield items from one NyaaTorrents RSS page, followed by a next-page link."""
    from xbmctorrentV2.scrapers import rss
    from xbmctorrentV2.utils import url_get

    offset = int(offset)
    feed = url_get(BASE_URL, headers=HEADERS, params={
        "cats": cats,
        "offset": "%d" % offset,
        "sort": sort,
        "order": order,
        "term": term,
        "page": "rss",
    })
    for entry in rss.parse(feed):
        yield entry
    # Searches and plain listings paginate through different plugin routes.
    next_route = "search_result_page" if term else "default_nyaa_page"
    yield {
        "label": ">> Next page",
        "path": plugin.url_for(next_route, cats=cats, sort=sort, order=order, term=term, offset=offset + 1),
        "is_playable": False,
    }
def kat_page(root, page, sort_field, sort_order):
    """Yield items from one Kickass Torrents RSS page, plus a next-page link.

    :param root: listing root path
    :param page: page number (string or int)
    :param sort_field: KAT "field" sort parameter
    :param sort_order: KAT "sorder" sort parameter

    Extra request args (e.g. content_type) are propagated to the
    next-page URL.
    """
    from urlparse import urljoin
    from xbmctorrentV2.scrapers import rss
    from xbmctorrentV2.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)), headers=HEADERS, params={
        "rss": "1",
        "field": sort_field,
        "sorder": sort_order
    })
    for item in rss.parse(page_data, content_type):
        yield item
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("kat_page", root=root, page=page + 1, sort_field=sort_field, sort_order=sort_order, **plugin.request.args_dict),
        # Fix: mark the pagination item non-playable, consistent with the
        # other scrapers in this module.
        "is_playable": False,
    }
def piratebay_record(node):
    """Build a playable list item from one Pirate Bay search-result node.

    Best-effort enrichment: scrapes the torrent's detail page for an IMDB
    link and, when found, merges TMDB metadata into the item. Any failure
    along the way falls back to a plain label + magnet item.

    Side effect: stashes intermediate values as attributes on ``node``.
    :returns: the item dict (also stored as ``node.item``).
    """
    import re
    from xbmctorrentV2.utils import url_get
    from urlparse import urljoin
    from xbmctorrentV2 import tmdb

    # Seeds/peers are the trailing cells of the result row.
    node.seeds, node.peers = map(lambda x: x.text, node.parent.parent.findAll("td")[2:])
    node.magnet_node = node.parent.findAll("a")[1]
    node.desc_node = node.parent.findAll("font", "detDesc")[0]
    node.size = re.search("Size (.*?),", node.desc_node.text).group(1)
    node.txt = "%s" % (node.a.text)

    node.item = {}
    try:
        node.search_result = url_get(urljoin(BASE_URL, node.parent.findAll("a")[0]["href"]), headers=HEADERS)
    except Exception:
        # Was a bare except: narrowed so SystemExit/KeyboardInterrupt
        # propagate; network failure just skips the enrichment.
        pass
    else:
        if node.search_result:
            try:
                node.imdb_url = re.search("http://www.imdb.com/title/tt[0-9]*", node.search_result).group(0)
            except AttributeError:
                # Was a bare except: re.search returned None (no IMDB link).
                pass
            else:
                if node.imdb_url:
                    node.imdb_id = re.search(r"(tt\d+)", node.imdb_url).group(0)

                    if node.imdb_id:
                        node.release_tags = tmdb.get_list_item(tmdb.get(node.imdb_id))

                        if node.release_tags:
                            node.item.update(node.release_tags)

    node.item.update({
        "label": node.txt,
        "path": plugin.url_for("play", uri=node.magnet_node["href"]),
        "is_playable": True,
    })
    return node.item
 def proc_close():
     # Best-effort shutdown of the helper process over HTTP.
     # NOTE(review): closure fragment — `proc` and `url_get` come from an
     # enclosing scope not visible in this chunk.
     # NOTE(review): `not proc.poll()` is truthy both when the process is
     # still running (poll() -> None) and when it exited with code 0 —
     # presumably "if still running" is intended; confirm against caller.
     if not proc.poll():
         url_get("http://%s/shutdown" % proc.bind_address, with_immunicity=False)
# Example #13
# 0
 def _get_all_meta():
     # Fetch and parse the show's full metadata XML ("all/<LANG>.xml") from
     # TVDB, returning the dict with image paths rewritten to full URLs,
     # or None when the document is empty.
     # NOTE(review): closure fragment — `show_id`, `LANG`, `ET`, `HEADERS`,
     # `url_get`, `show_base_url`, `dom2dict` and `update_image_urls` come
     # from an enclosing scope not visible in this chunk.
     r = url_get("%s/all/%s.xml" % (show_base_url(show_id), LANG), headers=HEADERS, with_immunicity=False)
     dom = ET.fromstring(r)
     if not len(dom):
         return
     return update_image_urls(dom2dict(dom))
def eztv_shows_by_letter(letter):
    """Yield list items for every EZTV show whose name starts with *letter*.

    The EZTV show list is scraped once and cached in a shelf keyed by first
    letter ("0-9" buckets all digit-leading names). Each show in the
    requested bucket is then matched against TVDB in a thread pool (with a
    cancellable progress dialog); matched shows get full metadata items,
    unmatched ones fall back to a plain label.
    """
    import re
    import xbmc
    from bs4 import BeautifulSoup
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from xbmctorrentV2.utils import url_get, SafeDialogProgress
    from xbmctorrentV2 import tvdb

    with shelf("it.eztv.shows") as eztv_shows:
        if not eztv_shows:
            response = url_get("%s/showlist/" % BASE_URL, headers=HEADERS)
            soup = BeautifulSoup(response, "html5lib")
            nodes = soup.findAll("a", "thread_link")
            for node in nodes:
                show_id, show_named_id = node["href"].split("/")[2:4]
                show_name = node.text
                show_first_letter = show_name[0].lower()
                # Digit-leading shows are grouped under the "0-9" bucket.
                # Fix: raw string for the regex escape.
                if re.match(r"\d+", show_first_letter):
                    show_first_letter = "0-9"
                eztv_shows.setdefault(show_first_letter, {}).update({
                    show_id: {
                        "id": show_id,
                        "named_id": show_named_id,
                        "name": node.text,
                    }
                })

    shows_list = sorted(eztv_shows[letter.lower()].values(), key=lambda x: x["name"].lower())

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching serie information...", line2="", line3="")

        state = {"done": 0}

        def on_serie(future):
            # Done-callback (runs on worker threads): advance the progress bar.
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(shows_list)),
                line2=data and data["seriesname"] or "",
            )

        with futures.ThreadPoolExecutor(max_workers=5) as pool_tvdb:
            tvdb_list = [pool_tvdb.submit(tvdb.search, show["name"], True) for show in shows_list]
            # Plain loop instead of a side-effect list comprehension.
            for future in tvdb_list:
                future.add_done_callback(on_serie)
            # Poll instead of blocking so the user can cancel the dialog.
            while not all(job.done() for job in tvdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    tvdb_list = [job.result() for job in tvdb_list]
    for eztv_show, tvdb_show in izip(shows_list, tvdb_list):
        if tvdb_show:
            item = tvdb.get_list_item(tvdb_show)
            item.update({
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
            })
            yield item
        else:
            yield {
                "label": eztv_show["name"],
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
            }
 def _eztv_get_show():
     # Fetch the EZTV show page and return it parsed with html5lib.
     # NOTE(review): closure fragment — `show_id`, `BASE_URL`, `HEADERS`,
     # `url_get` and `BeautifulSoup` come from an enclosing scope not
     # visible in this chunk.
     return BeautifulSoup(url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS), "html5lib")
 def _eztv_get_show():
     # Same as the variant above, with log lines bracketing the fetch.
     # NOTE(review): closure fragment — `show_id`, `BASE_URL`, `HEADERS`,
     # `plugin`, `url_get` and `BeautifulSoup` come from an enclosing
     # scope not visible in this chunk.
     plugin.log.info("Getting show")
     response = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
     plugin.log.info("Got show")
     return BeautifulSoup(response, "html5lib")