def proc_close():
    """Ask the running torrent2http process to shut down over HTTP.

    Uses the module-level ``proc`` handle. Does nothing if the process has
    already terminated. Shutdown failures are logged, never raised.
    """
    # poll() returns None while the process is still running and the exit
    # code once it has exited; `not poll()` would wrongly treat exit code 0
    # as "still running", so compare against None explicitly.
    if proc.poll() is None:
        plugin.log.info("Trying to stop torrent2http at http://%s/shutdown" % proc.bind_address)
        try:
            url_get("http://%s/shutdown" % proc.bind_address, with_immunicity=False)
        except Exception as e:
            plugin.log.info('Failed to stop torrent2http')
            # format_exc takes an optional traceback *limit*, not the
            # exception object; log the traceback line by line for readability.
            for line in traceback.format_exc().split('\n'):
                plugin.log.info(line)
 def proc_close():
     """Request shutdown of the torrent2http helper process via its HTTP API.

     Reads the module-level ``proc`` handle; a no-op when the process has
     already exited. Errors during shutdown are logged and swallowed.
     """
     # Popen.poll() yields None while running; truth-testing the result would
     # mistake a clean exit (code 0) for a live process.
     if proc.poll() is None:
         plugin.log.info(
             "Trying to stop torrent2http at http://%s/shutdown" %
             proc.bind_address)
         try:
             url_get("http://%s/shutdown" % proc.bind_address,
                     with_immunicity=False)
         except Exception as e:
             plugin.log.info('Failed to stop torrent2http')
             # format_exc()'s argument is a traceback depth limit, not the
             # exception; call it without arguments and log each line.
             for line in traceback.format_exc().split('\n'):
                 plugin.log.info(line)
Пример #3
0
def piratebay_page(root, page):
    """Yield playable list items scraped from one Pirate Bay listing page,
    followed by a non-playable ">> Next page" item."""
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from kmediatorrent.utils import url_get

    page = int(page)
    listing_url = urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page))
    soup = BeautifulSoup(url_get(listing_url, headers=HEADERS), "html5lib")

    for det_node in soup.findAll("div", "detName"):
        cell = det_node.parent
        # Columns 2 and 3 of the result row hold seeder and leecher counts.
        seeds, peers = [td.text for td in cell.parent.findAll("td")[2:]]
        magnet_link = cell.findAll("a")[1]
        desc_cell = cell.findAll("font", "detDesc")[0]
        size = re.search("Size (.*?),", desc_cell.text).group(1)
        label = "%s (%s S:%s P:%s)" % (det_node.a.text, size.replace(" ", " "), seeds, peers)
        yield {
            "label": label,
            "path": plugin.url_for("play", uri=magnet_link["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
Пример #4
0
def btdigg_page(query, sort, page):
    """Yield BTDigg search results for *query* plus a next-page item."""
    from bs4 import BeautifulSoup
    from kmediatorrent.utils import url_get

    html_data = url_get("%s/search" % BASE_URL, headers=HEADERS, params={
        "q": query,
        "order": sort,
        "p": page,
    })
    soup = BeautifulSoup(html_data, "html5lib")
    name_cells = soup.findAll("td", "torrent_name")
    # Attribute tables alternate with layout tables; take every second one.
    attr_tables = soup.findAll("table", "torrent_name_tbl")[1::2]

    for name_cell, attr_table in zip(name_cells, attr_tables):
        values = attr_table.findAll("span", "attr_val")
        label = "%s (%s, DLs:%s)" % (
            name_cell.find("a").text, values[0].text, values[2].text)
        yield {
            "label": label,
            "path": plugin.url_for("play", uri=attr_table.find("a")["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for(
            "btdigg_page", query=query, sort=sort, page=int(page) + 1),
        "is_playable": False,
    }
Пример #5
0
def kat_page(root, page, sort_field, sort_order):
    """Yield Kickass Torrents RSS items for one listing page, plus a
    pagination item.

    Any extra request args (e.g. content_type) are forwarded to the
    next-page URL so the listing keeps its context.
    """
    from urlparse import urljoin
    from kmediatorrent.scrapers import rss
    from kmediatorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)),
                        headers=HEADERS,
                        params={
                            "rss": "1",  # request the RSS representation
                            "field": sort_field,
                            "sorder": sort_order
                        })
    for item in rss.parse(page_data, content_type):
        yield item
    # Mark the pagination entry explicitly non-playable, consistent with the
    # ">> Next page" items produced by the other scrapers in this add-on.
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("kat_page",
                               root=root,
                               page=page + 1,
                               sort_field=sort_field,
                               sort_order=sort_order,
                               **plugin.request.args_dict),
        "is_playable": False,
    }
def btdigg_page(query, sort, page):
    """Scrape one BTDigg results page and yield its list items."""
    from bs4 import BeautifulSoup
    from kmediatorrent.utils import url_get

    search_params = {"order": sort, "q": query, "p": page}
    soup = BeautifulSoup(
        url_get("%s/search" % BASE_URL, headers=HEADERS, params=search_params),
        "html5lib")
    names = soup.findAll("td", "torrent_name")
    # Every other torrent_name_tbl table carries the attribute spans.
    details = soup.findAll("table", "torrent_name_tbl")[1::2]

    for name_node, detail_node in zip(names, details):
        spans = detail_node.findAll("span", "attr_val")
        yield {
            "label": "%s (%s, DLs:%s)" % (
                name_node.find("a").text, spans[0].text, spans[2].text),
            "path": plugin.url_for("play", uri=detail_node.find("a")["href"]),
            "is_playable": True,
        }
    next_page = int(page) + 1
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("btdigg_page", query=query, sort=sort, page=next_page),
        "is_playable": False,
    }
Пример #7
0
def get_banners(show_id):
    """Return banner metadata for *show_id*, or None when the feed is empty."""
    import xml.etree.ElementTree as ET
    from kmediatorrent.utils import url_get

    xml_text = url_get("%s/banners.xml" % show_base_url(show_id),
                       headers=HEADERS, with_immunicity=False)
    root = ET.fromstring(xml_text)
    if len(root) == 0:
        return
    return update_image_urls(dom2dict(root))["banner"]
Пример #8
0
def cpb_top(type):
    """Yield items from the cpasbien top-100 page, optionally filtered."""
    from bs4 import BeautifulSoup
    from kmediatorrent.utils import url_get

    url_adr = "%s/top-100.php" % BASE_URL
    # TYPE_A means "all categories"; anything else becomes a filter param.
    if type != TYPE_A:
        url_adr += "?filtre=%s" % type

    soup = BeautifulSoup(url_get(url_adr), "html5lib")
    # Result rows alternate between the "ligne0" and "ligne1" CSS classes.
    for row in soup.findAll('div', re.compile('ligne[0,1]')):
        if row.find('a', href=isValid):
            yield getItemDetails(row)
Пример #9
0
def get(show_id):
    """Return cached TVDB metadata for *show_id*, fetching on a cache miss.

    Returns None when TVDB sends back an empty document.
    """
    from kmediatorrent.caching import shelf
    with shelf("com.thetvdb.show.%s" % show_id) as show:
        if not show:
            import xml.etree.ElementTree as ET
            from kmediatorrent.utils import url_get

            xml_text = url_get(show_url(show_id), headers=HEADERS,
                               with_immunicity=False)
            dom = ET.fromstring(xml_text)
            if not len(dom):
                return
            # Flatten the first child element and split multi-value fields.
            meta = split_keys(dom2dict(dom[0]), "actors", "genre", "writer")
            update_image_urls(meta)
            show.update(meta)
        return dict(show)
def bitsnoop_page(root, page):
    """Return parsed RSS items for one Bitsnoop listing page."""
    from urlparse import urljoin
    from kmediatorrent.scrapers import rss
    from kmediatorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    rss_params = {"fmt": "rss", "sort": "n_s", "dir": "desc"}
    listing_url = urljoin(BASE_URL, "%s/%d/" % (root, int(page)))
    return rss.parse(url_get(listing_url, headers=HEADERS, params=rss_params))
Пример #11
0
def extratorrent_page(type_="", cid="", search="", page=1):
    """Return parsed RSS items from the ExtraTorrent feed for these filters."""
    from urlparse import urljoin
    from kmediatorrent.scrapers import rss
    from kmediatorrent.utils import url_get

    content_type = plugin.request.args_dict.pop("content_type", None)
    if content_type:
        plugin.set_content(content_type)

    # Remaining request args extend/override the base query parameters.
    params = {"type": type_, "search": search, "cid": cid}
    params.update(plugin.request.args_dict)
    feed = url_get(urljoin(BASE_URL, "/rss.xml"), headers=HEADERS, params=params)
    return rss.parse(feed, content_type)
def from_torrent_url(url):
    """Download a .torrent file and convert it into a magnet URI."""
    import base64
    import bencode
    import hashlib
    import urllib
    from kmediatorrent.utils import url_get

    metadata = bencode.bdecode(url_get(url))
    # The info-hash is the SHA-1 of the bencoded "info" dict, base32-encoded.
    info_hash = base64.b32encode(
        hashlib.sha1(bencode.bencode(metadata['info'])).digest())
    params = {
        'dn': metadata['info']['name'],
        'tr': metadata['announce'],
    }
    plugin.log.info(params)
    return 'magnet:?%s&%s' % ('xt=urn:btih:%s' % info_hash,
                              urllib.urlencode(params))
Пример #13
0
def from_torrent_url(url):
    """Fetch the torrent at *url* and build the equivalent magnet link."""
    import base64
    import bencode
    import hashlib
    import urllib
    from kmediatorrent.utils import url_get

    torrent_data = url_get(url)
    metadata = bencode.bdecode(torrent_data)
    # Magnet links identify a torrent by the base32 SHA-1 of its info dict.
    digest = hashlib.sha1(bencode.bencode(metadata['info'])).digest()
    b32hash = base64.b32encode(digest)
    params = {'dn': metadata['info']['name'], 'tr': metadata['announce']}
    plugin.log.info(params)
    query = urllib.urlencode(params)
    return 'magnet:?%s&%s' % ('xt=urn:btih:%s' % b32hash, query)
Пример #14
0
def bitsnoop_page(root, page):
    """Fetch one Bitsnoop page as RSS and return its parsed items."""
    from urlparse import urljoin
    from kmediatorrent.scrapers import rss
    from kmediatorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    feed = url_get(
        urljoin(BASE_URL, "%s/%d/" % (root, page)),
        headers=HEADERS,
        params={"fmt": "rss", "sort": "n_s", "dir": "desc"},
    )
    return rss.parse(feed)
Пример #15
0
def search(name, complete=False):
    """Search TVDB for a show named *name*, caching results by query hash.

    Returns the basic series metadata by default; with ``complete=True`` the
    full show record (via get()) is cached and the shelf is returned.
    Returns None when TVDB sends back an empty document.
    """
    from kmediatorrent.caching import shelf
    import hashlib
    # Hash the query so the cache key is filesystem-safe.
    search_hash = hashlib.sha1(name).hexdigest()
    with shelf("com.thetvdb.search.%s" % search_hash) as show:
        if not show:
            import xml.etree.ElementTree as ET
            from kmediatorrent.utils import url_get
            dom = ET.fromstring(url_get("%s/api/GetSeries.php" % BASE_URL, params={
                "seriesname": name,
            }, headers=HEADERS, with_immunicity=False))
            if not len(dom):
                return
            meta = dom2dict(dom[0])
            if not complete:
                return update_image_urls(meta)
            show.update(get(meta["id"]))
        return show
Пример #16
0
def nyaa_page(cats, offset, sort, order, term=""):
    """Yield RSS items from NyaaTorrents plus a pagination entry."""
    from kmediatorrent.scrapers import rss
    from kmediatorrent.utils import url_get

    offset = int(offset)
    query = {
        "page": "rss",
        "cats": cats,
        "offset": "%d" % offset,
        "sort": sort,
        "order": order,
        "term": term,
    }
    for item in rss.parse(url_get(BASE_URL, headers=HEADERS, params=query)):
        yield item
    # Searches and plain category listings route through different views.
    next_view = "search_result_page" if term else "default_nyaa_page"
    yield {
        "label": ">> Next page",
        "path": plugin.url_for(next_view, cats=cats, sort=sort, order=order,
                               term=term, offset=offset + 1),
        "is_playable": False,
    }
Пример #17
0
def cpb_listAll(cat, page):
    """Yield all items of a cpasbien category page, plus pagination."""
    from bs4 import BeautifulSoup
    from kmediatorrent.utils import url_get

    # e.g. view_cat.php?categorie=films&page=1
    url_adr = "%s/view_cat.php?categorie=%s&page=%s" % (BASE_URL, cat, page)
    soup = BeautifulSoup(url_get(url_adr), "html5lib")

    # Result rows alternate between the "ligne0" and "ligne1" CSS classes.
    for row in soup.findAll('div', re.compile('ligne[0,1]')):
        if row.find('a', href=isValid):
            yield getItemDetails(row)

    # Only offer a next page when the site shows pagination controls.
    if soup.find('div', id="pagination"):
        yield {
            "label": ">> Next page",
            "path": plugin.url_for("cpb_listAll", cat=cat, page=int(page) + 1),
            "is_playable": False
        }
def kat_page(root, page, sort_field, sort_order):
    """Yield Kickass Torrents RSS items for one page plus a next-page link.

    Extra request args (e.g. content_type) are forwarded to the next-page
    URL so the listing keeps its context.
    """
    from urlparse import urljoin
    from kmediatorrent.scrapers import rss
    from kmediatorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)), headers=HEADERS, params={
        "rss": "1",  # request the RSS representation
        "field": sort_field,
        "sorder": sort_order
    })
    for item in rss.parse(page_data, content_type):
        yield item
    # Explicitly non-playable, matching the ">> Next page" items produced by
    # the other scrapers in this add-on.
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("kat_page", root=root, page=page + 1, sort_field=sort_field, sort_order=sort_order, **plugin.request.args_dict),
        "is_playable": False,
    }
Пример #19
0
def cpb_page(action, query, sort, page):
    """Yield cpasbien search results for *query*, plus pagination if shown.

    *action* selects the site endpoint, *sort* the server-side ordering.
    """
    from bs4 import BeautifulSoup
    from kmediatorrent.utils import url_get

    url_adr = "%s/%s/" % (BASE_URL, action)
    # The site encodes the query, page and sort order directly in the path.
    url_adr = url_adr + query.replace(" ","+") + "/page-" + str(page) + "," + sort
    html_data = url_get(url_adr)

    soup = BeautifulSoup(html_data, "html5lib")
    # Result rows alternate between the "ligne0" and "ligne1" CSS classes.
    name_nodes = soup.findAll('div', re.compile('ligne[0,1]'))

    for name_node in name_nodes:
        if name_node.find('a', href=isValid):
            yield getItemDetails(name_node)

    # Add pagination only if needed (the original comment line was
    # tab-indented, which mixes tabs and spaces and breaks under `python -tt`).
    if soup.find('div', id="pagination"):
        yield {
            "label": ">> Next page",
            "path": plugin.url_for("cpb_page", action=action, query=query, sort=sort, page=int(page) + 1),
            "is_playable": False
        }
Пример #20
0
def eztv_shows_by_letter(letter):
    """Yield list items for all EZTV shows whose name starts with *letter*.

    The full EZTV show list is scraped once and cached in a shelf keyed by
    first letter ("0-9" groups the digits). Each show is then enriched with
    TVDB metadata, fetched concurrently while a progress dialog is shown.
    Returns early (yielding nothing more) if the user cancels the dialog.
    """
    import re
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import nested, closing
    from itertools import izip, groupby
    from concurrent import futures
    from kmediatorrent.scrapers import ungenerate
    from kmediatorrent.utils import terminating, url_get, SafeDialogProgress
    from kmediatorrent import tvdb

    # Populate the show index on first use; afterwards the shelf is reused.
    with shelf("it.eztv.shows") as eztv_shows:
        if not eztv_shows:
            response = url_get("%s/showlist/" % BASE_URL, headers=HEADERS)
            soup = BeautifulSoup(response, "html5lib")
            nodes = soup.findAll("a", "thread_link")
            for node in nodes:
                # Href looks like /shows/<id>/<named_id>/ — take both parts.
                show_id, show_named_id = node["href"].split("/")[2:4]
                show_name = node.text
                if len(show_name) > 0:
                    show_first_letter = show_name[0].lower()
                    # All digit-initial shows share the "0-9" bucket.
                    if re.match("\d+", show_first_letter):
                        show_first_letter = "0-9"
                    eztv_shows.setdefault(show_first_letter, {}).update({
                        show_id: {
                            "id": show_id,
                            "named_id": show_named_id,
                            "name": node.text,
                        }
                    })

    if letter != "0-9":
        # Collect every bucket whose key matches the requested letter
        # case-insensitively, merge their shows, and sort by name.
        eztv_show_by_letter = []
        for key in eztv_shows.keys():
            if len(key) > 0 and key[:1].lower() == letter.lower():
                eztv_show_by_letter.append(key.lower())
        letter_list={}
        shows_list=[]
        for show in eztv_show_by_letter:
            letter_list.update(eztv_shows[show])
        for key in letter_list.keys():
            shows_list.append(letter_list[key])
        shows_list = sorted(shows_list, key=lambda k: k['name'])
    else:
        shows_list = sorted(eztv_shows[letter.lower()].values(), key=lambda x: x["name"].lower())

    # Fetch TVDB info for every show in parallel, updating a progress dialog
    # from the futures' done-callbacks.
    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching serie information...", line2="", line3="")

        # Mutable counter shared with the callback closure (Py2 has no nonlocal).
        state = {"done": 0}
        def on_serie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(shows_list)),
                line2=data and data["seriesname"] or "",
            )

        with futures.ThreadPoolExecutor(max_workers=5) as pool_tvdb:
            tvdb_list = [pool_tvdb.submit(tvdb.search, show["name"], True) for show in shows_list]
            [future.add_done_callback(on_serie) for future in tvdb_list]
            # Poll so the user can cancel; bail out without yielding results.
            while not all(job.done() for job in tvdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    tvdb_list = [job.result() for job in tvdb_list]
    # Pair each EZTV show with its TVDB match (order preserved by submit order).
    for i, (eztv_show, tvdb_show) in enumerate(izip(shows_list, tvdb_list)):
        if tvdb_show:
            item = tvdb.get_list_item(tvdb_show)
            item.update({
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
            })
            yield item
        else:
            # No TVDB match: fall back to a bare label without artwork.
            yield {
                "label": eztv_show["name"],
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
            }
Пример #21
0
 def _eztv_get_show():
     # Fetch the EZTV show page and hand it to BeautifulSoup for parsing.
     page_html = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
     return BeautifulSoup(page_html, "html5lib")
Пример #22
0
 def _eztv_get_show():
     # Download and parse the EZTV show page, logging around the network call.
     plugin.log.info("Getting show")
     show_page_url = "%s/shows/%s/" % (BASE_URL, show_id)
     response = url_get(show_page_url, headers=HEADERS)
     plugin.log.info("Got show")
     return BeautifulSoup(response, "html5lib")
Пример #23
0
 def _eztv_get_show():
     # Retrieve the raw HTML for this show from EZTV, then parse it.
     plugin.log.info("Getting show")
     html = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
     plugin.log.info("Got show")
     return BeautifulSoup(html, "html5lib")
Пример #24
0
 def _eztv_get_show():
     # One-shot fetch-and-parse of the EZTV show page.
     show_page = "%s/shows/%s/" % (BASE_URL, show_id)
     return BeautifulSoup(url_get(show_page, headers=HEADERS), "html5lib")
Пример #25
0
def eztv_shows_by_letter(letter):
    """Yield list items for all EZTV shows whose name starts with *letter*.

    The EZTV show list is scraped once and cached in a shelf keyed by first
    letter ("0-9" groups digit-initial names). TVDB metadata is fetched for
    each show concurrently while a progress dialog is displayed; if the user
    cancels the dialog, the generator returns without yielding items.
    """
    import re
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import nested, closing
    from itertools import izip, groupby
    from concurrent import futures
    from kmediatorrent.scrapers import ungenerate
    from kmediatorrent.utils import terminating, url_get, SafeDialogProgress
    from kmediatorrent import tvdb

    # Build the cached index of shows on first use.
    with shelf("it.eztv.shows") as eztv_shows:
        if not eztv_shows:
            response = url_get("%s/showlist/" % BASE_URL, headers=HEADERS)
            soup = BeautifulSoup(response, "html5lib")
            nodes = soup.findAll("a", "thread_link")
            for node in nodes:
                # Href looks like /shows/<id>/<named_id>/ — take both parts.
                show_id, show_named_id = node["href"].split("/")[2:4]
                show_name = node.text
                if len(show_name) > 0:
                    show_first_letter = show_name[0].lower()
                    # Digit-initial shows all land in the "0-9" bucket.
                    if re.match("\d+", show_first_letter):
                        show_first_letter = "0-9"
                    eztv_shows.setdefault(show_first_letter, {}).update({
                        show_id: {
                            "id": show_id,
                            "named_id": show_named_id,
                            "name": node.text,
                        }
                    })

    if letter != "0-9":
        # Merge every bucket whose key matches the requested letter
        # case-insensitively, then sort the combined shows by name.
        eztv_show_by_letter = []
        for key in eztv_shows.keys():
            if len(key) > 0 and key[:1].lower() == letter.lower():
                eztv_show_by_letter.append(key.lower())
        letter_list = {}
        shows_list = []
        for show in eztv_show_by_letter:
            letter_list.update(eztv_shows[show])
        for key in letter_list.keys():
            shows_list.append(letter_list[key])
        shows_list = sorted(shows_list, key=lambda k: k['name'])
    else:
        shows_list = sorted(eztv_shows[letter.lower()].values(),
                            key=lambda x: x["name"].lower())

    # Fetch TVDB info for each show in parallel; the done-callbacks drive the
    # progress dialog.
    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1="Fetching serie information...",
                      line2="",
                      line3="")

        # Mutable counter shared with the callback (Py2 lacks nonlocal).
        state = {"done": 0}

        def on_serie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(shows_list)),
                line2=data and data["seriesname"] or "",
            )

        with futures.ThreadPoolExecutor(max_workers=5) as pool_tvdb:
            tvdb_list = [
                pool_tvdb.submit(tvdb.search, show["name"], True)
                for show in shows_list
            ]
            [future.add_done_callback(on_serie) for future in tvdb_list]
            # Poll so a dialog cancel can abort before results are consumed.
            while not all(job.done() for job in tvdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    tvdb_list = [job.result() for job in tvdb_list]
    # Pair each EZTV show with its TVDB result (same order as submission).
    for i, (eztv_show, tvdb_show) in enumerate(izip(shows_list, tvdb_list)):
        if tvdb_show:
            item = tvdb.get_list_item(tvdb_show)
            item.update({
                "path":
                plugin.url_for("eztv_get_show_seasons",
                               show_id=eztv_show["id"],
                               tvdb_id=tvdb_show["id"])
            })
            yield item
        else:
            # No TVDB match: plain label without artwork or metadata.
            yield {
                "label":
                eztv_show["name"],
                "path":
                plugin.url_for("eztv_get_show_seasons",
                               show_id=eztv_show["id"])
            }
Пример #26
0
 def _get_all_meta():
     # Fetch the full per-language XML record for this show and normalize it;
     # returns None when the document has no children.
     xml_text = url_get("%s/all/%s.xml" % (show_base_url(show_id), LANG),
                        headers=HEADERS, with_immunicity=False)
     root = ET.fromstring(xml_text)
     if len(root) == 0:
         return
     return update_image_urls(dom2dict(root))