def proc_close():
    if proc.poll() is None:  # None means the process is still running
        try:
            url_get("http://%s/shutdown" % proc.bind_address,
                    with_immunicity=False)
        except Exception:
            pass
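
Every snippet on this page goes through xbmctorrent.utils.url_get, whose body is not shown here. As a minimal sketch of the behavior the examples assume (GET, or POST when post= is given, returning the response body; with_immunicity presumably toggles routing through a proxy service), it might look like:

# Minimal sketch of the assumed url_get helper; the real
# xbmctorrent.utils.url_get may differ, e.g. in how the
# with_immunicity flag is honored (ignored in this sketch).
import urllib
import urllib2

def url_get(url, params=None, headers=None, post=None, with_immunicity=True):
    if params:
        url = "%s?%s" % (url, urllib.urlencode(params))
    data = urllib.urlencode(post) if post is not None else None  # POST body, if any
    request = urllib2.Request(url, data=data, headers=headers or {})
    return urllib2.urlopen(request).read()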
Example 2
def proc_close():
    if proc.poll() is None:  # None means the process is still running
        plugin.log.info("Trying to stop torrent2http at http://%s/shutdown" % proc.bind_address)
        try:
            url_get("http://%s/shutdown" % proc.bind_address, with_immunicity=False)
        except Exception:
            plugin.log.info('Failed to stop torrent2http')
            map(plugin.log.info, traceback.format_exc().split('\n'))
Example 3
def proc_close():
    if proc.poll() is None:
        plugin.log.info(
            "Trying to stop torrent2http at http://%s/shutdown" %
            proc.bind_address)
        try:
            url_get("http://%s/shutdown" % proc.bind_address,
                    with_immunicity=False)
        except Exception:
            plugin.log.info('Failed to stop torrent2http')
            map(plugin.log.info, traceback.format_exc().split('\n'))
Example 4
def torrents3d_play(article):
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from urlparse import urljoin
    from xbmctorrent.magnet import generate_magnet
    from xbmctorrent.utils import SafeDialogProgress

    article = int(article)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о релизе...", line2="", line3="")

        url = urljoin(BASE_URL, "article/%d" % article)

        try:
            html_data = url_get(url, headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            torrent_href = soup.find("a", class_="genmed")

            if not torrent_href:
                dialog.update(percent=50, line2=u"Требуется авторизация. Авторизация...")

                if not plugin.get_setting("t3d_login") and not plugin.get_setting("t3d_passwd"):
                    plugin.notify("Проверьте настройки авторизации.", delay=15000)
                    return

                html_data = _torrents3d_login(url)
                soup = BeautifulSoup(html_data, "html5lib")
                torrent_href = soup.find("a", class_="genmed")

            if not torrent_href:
                xbmcgui.Dialog().ok(plugin.name, "Авторизация не удалась. Проверьте настройки авторизации.")
                return

            dialog.update(percent=100, line2=u"Обработка данных.")

            from bencode import bdecode

            title = "[%s] %s" % _torrents3d_cleantitle(soup.find("a", class_="tt-text").text)
            torrent_data = url_get(torrent_href["href"], headers=HEADERS)
            metadata = bdecode(torrent_data)

            plugin.redirect(plugin.url_for("play", uri=generate_magnet(metadata, uenc(title))))

        except Exception:
            plugin.log.error("Cannot get data from remote server")
            xbmcgui.Dialog().ok(plugin.name, u"Не удалось получить данные от сервера")
            return
Example 5
def iplay_page(cat, page):
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    page = int(page)
    url = urljoin(BASE_URL, "test/proxy.php")
    html_data = url_get(url, params={"cat": cat, "page": page}, headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")

    nodes = soup.findAll('a', 'torrent')

    next_page = {
        "label": "Next page...",
        "path": plugin.url_for("iplay_page", cat = cat, page = page + 1),
        "is_playable": False,
    }

    for node in nodes:
        text = "%s" % node.get('title')
        torrent_node = node.parent.find('img', {"class": "dld"}).parent

        yield {
            "label": text,
            "path": plugin.url_for("play", uri=torrent_node["href"]),
            "is_playable": False,
        }
    yield next_page
Example 6
def cpb_listAll(cat, page):
    import os
    import re
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get

    url_adr = "%s/view_cat.php?categorie=%s&page=%s" % (BASE_URL, cat, page) #view_cat.php?categorie=films&page=1
    html_data = url_get(url_adr)

    soup = BeautifulSoup(html_data, "html5lib")
    name_nodes = soup.findAll('div', re.compile('ligne[0,1]'))

    for name_node in name_nodes:
        title = name_node.find("a").text
        seed = name_node.find("div", "up").text
        leech = name_node.find("div", "down").text
        color = getColor(seed, leech)
        title = title + " [COLOR %s][S:%s|L:%s][/COLOR]" % (color, seed, leech)
        html_uri = name_node.find("a")["href"]
        torrent_basename = os.path.basename(html_uri)
        uri_addr = BASE_URL + "/_torrents/" + torrent_basename.replace(".html", ".torrent")
        img_addr = BASE_URL + "/_pictures/" + torrent_basename.replace(".html", ".jpg")
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=uri_addr),
            "is_playable": True,
            "thumbnail": img_addr,
            "properties": {
                "fanart_image": img_addr,
            },
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("cpb_listAll", cat=cat, page=int(page) + 1),
        "is_playable": False,
    }
Example 7
def cpb_last(type):
    import os
    import re
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get

    url_adr = "%s/derniers-torrents.php" % BASE_URL
    if type != TYPE_A:
        url_adr = url_adr + "?filtre=%s" % type
    html_data = url_get(url_adr)

    soup = BeautifulSoup(html_data, "html5lib")
    name_nodes = soup.findAll('div', re.compile('ligne[0,1]'))

    for name_node in name_nodes:
        title = name_node.find("a").text
        seed = name_node.find("div", "up").text
        leech = name_node.find("div", "down").text
        color = getColor(seed, leech)
        title = title + " [COLOR %s][S:%s|L:%s][/COLOR]" % (color, seed, leech)
        html_uri = name_node.find("a")["href"]
        torrent_basename = os.path.basename(html_uri)
        uri_addr = BASE_URL + "/_torrents/" + torrent_basename.replace(".html", ".torrent")
        img_addr = BASE_URL + "/_pictures/" + torrent_basename.replace(".html", ".jpg")
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=uri_addr),
            "is_playable": True,
            "thumbnail": img_addr,
            "properties": {
                "fanart_image": img_addr,
            },
        }
Example 8
def ilcorsaronero_page(root, page):
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "%s&page=%d" % (root, page)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll("a", class_="tab")

    for node in nodes:
        size, null, data, seeds, peers = map(lambda x: (x.font.text if x.font is not None else None), node.parent.parent.findAll("td")[-5:])
        text = "%s (%s S:%s P:%s %s)" % (node.text, size, seeds, peers, data)

        yield {
            "label": text,
            "path": plugin.url_for("ilcorsaronero_play", uri=node["href"]),
            "is_playable": True,
        }

    if root.find('search') == -1:
        yield {
            "label": ">> Next page",
            "path": plugin.url_for("ilcorsaronero_page", root=root, page=page + 1),
            "is_playable": False,
        }
Example 9
def piratebay_page(root, page):
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll("div", "detName")

    for node in nodes:
        seeds, peers = map(lambda x: x.text, node.parent.parent.findAll("td")[2:])
        magnet_node = node.parent.findAll("a")[1]
        desc_node = node.parent.findAll("font", "detDesc")[0]
        size = re.search("Size (.*?),", desc_node.text).group(1)
        text = "%s (%s S:%s P:%s)" % (node.a.text, size.replace(" ", " "), seeds, peers)
        yield {
            "label": text,
            "path": plugin.url_for("play", uri=magnet_node["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
Example 10
def kat_page(root, page, sort_field, sort_order):
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(
        urljoin(BASE_URL, "%s/%d" % (root, page)),
        headers=HEADERS,
        params={"rss": "1", "field": sort_field, "sorder": sort_order},
    )
    for item in rss.parse(page_data, content_type):
        yield item
    yield {
        "label": ">> Next page",
        "path": plugin.url_for(
            "kat_page",
            root=root,
            page=page + 1,
            sort_field=sort_field,
            sort_order=sort_order,
            **plugin.request.args_dict
        ),
    }
Example 11
def btdigg_page(query, sort, page):
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get

    html_data = url_get("%s/search" % BASE_URL, headers=HEADERS, params={
        "order": sort,
        "q": query,
        "p": page,
    })
    soup = BeautifulSoup(html_data, "html5lib")
    name_nodes = soup.findAll("td", "torrent_name")
    attr_nodes = soup.findAll("table", "torrent_name_tbl")[1::2]

    for name_node, attr_node in zip(name_nodes, attr_nodes):
        attrs = attr_node.findAll("span", "attr_val")
        title = "%s (%s, DLs:%s)" % (name_node.find("a").text, attrs[0].text, attrs[2].text)
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=attr_node.find("a")["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("btdigg_page", query=query, sort=sort, page=int(page) + 1),
        "is_playable": False,
    }
Example 12
def cpb_series():
    url_adr = "%s/series/" % BASE_URL
    html_movies = url_get(url_adr)
    soup = BeautifulSoup(html_movies, "html5lib")
    s2 = soup.find("div", "affichage-sous-categorie").find_all(
        "a")  #,"affichage-sous-categorie")
    sous_cat = []
    for aa in s2:
        if aa.text != "":
            sous_cat_label = (aa.text).encode('ascii', 'replace')
            sous_cat_name = "films"
            sous_cat_query = string.split(aa.get('href'), '/')[-2]
            sous_cat_sort = SORT_DATE + SORT_DIRECTION_DESC
            sous_cat_page = 0
            sous_cat += [
                {
                    "label": sous_cat_label,
                    "path": plugin.url_for("cpb_page",
                                           cat=sous_cat_name,
                                           query=sous_cat_query,
                                           sort=sous_cat_sort,
                                           page=sous_cat_page),
                },
            ]
    return sous_cat
Example 13
def get_torrent_info(url):
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import first, url_get

    response = url_get(url)
    soup = BeautifulSoup(response, "html5lib")
    movie = first(soup.select('td[itemtype="http://schema.org/Product"] div.font11px a[href="/movies/"]')) is not None
    serie = first(soup.select('td[itemtype="http://schema.org/Product"] div.font11px a[href="/tv/"]')) is not None

    def itemprop(prop, default=None):
        node = first(soup.select('[itemprop="%s"]' % prop))
        return node and node.text or default

    def itemprops(prop, default=[]):
        nodes = soup.select('[itemprop="%s"]' % prop)
        return [node.text for node in nodes] or default

    return {
        "movie": movie and not serie,
        "serie": serie and not movie,
        "seeders": itemprop("seeders"),
        "leechers": itemprop("leechers"),
        "title": itemprop("name"),
        "quality": itemprop("quality"),
        "genres": itemprops("genre"),
        "magnet_url": first(soup.select(".magnetlinkButton"))["href"],
        "poster": first(soup.select('a.movieCover img'), {}).get("href"),
        "cast": [cast.text for cast in soup.select('[itemprop="name"] a')],
        "imdb_id": find_imdb_id(soup),
    }
Example 14
File: nyaa.py Project: roeiba/xbmc
def nyaa_page(cats, offset, sort, order, term=""):
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    offset = int(offset)
    rss_data = url_get(BASE_URL,
                       headers=HEADERS,
                       params={
                           "cats": cats,
                           "offset": "%d" % offset,
                           "sort": sort,
                           "order": order,
                           "term": term,
                           "page": "rss",
                       })
    for item in rss.parse(rss_data):
        yield item
    yield {
        "label": ">> Next page",
        "path": plugin.url_for(term and "search_result_page" or "default_nyaa_page",
                               cats=cats,
                               sort=sort,
                               order=order,
                               term=term,
                               offset=offset + 1),
        "is_playable": False,
    }
Example 15
def _get_all_meta():
    r = url_get("%s/all/%s.xml" % (show_base_url(show_id), LANG),
                headers=HEADERS)
    dom = ET.fromstring(r)
    if not len(dom):
        return
    return update_image_urls(dom2dict(dom))
Example 16
def search(name, complete=False):
    from xbmctorrent.caching import shelf
    import hashlib

    search_hash = hashlib.sha1(name).hexdigest()
    with shelf("com.thetvdb.search.%s" % search_hash) as show:
        if not show:
            import re
            import xml.etree.ElementTree as ET
            from xbmctorrent.utils import url_get

            dom = ET.fromstring(
                url_get(
                    "%s/api/GetSeries.php" % BASE_URL,
                    params={"seriesname": name},
                    headers=HEADERS,
                    with_immunicity=False,
                )
            )
            if not len(dom):
                return
            meta = dom2dict(dom[0])
            if not complete:
                return update_image_urls(meta)
            show.update(get(meta["id"]))
        return show
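
Example 16 caches lookups in a "shelf" keyed by the SHA-1 of the query. The shelf helper is assumed to act as a context manager over a persistent dict; a rough sketch under that assumption (hypothetical storage path, and the real xbmctorrent.caching.shelf may add TTL handling):

# Rough sketch of the assumed shelf() cache helper.
import contextlib
import shelve

@contextlib.contextmanager
def shelf(name, ttl=0):
    # ttl accepted for parity but ignored in this sketch; the path is hypothetical.
    d = shelve.open("/tmp/xbmctorrent.%s" % name, writeback=True)
    try:
        yield d
    finally:
        d.close()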
Example 17
def btdigg_page(query, sort, page):
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get

    html_data = url_get("%s/search" % BASE_URL,
                        headers=HEADERS,
                        params={
                            "order": sort,
                            "q": query,
                            "p": page,
                        })
    soup = BeautifulSoup(html_data, "html5lib")
    name_nodes = soup.findAll("td", "torrent_name")
    attr_nodes = soup.findAll("table", "torrent_name_tbl")[1::2]

    for name_node, attr_node in zip(name_nodes, attr_nodes):
        attrs = attr_node.findAll("span", "attr_val")
        title = "%s (%s, DLs:%s)" % (name_node.find("a").text, attrs[0].text,
                                     attrs[2].text)
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=attr_node.find("a")["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("btdigg_page",
                               query=query,
                               sort=sort,
                               page=int(page) + 1),
        "is_playable": False,
    }
Example 18
def kat_page(root, page, sort_field, sort_order):
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)),
                        headers=HEADERS,
                        params={
                            "rss": "1",
                            "field": sort_field,
                            "sorder": sort_order
                        })
    for item in rss.parse(page_data, content_type):
        yield item
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("kat_page",
                               root=root,
                               page=page + 1,
                               sort_field=sort_field,
                               sort_order=sort_order,
                               **plugin.request.args_dict),
    }
Example 19
def cpb_page(cat, query, sort, page):
    log.debug("THE CAT = " + cat + " & THE Q = " + query)
    if "cpasbien" not in query:
        url_adr = BASE_URL + "/" + cat + "/" + query.replace(" ", "+") + "/page-" + str(page) + "," + sort
    else:
        url_adr = BASE_URL + "/view_cat.php?categorie=" + cat + "&page=" + str(page) + "&trie=" + sort[5:]
    log.debug("THE URL = " + url_adr)
    html_data = url_get(url_adr)

    soup = BeautifulSoup(html_data, "html5lib")
    name_nodes = soup.findAll("td", "torrent-aff")

    for name_node in name_nodes:
        title = name_node.find("a").text
        html_uri = name_node.find("a")["href"]
        torrent_basename = os.path.basename(html_uri)
        uri_addr = BASE_URL + "/_torrents/" + torrent_basename.replace(".html", ".torrent")
        img_addr = BASE_URL + "/_pictures/" + torrent_basename.replace(".html", ".jpg")
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=uri_addr),
            "is_playable": True,
            "thumbnail": img_addr,
            "properties": {"fanart_image": img_addr},
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("cpb_page", cat=cat, query=query, sort=sort, page=int(page) + 1),
        "is_playable": False,
    }
Example 20
def piratebay_page(root, page):
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    page = int(page)
    html_data = url_get(urljoin(BASE_URL,
                                "%s/%d/7/100,200,500" % (root, page)),
                        headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll("div", "detName")

    for node in nodes:
        seeds, peers = map(lambda x: x.text,
                           node.parent.parent.findAll("td")[2:])
        magnet_node = node.parent.findAll("a")[1]
        desc_node = node.parent.findAll("font", "detDesc")[0]
        size = re.search("Size (.*?),", desc_node.text).group(1)
        text = "%s (%s S:%s P:%s)" % (node.a.text, size.replace(
            " ", " "), seeds, peers)
        yield {
            "label": text,
            "path": plugin.url_for("play", uri=magnet_node["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
Example 21
def get_banners(show_id):
    import xml.etree.ElementTree as ET
    from xbmctorrent.utils import url_get

    r = url_get("%s/banners.xml" % show_base_url(show_id), headers=HEADERS)
    dom = ET.fromstring(r)
    if not len(dom):
        return
    return update_image_urls(dom2dict(dom))["banner"]
Example 23
def ilcorsaronero_play(uri):
    import re
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get

    html_data = url_get(uri, headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    magnet_node = soup.findAll("a", class_="forbtn", href=re.compile("magnet"))

    plugin.redirect(plugin.url_for("play", uri=magnet_node[0]["href"]))
Example 24
def proc_close():
    if proc.poll() is None:
        try:
            url_get("http://%s/shutdown" %
                    proc.bind_address, with_immunicity=False)
        except Exception:
            pass
        finished = False
        os.close(log_thread.write_fd)
        start = time.time()
        while (time.time() - start) < 10:
            time.sleep(0.2)
            if proc.poll() is not None:
                finished = True
                break
        if not finished:
            logger("timeout occurred while shutting down")
            proc.kill()
        proc.wait()
Example 25
def piratebay_page(root, page):
    import xbmc
    import xbmcgui
    from concurrent import futures
    from contextlib import closing
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get, SafeDialogProgress

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching index page...", line2="", line3="")

        plugin.set_content("movies")
        page = int(page)

        try:
            html_data = url_get(urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page)), headers=HEADERS)
        except Exception:
            dialog = xbmcgui.Dialog()
            dialog.ok("Piratebay", "Timeout")
            return

        soup = BeautifulSoup(html_data, "html5lib")
        nodes = soup.findAll("div", "detName")

        state = {"done": 0}

        def on_movie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(nodes)),
                line2=data.get("label") or "",
            )

        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")
        with futures.ThreadPoolExecutor(max_workers=10) as pool_tmdb:
            tmdb_list = [pool_tmdb.submit(piratebay_record, node) for node in nodes]
            [future.add_done_callback(on_movie) for future in tmdb_list]
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(200)

    for job in tmdb_list:
        yield job.result()

    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
Example 26
def get(show_id):
    import xml.etree.ElementTree as ET
    from xbmctorrent.utils import url_get

    dom = ET.fromstring(url_get(show_url(show_id), headers=HEADERS))
    if not len(dom):
        return
    meta = dom2dict(dom[0])
    meta = split_keys(meta, "actors", "genre", "writer")
    update_image_urls(meta)
    return meta
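
The TVDB examples lean on helpers that are not shown: dom2dict, which flattens an ElementTree node into a plain dict, and update_image_urls, which rewrites banner paths. A minimal sketch of what dom2dict is assumed to do (the real helper may recurse or normalize keys differently):

# Minimal sketch of the assumed dom2dict helper: map each child
# element's tag to its text content.
def dom2dict(node):
    return dict((child.tag.lower(), (child.text or "").strip())
                for child in node)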
Example 27
def _iplay_login(referer=BASE_URL):
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    html_data = url_get(urljoin(BASE_URL, "user"))

    soup = BeautifulSoup(html_data, "html5lib")
    form_build_id = soup.find('input', {"name": "form_build_id"}).get("value")

    HEADERS["Referer"] = referer
    HEADERS["Origin"] = BASE_URL
    HEADERS["Content-Type"] = "application/x-www-form-urlencoded"

    values = {
        "name": "admin",
        "pass": "******",
        "form_id": "user_login",
        "form_build_id": form_build_id,
        "op": "Log in",
    }

    html_data = url_get(urljoin(BASE_URL, "user"), post=values, headers=HEADERS)
    plugin.notify(html_data)
    return ""
Example 28
def search(name, complete=False):
    import re
    import xml.etree.ElementTree as ET
    from xbmctorrent.utils import url_get

    dom = ET.fromstring(url_get("http://www.thetvdb.com/api/GetSeries.php", params={
        "seriesname": re.sub("\(\w+\)", "", name).strip(),
    }, headers=HEADERS))
    if not len(dom):
        return
    meta = dom2dict(dom[0])
    if not complete:
        return update_image_urls(meta)
    return get(meta["id"])
Example 29
def get(show_id):
    from xbmctorrent.caching import shelf
    with shelf("com.thetvdb.show.%s" % show_id) as show:
        if not show:
            import xml.etree.ElementTree as ET
            from xbmctorrent.utils import url_get

            dom = ET.fromstring(url_get(show_url(show_id), headers=HEADERS, with_immunicity=False))
            if not len(dom):
                return
            meta = dom2dict(dom[0])
            meta = split_keys(meta, "actors", "genre", "writer")
            update_image_urls(meta)
            show.update(meta)
        return dict(show)
Example 30
def get(show_id):
    from xbmctorrent.caching import shelf
    with shelf("com.thetvdb.show.%s" % show_id) as show:
        if not show:
            import xml.etree.ElementTree as ET
            from xbmctorrent.utils import url_get

            dom = ET.fromstring(url_get(show_url(show_id), headers=HEADERS))
            if not len(dom):
                return
            meta = dom2dict(dom[0])
            meta = split_keys(meta, "actors", "genre", "writer")
            update_image_urls(meta)
            show.update(meta)
        return dict(show)
Example 31
def bitsnoop_page(root, page):
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d/" % (root, page)), headers=HEADERS, params={
        "fmt": "rss",
        "sort": "n_s",
        "dir": "desc",
    })
    return rss.parse(page_data)
Example 32
def torrents3d_resuls(query, page):
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    page = int(page)
    params = {"nm": query, "start": page * PAGE_SIZE}

    html_data = url_get(urljoin(BASE_URL, "tracker.php"),
                        params=params,
                        headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll("tr", id=re.compile("tor_(\d+)"))

    for node in nodes:
        download_node = node.find("a", class_="tr-dl")
        # skip closed
        if not download_node:
            continue

        article_href = node.find("a", class_="genmed")["href"]
        size = download_node.text.replace("&nbsp;", " ").strip()
        title, _, _, seeds, peers, _ = map(lambda x: x.text.strip(),
                                           node.findAll("td")[3:])
        text = "[COLOR FFFFFFCC][%s][/COLOR] %s (%s S:%s P:%s)" % (
            _torrents3d_cleantitle(title) + (size, seeds, peers))
        yield {
            "label": text,
            "path": plugin.url_for("torrents3d_play",
                                   article=re.search(".*\/(\d+)\/", article_href).group(1)),
            "is_playable": True,
        }

    if len(nodes) == PAGE_SIZE:
        next_page = {
            "label": u"[COLOR FFFFFF00][Далее >>][/COLOR]",
            "path": plugin.url_for("torrents3d_resuls",
                                   query=query,
                                   page=page + 1),
            "is_playable": False,
        }
        yield next_page
Example 33
def torrents3d_page(section, page):
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "section/%s/%s/" % (section, PAGE_SIZE * page)),
                        headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    regexp = re.compile("tr-(\d+)")
    nodes = soup.findAll("tr", id=regexp)

    for node in nodes:
        tds = node.findAll("td")
        if len(tds) < 5:
            continue
        size, seeds = map(lambda x: x.text.strip(), tds[1:3])
        article_node = tds[0]
        article_href = article_node.find("a", class_="tt-text")
        stereo, title = _torrents3d_cleantitle(article_href.text)
        text = "[%s] %s (%s S:%s)" % (stereo, title, size.replace("&nbsp;", " "), seeds)
        yield {
            "label": text,
            "path": plugin.url_for("torrents3d_play", article=article_href["id"][3:]),
            "is_playable": True,
            "info": {"title": title},
        }

    if len(nodes) == PAGE_SIZE:
        next_page = {
            "label": u"[COLOR FFFFFF00][Далее >>][/COLOR]",
            "path": plugin.url_for("torrents3d_page", section=section, page=page + 1),
            "is_playable": False,
        }
        yield next_page
Example 34
def extratorrent_page(type_="", cid="", search="", page=1):
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.pop("content_type", None)
    if content_type:
        plugin.set_content(content_type)

    params = {
        "type": type_,
        "search": search,
        "cid": cid,
    }
    params.update(plugin.request.args_dict)
    page_data = url_get(urljoin(BASE_URL, "/rss.xml"), headers=HEADERS, params=params)
    return rss.parse(page_data, content_type)
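
Several scrapers (extratorrent, kat, bitsnoop, nyaa) defer to xbmctorrent.scrapers.rss.parse. A minimal sketch of what that parser is assumed to do: yield one playable item per feed entry, reusing the plugin.url_for routing seen above (the real module likely extracts more metadata and uses content_type):

# Sketch of the assumed rss.parse helper: yield one playable
# item per <item> element in the feed.
import xml.etree.ElementTree as ET

def parse(rss_data, content_type=None):
    for item in ET.fromstring(rss_data).iter("item"):
        enclosure = item.find("enclosure")
        uri = enclosure.get("url") if enclosure is not None else item.findtext("link")
        yield {
            "label": item.findtext("title") or "",
            "path": plugin.url_for("play", uri=uri),
            "is_playable": True,
        }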
Example 35
def from_torrent_url(url):
    import base64
    import bencode
    import hashlib
    import urllib
    from xbmctorrent.utils import url_get
    torrent_data = url_get(url)
    metadata = bencode.bdecode(torrent_data)
    hashcontents = bencode.bencode(metadata['info'])
    digest = hashlib.sha1(hashcontents).digest()
    b32hash = base64.b32encode(digest)
    params = {
        'dn': metadata['info']['name'],
        'tr': metadata['announce'],
    }
    plugin.log.info(params)
    paramstr = urllib.urlencode(params)
    return 'magnet:?%s&%s' % ('xt=urn:btih:%s' % b32hash, paramstr)
Example 36
def bitsnoop_page(root, page):
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d/" % (root, page)),
                        headers=HEADERS,
                        params={
                            "fmt": "rss",
                            "sort": "n_s",
                            "dir": "desc",
                        })
    return rss.parse(page_data)
Example 38
def cpb_page(cat, query, sort, page):
    log.debug("THE CAT = " + cat + " & THE Q = " + query)
    if "cpasbien" not in query:
        url_adr = BASE_URL + "/" + cat + "/" + query.replace(
            " ", "+") + "/page-" + str(page) + "," + sort
    else:
        url_adr = BASE_URL + "/view_cat.php?categorie=" + cat + "&page=" + str(
            page) + "&trie=" + sort[5:]
    log.debug("THE URL = " + url_adr)
    html_data = url_get(url_adr)

    soup = BeautifulSoup(html_data, "html5lib")
    name_nodes = soup.findAll("td", "torrent-aff")

    for name_node in name_nodes:
        title = name_node.find("a").text
        html_uri = name_node.find("a")["href"]
        torrent_basename = os.path.basename(html_uri)
        uri_addr = BASE_URL + "/_torrents/" + torrent_basename.replace(
            ".html", ".torrent")
        img_addr = BASE_URL + "/_pictures/" + torrent_basename.replace(
            ".html", ".jpg")
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=uri_addr),
            "is_playable": True,
            "thumbnail": img_addr,
            "properties": {
                "fanart_image": img_addr,
            },
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("cpb_page",
                               cat=cat,
                               query=query,
                               sort=sort,
                               page=int(page) + 1),
        "is_playable": False,
    }
Example 39
def extratorrent_page(type_="", cid="", search="", page=1):
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.pop("content_type", None)
    if content_type:
        plugin.set_content(content_type)

    params = {
        "type": type_,
        "search": search,
        "cid": cid,
    }
    params.update(plugin.request.args_dict)
    page_data = url_get(urljoin(BASE_URL, "/rss.xml"),
                        headers=HEADERS,
                        params=params)
    return rss.parse(page_data, content_type)
Example 40
def nyaa_page(cats, offset, sort, order, term=""):
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    offset = int(offset)
    rss_data = url_get(BASE_URL, headers=HEADERS, params={
        "cats": cats,
        "offset": "%d" % offset,
        "sort": sort,
        "order": order,
        "term": term,
        "page": "rss",
    })
    for item in rss.parse(rss_data):
        yield item
    yield {
        "label": ">> Next page",
        "path": plugin.url_for(term and "search_result_page" or "default_nyaa_page", cats=cats, sort=sort, order=order, term=term, offset=offset + 1),
        "is_playable": False,
    }
Example 41
def _torrents3d_login(referer=BASE_URL):
    from urlparse import urljoin

    HEADERS["Referer"] = referer
    HEADERS["Origin"] = BASE_URL
    HEADERS["Content-Type"] = "application/x-www-form-urlencoded"

    values = {
        "login_username": plugin.get_setting("t3d_login"),
        "login_password": plugin.get_setting("t3d_passw"),
        "autologin": 1,
        "login": "******"
    }

    html_data = url_get(urljoin(BASE_URL, "login.php"), post=values, headers=HEADERS)

    HEADERS["Referer"] = BASE_URL
    del HEADERS["Origin"]

    return html_data
Example 42
def _torrents3d_login(referer=BASE_URL):
    from urlparse import urljoin

    HEADERS["Referer"] = referer
    HEADERS["Origin"] = BASE_URL
    HEADERS["Content-Type"] = "application/x-www-form-urlencoded"

    values = {
        "login_username": plugin.get_setting("t3d_login"),
        "login_password": plugin.get_setting("t3d_passw"),
        "autologin": 1,
        "login": "******"
    }

    html_data = url_get(urljoin(BASE_URL, "login.php"),
                        post=values,
                        headers=HEADERS)

    HEADERS["Referer"] = BASE_URL
    del HEADERS["Origin"]

    return html_data
Example 43
def piratebay_record(node):
    import re
    from xbmctorrent.utils import url_get
    from urlparse import urljoin
    from xbmctorrent import tmdb

    node.seeds, node.peers = map(lambda x: x.text, node.parent.parent.findAll("td")[2:])
    node.magnet_node = node.parent.findAll("a")[1]
    node.desc_node = node.parent.findAll("font", "detDesc")[0]
    node.size = re.search("Size (.*?),", node.desc_node.text).group(1)
    node.txt = "%s (%s S:%s P:%s)" % (node.a.text, node.size.replace("&nbsp;", " "), node.seeds, node.peers)

    node.item = {}
    try:
        node.search_result = url_get(urljoin(BASE_URL, node.parent.findAll("a")[0]["href"]), headers=HEADERS)
    except:
        pass
    else:
        if node.search_result:
            try:
                node.imdb_url = re.search("http://www.imdb.com/title/tt[0-9]*", node.search_result).group(0)
            except:
                pass
            else:
                if node.imdb_url:
                    node.imdb_id = re.search(r"(tt\d+)", node.imdb_url).group(0)

                    if node.imdb_id:
                        node.release_tags = tmdb.get_list_item(tmdb.get(node.imdb_id))

                        if node.release_tags:
                            node.item.update(node.release_tags)

    node.item.update({
        "label": node.txt,
        "path": plugin.url_for("play", uri=node.magnet_node["href"]),
        "is_playable": True,
    })
    return node.item
Example 44
def cpb_series():
    url_adr = "%s/series/" % BASE_URL
    html_movies = url_get(url_adr)
    soup = BeautifulSoup(html_movies, "html5lib")
    s2 = soup.find("div", "affichage-sous-categorie").find_all("a")  # ,"affichage-sous-categorie")
    sous_cat = []
    for aa in s2:
        if aa.text != "":
            sous_cat_label = (aa.text).encode("ascii", "replace")
            sous_cat_name = "films"
            sous_cat_query = string.split(aa.get("href"), "/")[-2]
            sous_cat_sort = SORT_DATE + SORT_DIRECTION_DESC
            sous_cat_page = 0
            sous_cat += [
                {
                    "label": sous_cat_label,
                    "path": plugin.url_for(
                        "cpb_page", cat=sous_cat_name, query=sous_cat_query, sort=sous_cat_sort, page=sous_cat_page
                    ),
                }
            ]
    return sous_cat
Example 45
def search(name, complete=False):
    from xbmctorrent.caching import shelf
    import hashlib
    search_hash = hashlib.sha1(name).hexdigest()
    with shelf("com.thetvdb.search.%s" % search_hash) as show:
        if not show:
            import re
            import xml.etree.ElementTree as ET
            from xbmctorrent.utils import url_get
            dom = ET.fromstring(
                url_get("%s/api/GetSeries.php" % BASE_URL,
                        params={
                            "seriesname": name,
                        },
                        headers=HEADERS))
            if not len(dom):
                return
            meta = dom2dict(dom[0])
            if not complete:
                return update_image_urls(meta)
            show.update(get(meta["id"]))
        return show
Example 46
def torrents3d_page(section, page):
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "section/%s/%s/" % (section, PAGE_SIZE * page)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    regexp = re.compile("tr-(\d+)")
    nodes = soup.findAll("tr", id=regexp)

    for node in nodes:
        tds = node.findAll("td")
        if len(tds) < 5:
            continue
        size, seeds = map(lambda x: x.text.strip(), tds[1:3])
        article_node = tds[0]
        article_href = article_node.find("a", class_="tt-text")
        stereo, title = _torrents3d_cleantitle(article_href.text)
        text = "[%s] %s (%s S:%s)" % (stereo, title, size.replace("&nbsp;", " "), seeds)
        yield {
            "label": text,
            "path": plugin.url_for("torrents3d_play", article=article_href["id"][3:]),
            "is_playable": True,
            "info": {
                "title": title
            }
        }

    if len(nodes) == PAGE_SIZE:
        next_page = {
            "label": u"[COLOR FFFFFF00][Далее >>][/COLOR]",
            "path": plugin.url_for("torrents3d_page", section=section, page=page + 1),
            "is_playable": False,
        }
        yield next_page
Example 47
def torrents3d_resuls(query, page):
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    page = int(page)
    params = {"nm": query, "start": page * PAGE_SIZE}

    html_data = url_get(urljoin(BASE_URL, "tracker.php"), params=params, headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    nodes = soup.findAll("tr", id=re.compile("tor_(\d+)"))

    for node in nodes:
        download_node = node.find("a", class_="tr-dl")
        # skip closed
        if not download_node:
            continue

        article_href = node.find("a", class_="genmed")["href"]
        size = download_node.text.replace("&nbsp;", " ").strip()
        title, _, _, seeds, peers, _ = map(lambda x: x.text.strip(), node.findAll("td")[3:])
        text = "[COLOR FFFFFFCC][%s][/COLOR] %s (%s S:%s P:%s)" % (_torrents3d_cleantitle(title) + (size, seeds, peers))
        yield {
            "label": text,
            "path": plugin.url_for("torrents3d_play", article=re.search(".*\/(\d+)\/", article_href).group(1)),
            "is_playable": True,
        }

    if len(nodes) == PAGE_SIZE:
        next_page = {
            "label": u"[COLOR FFFFFF00][Далее >>][/COLOR]",
            "path": plugin.url_for("torrents3d_resuls", query=query, page=page + 1),
            "is_playable": False,
        }
        yield next_page
Example 48
def get_torrent_info(url):
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import first, url_get

    response = url_get(url)
    soup = BeautifulSoup(response, "html5lib")
    movie = first(
        soup.select(
            'td[itemtype="http://schema.org/Product"] div.font11px a[href="/movies/"]'
        )) is not None
    serie = first(
        soup.select(
            'td[itemtype="http://schema.org/Product"] div.font11px a[href="/tv/"]'
        )) is not None

    def itemprop(prop, default=None):
        node = first(soup.select('[itemprop="%s"]' % prop))
        return node and node.text or default

    def itemprops(prop, default=[]):
        nodes = soup.select('[itemprop="%s"]' % prop)
        return [node.text for node in nodes] or default

    return {
        "movie": movie and not serie,
        "serie": serie and not movie,
        "seeders": itemprop("seeders"),
        "leechers": itemprop("leechers"),
        "title": itemprop("name"),
        "quality": itemprop("quality"),
        "genres": itemprops("genre"),
        "magnet_url": first(soup.select(".magnetlinkButton"))["href"],
        "poster": first(soup.select('a.movieCover img'), {}).get("href"),
        "cast": [cast.text for cast in soup.select('[itemprop="name"] a')],
        "imdb_id": find_imdb_id(soup),
    }
Example 49
def get_magnet(url):
    from bencode import bdecode
    from xbmctorrent.utils import url_get

    return generate_magnet(bdecode(url_get(url)))
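
get_magnet delegates to xbmctorrent.magnet.generate_magnet. A plausible sketch of it, built by analogy with what from_torrent_url in Example 35 does by hand (the real helper may differ in signature and tracker handling):

# Plausible sketch of generate_magnet, mirroring from_torrent_url
# (Example 35); assumes metadata is a bdecode()d .torrent dict.
import base64
import hashlib
import urllib
import bencode

def generate_magnet(metadata, name=None):
    infohash = base64.b32encode(
        hashlib.sha1(bencode.bencode(metadata["info"])).digest())
    params = {"dn": name or metadata["info"]["name"]}
    if "announce" in metadata:
        params["tr"] = metadata["announce"]
    return "magnet:?xt=urn:btih:%s&%s" % (infohash, urllib.urlencode(params))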
Example 50
def torrents3d_play(article):
    import re, xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from urlparse import urljoin
    from xbmctorrent.magnet import generate_magnet
    from xbmctorrent.utils import SafeDialogProgress

    article = int(article)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1=u"Получение информации о релизе...",
                      line2="",
                      line3="")

        url = urljoin(BASE_URL, "article/%d" % article)

        try:
            html_data = url_get(url, headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            torrent_href = soup.find("a", class_="genmed")

            if not torrent_href:
                dialog.update(percent=50,
                              line2=u"Требуется авторизация. Авторизация...")

                if not plugin.get_setting(
                        "t3d_login") and not plugin.get_setting("t3d_passwd"):
                    plugin.notify("Проверьте настройки авторизации.",
                                  delay=15000)
                    return

                html_data = _torrents3d_login(url)
                soup = BeautifulSoup(html_data, "html5lib")
                torrent_href = soup.find("a", class_="genmed")

            if not torrent_href:
                xbmcgui.Dialog().ok(
                    plugin.name,
                    "Авторизация неудалась. Проверьте настройки авторизации.")
                return

            dialog.update(percent=100, line2=u"Обработка данных.")

            from bencode import bencode, bdecode

            title = "[%s] %s" % _torrents3d_cleantitle(
                soup.find("a", class_="tt-text").text)
            torrent_data = url_get(torrent_href["href"], headers=HEADERS)
            metadata = bdecode(torrent_data)

            plugin.redirect(
                plugin.url_for("play",
                               uri=generate_magnet(metadata, uenc(title))))

        except Exception:
            plugin.log.error("Cannot get data from remote server")
            xbmcgui.Dialog().ok(plugin.name,
                                u"Не удалось получить данные от сервера")
            return
Example 51
def proc_close():
    if proc.poll() is None:  # still running
        url_get("http://%s/shutdown" % proc.bind_address)
Example 52
def _eztv_get_show():
    plugin.log.info("Getting show")
    response = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
    plugin.log.info("Got show")
    return BeautifulSoup(response, "html5lib")
Example 53
def _eztv_get_show():
    return BeautifulSoup(url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS), "html5lib")
Example 54
def eztv_shows_by_letter(letter):
    import re
    import xbmc
    from bs4 import BeautifulSoup
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from xbmctorrent.caching import shelf
    from xbmctorrent.utils import url_get, SafeDialogProgress
    from xbmctorrent import tvdb

    with shelf("it.eztv.shows") as eztv_shows:
        if not eztv_shows:
            response = url_get("%s/showlist/" % BASE_URL, headers=HEADERS)
            soup = BeautifulSoup(response, "html5lib")
            nodes = soup.findAll("a", "thread_link")
            for node in nodes:
                show_id, show_named_id = node["href"].split("/")[2:4]
                show_name = node.text
                show_first_letter = show_name[0].lower()
                if re.match("\d+", show_first_letter):
                    show_first_letter = "0-9"
                eztv_shows.setdefault(show_first_letter, {}).update({
                    show_id: {
                        "id": show_id,
                        "named_id": show_named_id,
                        "name": node.text,
                    }
                })

    shows_list = sorted(eztv_shows[letter.lower()].values(), key=lambda x: x["name"].lower())

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching serie information...", line2="", line3="")

        state = {"done": 0}
        def on_serie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(shows_list)),
                line2=data and data["seriesname"] or "",
            )

        with futures.ThreadPoolExecutor(max_workers=5) as pool_tvdb:
            tvdb_list = [pool_tvdb.submit(tvdb.search, show["name"], True) for show in shows_list]
            [future.add_done_callback(on_serie) for future in tvdb_list]
            while not all(job.done() for job in tvdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    tvdb_list = [job.result() for job in tvdb_list]
    for i, (eztv_show, tvdb_show) in enumerate(izip(shows_list, tvdb_list)):
        if tvdb_show:
            item = tvdb.get_list_item(tvdb_show)
            item.update({
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
            })
            yield item
        else:
            yield {
                "label": eztv_show["name"],
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
            }
Example 55
def _eztv_get_show():
    plugin.log.info("Getting show")
    response = url_get("%s/shows/%s/" % (BASE_URL, show_id),
                       headers=HEADERS)
    plugin.log.info("Got show")
    return response
Example 56
def _eztv_get_show():
    return url_get("%s/shows/%s/" % (BASE_URL, show_id),
                   headers=HEADERS)
Example 57
def get_magnet(url):
    from bencode import bdecode
    from xbmctorrent.utils import url_get

    return generate_magnet(bdecode(url_get(url, headers=HEADERS)))
Example 58
def kickass_page(root, page):
    import re
    import xbmc
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from concurrent import futures
    from urlparse import urljoin
    from xbmctorrent import tmdb
    from xbmctorrent.utils import first, terminating, url_get, get_quality_from_name

    content_type = first(plugin.request.args.get("content_type")) or None
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    page_data = url_get(urljoin(BASE_URL, "%s/%d" % (root, page)))
    soup = BeautifulSoup(page_data, "html5lib")
    torrent_nodes = [
        node.parent for node in soup.findAll("td", "torrentnameCell")
    ]

    def _get_torrent_info_with_meta(url):
        torrent_info = get_torrent_info(url)
        if torrent_info["movie"] and torrent_info["imdb_id"]:
            torrent_info["tmdb"] = tmdb.get(torrent_info["imdb_id"])
        return torrent_info

    with closing(xbmcgui.DialogProgress()) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1="Fetching torrent information...",
                      line2="",
                      line3="")

        state = {"done": 0}

        def on_torrent(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(torrent_nodes)),
                line2=data.get("title") or "",
            )

        with futures.ThreadPoolExecutor(max_workers=3) as pool:
            movies = []
            for node in torrent_nodes:
                info_nodes = node.findAll("td")
                a_node = first(info_nodes[0].select("div.torrentname a.plain"))
                movies.append(
                    pool.submit(_get_torrent_info_with_meta,
                                urljoin(BASE_URL, a_node["href"])))
            [future.add_done_callback(on_torrent) for future in movies]
            while not all(job.done() for job in movies):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    movies = [movie.result() for movie in movies]
    for i, movie in enumerate(movies):
        if movie.get("tmdb"):
            item = tmdb.get_list_item(movie["tmdb"])
            proper_kat_title = re.sub("\s+", " ",
                                      re.sub(r"[().-]", " ", movie["title"]))
            item["label"] = "%s (%s)" % (item["label"],
                                         re.sub(r"(?i)%s" % item["label"], "",
                                                proper_kat_title).strip())
        else:
            item = {
                "label": movie["title"],
                "info": {
                    "genre": ", ".join(movie["genres"]),
                }
            }
        lower_title = movie["title"].lower()
        if "x264" in lower_title:
            item.setdefault("stream_info", {})["codec"] = item.setdefault(
                "info", {})["video_codec"] = "h264"
        if "xvid" in lower_title:
            item.setdefault("stream_info", {})["codec"] = item.setdefault(
                "info", {})["video_codec"] = "xvid"
        if "720p" in lower_title:
            item.setdefault("stream_info", {}).update({
                "width": 1280,
                "height": 720,
            })
        if "1080p" in lower_title:
            item.setdefault("stream_info", {}).update({
                "width": 1920,
                "height": 1080,
            })
        item.update({
            "path": plugin.url_for("play", magnet=movie["magnet_url"]),
            "is_playable": True,
        })
        item.setdefault("info", {}).update({
            "count":
            i,
            "genre":
            "%s (S:%s P:%s)" % (item.get("info", {}).get("genre")
                                or "", movie["seeders"], movie["leechers"]),
        })
        item.setdefault("stream_info", {}).update({
            "video":
            get_quality_from_name(movie["title"]),
        })
        yield item
    yield {
        "label": "Next page...",
        "path": plugin.url_for("kickass_page",
                               root=root,
                               page=page + 1,
                               content_type=content_type),
    }