def popcorn_index():
    import json
    from stream.caching import shelf
    from stream.utils import url_get

    with shelf("popcorn.shows") as popcorn_shows:
        if not popcorn_shows:
            pages = json.loads(url_get("%sshows" % BASE_URL, headers=HEADERS))
            popcorn_shows['shows'] = []
            for page in pages:
                plugin.log.info("%s%s" % (BASE_URL, page))
                response = url_get("%s%s" % (BASE_URL, page), headers=HEADERS)
                if response == "":
                    continue
                for show in json.loads(response):
                    popcorn_shows['shows'].append({
                        "label": show['title'],
                        "icon": show['images']['poster'],
                        "thumbnail": show['images']['poster'],
                        "path": plugin.url_for("popcorn_get_show", show_id=show['_id']),
                        "is_playable": False,
                        'properties': {'fanart_image': show['images']['fanart']},
                    })
        return popcorn_shows['shows']
def popcorn_get_show(show_id):
    import json
    from stream.utils import url_get

    response = url_get("%sshow/%s" % (BASE_URL, show_id), headers=HEADERS)
    show = json.loads(response)
    items = []
    for episode in show['episodes']:
        item = {}
        item["info"] = {}
        item["info"]["tvshowtitle"] = show['title']
        item["info"]["title"] = episode["title"]
        item["info"]["season"] = episode["season"]
        item["info"]["episode"] = episode["episode"]
        item["info"]["premiered"] = episode["first_aired"]
        # 'orden' is only used as a sort key: 100 * season + episode.
        item["info"]['orden'] = 100 * episode['season'] + episode['episode']
        item["icon"] = show['images']['poster']
        item["thumbnail"] = show['images']['poster']
        item['label'] = "S%02dE%02d %s" % (episode['season'], episode['episode'], episode["title"])
        item["path"] = plugin.url_for("play", uri=episode['torrents']['0']['url'])
        item["stream_info"] = {"video": {"codec": "x264"}}
        item["is_playable"] = True
        item['properties'] = {'fanart_image': show['images']['fanart']}
        items.append(item)
    return sorted(items, key=lambda a: a['info']['orden'])
def mejorenvo_index():
    plugin.set_content("movies")
    import re
    from bs4 import BeautifulSoup
    from stream.utils import url_get
    from stream import tmdb

    def filter_descargar(el):
        # Movie links look like "...-pelicula-<id>.html"; skip comment pages.
        return el.has_attr('href') and '-pelicula-' in el['href'] and 'comentarios' not in el['href']

    for url in ["http://www.mejorenvo.com/descargar-peliculas.html",
                "http://www.mejorenvo.com/peliculas-p2.html"]:
        html_data = url_get(url, headers=HEADERS)
        soup = BeautifulSoup(html_data, "html5lib")
        for node in soup.findAll(filter_descargar):
            movie_id = re.search(r'pelicula-(\d+).html', node['href']).group(1)
            torrent_url = 'http://www.mejorenvo.com/descargar.php?t=peliculas&id=%s&torrent=1' % movie_id
            title = node.stripped_strings.next()
            results = tmdb.search(title)
            if not results['results']:
                continue
            info = results['results'][0]
            item = tmdb.get_list_item(info)
            item['label'] = '%s (%.1f)' % (info['title'], info['vote_average'])
            item['path'] = plugin.url_for("play", uri=torrent_url)
            item['is_playable'] = True
            yield item
def piratebay_page(root, page):
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from stream.utils import url_get

    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    for node in soup.findAll("div", "detName"):
        seeds, peers = map(lambda x: x.text, node.parent.parent.findAll("td")[2:])
        magnet_node = node.parent.findAll("a")[1]
        desc_node = node.parent.findAll("font", "detDesc")[0]
        size = re.search("Size (.*?),", desc_node.text).group(1)
        # The size field contains non-breaking spaces (U+00A0); normalize them.
        text = "%s (%s S:%s P:%s)" % (node.a.text, size.replace(u"\xa0", " "), seeds, peers)
        yield {
            "label": text,
            "path": plugin.url_for("play", uri=magnet_node["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
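# --- For context: these scrapers are generator views, where each yielded
# dict is one Kodi directory item. Below is a hypothetical wiring sketch,
# assuming the xbmcswift2-style plugin object that url_for/set_content
# imply elsewhere in this module; the route path and view name are made
# up, and this only runs inside Kodi. ---
@plugin.route("/piratebay/browse/<root>/<page>")
def piratebay_browse(root, page):
    # xbmcswift2 accepts any iterable of item dicts from a view function,
    # so the scraper generator above can be returned directly.
    return piratebay_page(root, page)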
def _get_all_meta():
    # Nested helper: show_id, ET (xml.etree.ElementTree), url_get, LANG,
    # HEADERS and show_base_url all come from the enclosing scope.
    r = url_get("%s/all/%s.xml" % (show_base_url(show_id), LANG),
                headers=HEADERS, with_immunicity=False)
    dom = ET.fromstring(r)
    if not len(dom):
        return
    return update_image_urls(dom2dict(dom))
def get_banners(show_id):
    import xml.etree.ElementTree as ET
    from stream.utils import url_get

    r = url_get("%s/banners.xml" % show_base_url(show_id), headers=HEADERS, with_immunicity=False)
    dom = ET.fromstring(r)
    if not len(dom):
        return
    return update_image_urls(dom2dict(dom))["banner"]
def piratebay_page(root, page):
    import xbmc
    import xbmcgui
    from concurrent import futures
    from contextlib import closing
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from itertools import izip
    from stream.utils import url_get, SafeDialogProgress

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching index page...", line2="", line3="")

        page = int(page)
        try:
            html_data = url_get(urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page)), headers=HEADERS)
        except Exception:
            xbmcgui.Dialog().ok("Piratebay", "Timeout")
            return
        soup = BeautifulSoup(html_data, "html5lib")
        nodes = soup.findAll("div", "detName")

        # Advance the progress dialog as each TMDB lookup finishes.
        state = {"done": 0}
        def on_movie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(nodes)),
                line2=data.get("label") or "",
            )

        dialog.update(percent=0, line1="Fetching movie information...", line2="", line3="")
        with futures.ThreadPoolExecutor(max_workers=10) as pool_tmdb:
            tmdb_list = [pool_tmdb.submit(piratebay_record, node) for node in nodes]
            for future in tmdb_list:
                future.add_done_callback(on_movie)
            # Poll instead of blocking so the user can cancel the dialog.
            while not all(job.done() for job in tmdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(200)

    tmdb_list = map(lambda job: job.result(), tmdb_list)
    for node, item in izip(nodes, tmdb_list):
        yield item
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
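# --- Minimal standalone sketch of the submit/callback/poll pattern used
# above and in the EZTV listing below; it assumes only the
# concurrent.futures backport already imported in this module. All names
# here are illustrative. ---
def _futures_pattern_demo():
    import time
    from concurrent import futures

    def fetch(n):
        time.sleep(0.1)          # stand-in for a network call
        return n * n

    with futures.ThreadPoolExecutor(max_workers=4) as pool:
        jobs = [pool.submit(fetch, n) for n in range(10)]
        for job in jobs:
            job.add_done_callback(lambda f: None)  # progress hook goes here
        # Poll until done; a UI loop would check for cancel between sleeps.
        while not all(job.done() for job in jobs):
            time.sleep(0.05)
    return [job.result() for job in jobs]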
def espoiler_play(url):
    from bs4 import BeautifulSoup
    from stream.utils import url_get

    html_data = url_get(url, headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")

    def filter_magnet(el):
        return el.has_attr('href') and 'magnet:' in el['href']

    for node in soup.findAll(filter_magnet):
        yield {
            'label': node['href'],
            'path': plugin.url_for("play", uri=node['href']),
            'is_playable': True,
        }
def get(show_id):
    from stream.caching import shelf

    with shelf("com.thetvdb.show.%s" % show_id) as show:
        if not show:
            import xml.etree.ElementTree as ET
            from stream.utils import url_get
            dom = ET.fromstring(url_get(show_url(show_id), headers=HEADERS, with_immunicity=False))
            if not len(dom):
                return
            meta = dom2dict(dom[0])
            meta = split_keys(meta, "actors", "genre", "writer")
            update_image_urls(meta)
            show.update(meta)
        return dict(show)
def from_torrent_url(url):
    import base64
    import hashlib
    import urllib
    import bencode
    from stream.utils import url_get

    torrent_data = url_get(url)
    metadata = bencode.bdecode(torrent_data)
    # The BitTorrent info-hash is the SHA-1 of the bencoded "info" dict.
    hashcontents = bencode.bencode(metadata['info'])
    digest = hashlib.sha1(hashcontents).digest()
    b32hash = base64.b32encode(digest)
    params = {
        'dn': metadata['info']['name'],
        'tr': metadata['announce'],
    }
    plugin.log.info(params)
    paramstr = urllib.urlencode(params)
    return 'magnet:?xt=urn:btih:%s&%s' % (b32hash, paramstr)
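# --- Minimal sketch of just the info-hash step above, run on made-up
# in-memory metadata so no download is involved; it assumes the same
# bencode package imported by from_torrent_url. ---
def _infohash_demo():
    import base64
    import hashlib
    import bencode

    # Hypothetical torrent metadata, only to exercise the hash step.
    metadata = {
        "announce": "http://tracker.example/announce",
        "info": {"name": "example.mkv", "piece length": 16384,
                 "pieces": "", "length": 0},
    }
    # SHA-1 of the bencoded "info" dict, base32-encoded, as above.
    digest = hashlib.sha1(bencode.bencode(metadata["info"])).digest()
    return "magnet:?xt=urn:btih:%s" % base64.b32encode(digest)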
def search(name, complete=False):
    import hashlib
    from stream.caching import shelf

    search_hash = hashlib.sha1(name).hexdigest()
    with shelf("com.thetvdb.search.%s" % search_hash) as show:
        if not show:
            import xml.etree.ElementTree as ET
            from stream.utils import url_get
            dom = ET.fromstring(url_get("%s/api/GetSeries.php" % BASE_URL, params={
                "seriesname": name,
            }, headers=HEADERS, with_immunicity=False))
            if not len(dom):
                return
            meta = dom2dict(dom[0])
            if not complete:
                return update_image_urls(meta)
            show.update(get(meta["id"]))
        return dict(show)
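# --- search() and get() share the same cache-on-first-miss idiom, shown
# here as a standalone sketch. It assumes only that stream.caching.shelf
# is the context-managed persistent dict it appears to be above; the key
# and fetch function are hypothetical. ---
def _cached_lookup(key, fetch):
    from stream.caching import shelf
    # First call populates the shelf; later calls return the cached copy.
    with shelf("demo.cache.%s" % key) as entry:
        if not entry:
            entry.update(fetch(key))
        return dict(entry)
# e.g. _cached_lookup("tt0903747", lambda k: {"id": k, "name": "demo"})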
def piratebay_record(node):
    import re
    from urlparse import urljoin
    from stream.utils import url_get
    from stream import tmdb

    node.seeds, node.peers = map(lambda x: x.text, node.parent.parent.findAll("td")[2:])
    node.magnet_node = node.parent.findAll("a")[1]
    node.desc_node = node.parent.findAll("font", "detDesc")[0]
    node.size = re.search("Size (.*?),", node.desc_node.text).group(1)
    # The size field contains non-breaking spaces (U+00A0); normalize them.
    node.txt = "%s (%s S:%s P:%s)" % (node.a.text, node.size.replace(u"\xa0", " "), node.seeds, node.peers)
    node.item = {}
    # Best effort: follow the detail page, scrape an IMDB id from it, and
    # pull TMDB metadata. Any failure simply leaves node.item bare.
    try:
        node.search_result = url_get(urljoin(BASE_URL, node.parent.findAll("a")[0]["href"]), headers=HEADERS)
    except Exception:
        pass
    else:
        if node.search_result:
            try:
                node.imdb_url = re.search("http://www.imdb.com/title/tt[0-9]*", node.search_result).group(0)
            except Exception:
                pass
            else:
                if node.imdb_url:
                    node.imdb_id = re.search(r"(tt\d+)", node.imdb_url).group(0)
                    if node.imdb_id:
                        node.release_tags = tmdb.get_list_item(tmdb.get(node.imdb_id))
                        if node.release_tags:
                            node.item.update(node.release_tags)
    node.item.update({
        "label": node.txt,
        "path": plugin.url_for("play", uri=node.magnet_node["href"]),
        "is_playable": True,
    })
    return node.item
def eztv_shows_by_letter(letter):
    import re
    import xbmc
    from bs4 import BeautifulSoup
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from stream.caching import shelf
    from stream.utils import url_get, SafeDialogProgress
    from stream import tvdb

    with shelf("it.eztv.shows") as eztv_shows:
        if not eztv_shows:
            response = url_get("%s/showlist/" % BASE_URL, headers=HEADERS)
            soup = BeautifulSoup(response, "html5lib")
            for node in soup.findAll("a", "thread_link"):
                show_id, show_named_id = node["href"].split("/")[2:4]
                show_name = node.text
                if show_name == "":
                    continue
                show_first_letter = show_name[0].lower()
                if re.match(r"\d+", show_first_letter):
                    show_first_letter = "0-9"
                eztv_shows.setdefault(show_first_letter, {}).update({
                    show_id: {
                        "id": show_id,
                        "named_id": show_named_id,
                        "name": node.text,
                    }
                })
        shows_list = sorted(eztv_shows[letter.lower()].values(), key=lambda x: x["name"].lower())

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching series...", line2="", line3="")

        state = {"done": 0}
        def on_serie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(shows_list)),
                line2=data and data["seriesname"] or "",
            )

        with futures.ThreadPoolExecutor(max_workers=5) as pool_tvdb:
            tvdb_list = [pool_tvdb.submit(tvdb.search, show["name"], True) for show in shows_list]
            for future in tvdb_list:
                future.add_done_callback(on_serie)
            while not all(job.done() for job in tvdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    # Collect results, keeping a None placeholder for failed lookups so the
    # list stays aligned with shows_list for izip below.
    results = []
    for job in tvdb_list:
        try:
            results.append(job.result())
        except Exception:
            results.append(None)
    tvdb_list = results

    for eztv_show, tvdb_show in izip(shows_list, tvdb_list):
        if tvdb_show:
            item = tvdb.get_list_item(tvdb_show)
            item.update({
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
            })
            yield item
        else:
            yield {
                "label": eztv_show["name"],
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
            }
def proc_close():
    # Nested helper: proc and url_get come from the enclosing scope.
    # poll() returns None while the process is still running.
    if proc.poll() is None:
        url_get("http://%s/shutdown" % proc.bind_address, with_immunicity=False)
def _eztv_get_show():
    # Nested helper: show_id comes from the enclosing scope.
    plugin.log.info("Getting show")
    response = url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS)
    plugin.log.info("Got show")
    return BeautifulSoup(response, "html5lib")
def _eztv_get_show():
    # Same helper without the logging, condensed to a single expression.
    return BeautifulSoup(url_get("%s/shows/%s/" % (BASE_URL, show_id), headers=HEADERS), "html5lib")
def piratebay_page2(root, page):
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from stream.utils import url_get
    from stream import tmdb

    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "search/BluRay/%s/7/%s" % (page, root)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    for node in soup.findAll("div", "detName"):
        seeds, peers = map(lambda x: x.text, node.parent.parent.findAll("td")[2:])
        magnet_node = node.parent.findAll("a")[1]
        desc_node = node.parent.findAll("font", "detDesc")[0]
        size = re.search("Size (.*?),", desc_node.text).group(1)
        size = size.replace(u"\xa0", " ")  # normalize non-breaking spaces
        text = "%s (%s S:%s P:%s)" % (node.a.text, size, seeds, peers)
        try:
            # Strip year/resolution tags off the release name before the TMDB search.
            title = re.search(r"(.*?)(\d{4}|720p|1080p|\()", node.a.text).group(1).replace(".", " ")
            tmdb_list = tmdb.search(title)
            if len(tmdb_list["results"]) > 0:
                tmdb_meta = tmdb.get(tmdb_list["results"][0]["id"])
                item = tmdb.get_list_item(tmdb_meta)
                width, height = 1920, 1080
                if node.a.text.find("720p") >= 0:
                    width, height = 1280, 720
                item.update({
                    "path": plugin.url_for("play", uri=magnet_node["href"]),
                    "is_playable": True,
                    "label": "%s %sp (%s) S:%s P:%s" % (item["label"], height, size, seeds, peers),
                })
                audio = "dts" if node.a.text.find("DTS") >= 0 else "aac"
                item.setdefault("stream_info", {}).update({
                    "video": {
                        "codec": "h264",
                        "width": width,
                        "height": height,
                    },
                    "audio": {
                        "codec": audio,
                    },
                })
                yield item
            else:
                yield {
                    "label": text,
                    "path": plugin.url_for("play", uri=magnet_node["href"]),
                    "is_playable": True,
                }
        except Exception as e:
            plugin.log.error(e)
            yield {
                "label": text,
                "path": plugin.url_for("play", uri=magnet_node["href"]),
                "is_playable": True,
            }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page2", root=root, page=page + 1),
        "is_playable": False,
    }
def eztv_shows_by_letter(letter):
    import re
    import xbmc
    from bs4 import BeautifulSoup
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from stream.caching import shelf
    from stream.utils import url_get, SafeDialogProgress
    from stream import tvdb

    with shelf("it.eztv.shows") as eztv_shows:
        if not eztv_shows:
            response = url_get("%s/showlist/" % BASE_URL, headers=HEADERS)
            soup = BeautifulSoup(response, "html5lib")
            for node in soup.findAll("a", "thread_link"):
                show_id, show_named_id = node["href"].split("/")[2:4]
                show_name = node.text
                show_first_letter = show_name[0].lower()
                if re.match(r"\d+", show_first_letter):
                    show_first_letter = "0-9"
                eztv_shows.setdefault(show_first_letter, {}).update({
                    show_id: {
                        "id": show_id,
                        "named_id": show_named_id,
                        "name": node.text,
                    }
                })
        shows_list = sorted(eztv_shows[letter.lower()].values(), key=lambda x: x["name"].lower())

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Fetching series...", line2="", line3="")

        state = {"done": 0}
        def on_serie(future):
            data = future.result()
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(shows_list)),
                line2=data and data["seriesname"] or "",
            )

        with futures.ThreadPoolExecutor(max_workers=5) as pool_tvdb:
            tvdb_list = [pool_tvdb.submit(tvdb.search, show["name"], True) for show in shows_list]
            for future in tvdb_list:
                future.add_done_callback(on_serie)
            while not all(job.done() for job in tvdb_list):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

    tvdb_list = [job.result() for job in tvdb_list]
    for eztv_show, tvdb_show in izip(shows_list, tvdb_list):
        if tvdb_show:
            item = tvdb.get_list_item(tvdb_show)
            item.update({
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"], tvdb_id=tvdb_show["id"])
            })
            yield item
        else:
            yield {
                "label": eztv_show["name"],
                "path": plugin.url_for("eztv_get_show_seasons", show_id=eztv_show["id"])
            }