def __init__(self, url, path, filename=None, headers=None, resume=True, max_connections=10,
             block_size=2 ** 17, part_size=2 ** 24, max_buffer=10, json_path=None):
    """
    Prepare a multi-connection downloader for `url` stored at `path`/`filename`.

    url             -- download URL; may carry extra headers after a separator,
                       split out by __url_to_headers__().
    path            -- destination directory.
    filename        -- destination file name (derived from server headers if None).
    headers         -- extra HTTP headers (dict or list of pairs); merged over defaults.
                       (was a mutable default `[]`; now None-guarded)
    resume          -- try to continue a partially downloaded file.
    max_connections -- number of parallel download threads.
    block_size      -- bytes fetched per read.
    part_size       -- size of each independently downloaded part.
    max_buffer      -- max parts buffered in memory before being flushed to disk.
    json_path       -- optional path of a JSON status file.
    """
    # Parameters
    self._resume = resume
    self._path = path
    self._filename = filename
    self._max_connections = max_connections
    self._block_size = block_size
    self._part_size = part_size
    self._max_buffer = max_buffer
    self._json_path = json_path

    self._json_text = ''
    self._json_item = Item()

    # Temp folder: prefer Kodi's special://temp, otherwise environment variables
    try:
        import xbmc
        self.tmp_path = xbmc.translatePath("special://temp/")
    except:
        self.tmp_path = os.getenv("TEMP") or os.getenv("TMP") or os.getenv("TMPDIR")

    # Enum-like holder for the downloader states
    self.states = type('states', (), {"stopped": 0, "connecting": 1, "downloading": 2,
                                      "completed": 3, "error": 4, "saving": 5})

    self._state = self.states.stopped
    self._download_lock = Lock()
    self._headers = {"User-Agent": "Kodi/15.2 (Windows NT 10.0; WOW64) App_Bitness/32 Version/15.2-Git:20151019-02e7013"}
    self._speed = 0
    self._buffer = {}
    self._seekable = True

    # Worker threads are created here but started elsewhere
    self._threads = [Thread(target=self.__start_part__,
                            name="Downloader %s/%s" % (x + 1, self._max_connections))
                     for x in range(self._max_connections)]
    self._speed_thread = Thread(target=self.__speed_metter__, name="Speed Meter")
    self._save_thread = Thread(target=self.__save_file__, name="File Writer")

    # Merge the caller-supplied headers over the defaults
    self._headers.update(dict(headers or []))

    # Split any headers embedded in the url
    self.__url_to_headers__(url)

    # Query the server for size and range support
    self.__get_download_headers__()

    self._file_size = int(self.response_headers.get("content-length", "0"))

    # Without byte-range support (or with unknown size) fall back to one connection
    if not self.response_headers.get("accept-ranges") == "bytes" or self._file_size == 0:
        self._max_connections = 1
        self._part_size = 0
        self._resume = False

    # Resolve the final file name
    self.__get_download_filename__()

    # Open in "a+" to create the file if it does not exist, then reopen in "r+b" to allow seek()
    self.file = filetools.file_open(filetools.join(self._path, self._filename), "a+", vfs=VFS)
    if self.file:
        self.file.close()
    self.file = filetools.file_open(filetools.join(self._path, self._filename), "r+b", vfs=VFS)
    if not self.file:
        return

    # Probe whether seek() works past 2GB (32-bit builds may overflow)
    if self._file_size >= 2 ** 31 or not self._file_size:
        try:
            self.file.seek(2 ** 31, 0)
        except OverflowError:
            self._seekable = False
            logger.info("Cannot do seek() or tell() in files larger than 2GB")

    self.__get_download_info__()

    try:
        # fix: was `self._pathencode('utf-8')` / `self._filenameencode('utf-8')`
        # (missing attribute dots -> AttributeError, silently swallowed)
        logger.info("Download started: Parts: %s | Path: %s | File: %s | Size: %s" %
                    (str(len(self._download_info["parts"])), self._path.encode('utf-8'),
                     self._filename.encode('utf-8'), str(self._download_info["size"])))
    except:
        pass
def getchanneltypes(preferred_thumb=""):
    """
    Build the list of channel-category Items.

    Scans every channel XML under the runtime "channels" folder, keeps the
    categories of channels that pass the adult-mode and language filters, and
    returns one Item per known category, preceded by an "all" entry.
    """
    logger.info("channelselector getchanneltypes")

    # Known categories, in presentation order
    valid_types = ["movie", "serie", "anime", "documentary", "vos", "torrent",
                   "latino", "adult", "deportes"]

    # Read the channel list
    channel_path = os.path.join(config.get_runtime_path(), "channels", '*.xml')
    logger.info("channelselector.getchanneltypes channel_path=" + channel_path)
    channel_files = glob.glob(channel_path)
    channel_language = config.get_setting("channel_language")
    logger.info("channelselector.getchanneltypes channel_language=" + channel_language)

    # Collect the categories present in the filtered channels
    channel_types = []
    for channel_file in channel_files:
        logger.info("channelselector.getchanneltypes channel=" + channel_file)
        if not channel_file.endswith(".xml"):
            continue
        try:
            channel_parameters = channeltools.get_channel_parameters(channel_file[:-4])
            logger.info("channelselector.filterchannels channel_parameters=" + repr(channel_parameters))

            # Skip adult channels while adult mode is off
            if channel_parameters["adult"] == "true" and config.get_setting("adult_mode") == "false":
                continue
            # Skip channels filtered out by language
            if channel_language != "all" and channel_parameters["language"] != channel_language:
                continue

            for category in channel_parameters["categories"]:
                logger.info("channelselector.filterchannels category=" + category)
                if category not in channel_types and category in valid_types:
                    channel_types.append(category)
        except:
            logger.info("Se ha producido un error al leer los datos del canal " + channel_file + traceback.format_exc())

    logger.info("channelselector.getchanneltypes Encontrados:")
    for channel_type in channel_types:
        logger.info("channelselector.getchanneltypes channel_type=" + channel_type)

    # Localized display names for the categories
    dict_cat_lang = {'movie': config.get_localized_string(30122),
                     'serie': config.get_localized_string(30123),
                     'anime': config.get_localized_string(30124),
                     'documentary': config.get_localized_string(30125),
                     'vos': config.get_localized_string(30136),
                     'adult': config.get_localized_string(30126),
                     'latino': config.get_localized_string(30127),
                     'deportes': 'Deportes'}

    # Now build the itemlist, starting with the "all channels" entry
    itemlist = [Item(title=config.get_localized_string(30121), channel="channelselector",
                     action="listchannels", category="all",
                     thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb),
                                                "thumb_canales_todos.png"))]

    logger.info("channelselector.getchanneltypes Ordenados:")
    for channel_type in valid_types:
        logger.info("channelselector.getchanneltypes channel_type=" + channel_type)
        if channel_type not in channel_types:
            continue
        title = dict_cat_lang.get(channel_type, channel_type)
        if title == "Deportes":
            thumbnail = "http://s6.postimg.org/5nlt6dk7l/deporteslogo.png"
        else:
            thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),
                                         "thumb_canales_" + channel_type + ".png")
        itemlist.append(Item(title=title, channel="channelselector", action="listchannels",
                             category=channel_type, thumbnail=thumbnail))
    return itemlist
def search(item, texto):
    """
    Search filesmonster.filesdl.net for `texto`.

    item.plot carries the first results page to fetch ("" means page 1).
    Five consecutive pages are downloaded and scraped together; a "next pages"
    entry is appended when the batch looks full (25 results).
    """
    logger.info("[filesmonster_catalogue.py] search")
    itemlist = []
    cuantos_videos = 0

    # First page to fetch comes in item.plot ("" -> page 1)
    if item.plot != "":
        pagina = int(item.plot)
    else:
        pagina = 1

    # Download 5 consecutive result pages and concatenate them
    # (was five copy-pasted download blocks)
    data = ""
    for offset in range(5):
        url = "http://filesmonster.filesdl.net/search.php?q=" + texto + "&page=" + str(pagina + offset)
        data += scrapertools.downloadpageGzip(url)

    # Extract the entries (folders)
    patronvideos = '<div class="panel-heading">.*?<a href="([^"]+)">([^<]+).*?</a>.*?<img src="([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[1].replace("–", "-").replace("’", "'").strip()
        scrapedurl = match[0]
        scrapedthumbnail = match[2]
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        scrapedplot = strip_tags(match[0])
        # Skip the "All" pseudo-entry and the ad banner (condition was duplicated)
        if scrapedtitle != "All" and scrapedthumbnail != "http://filesmonster.filesdl.net/img/pornbutton.gif":
            itemlist.append(
                Item(channel=__channel__, action="detail", title=scrapedtitle, url=scrapedurl,
                     thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
            cuantos_videos += 1

    # Links to the following block of pages
    if item.plot != "":
        plot = int(item.plot)
    else:
        plot = 1
    pagina = plot + 5
    pagina_despues = str(pagina + 4)
    pagina = str(pagina)
    # A full batch (25 results) suggests there are more pages
    if cuantos_videos == 25:
        itemlist.append(
            Item(channel=__channel__, action="lista",
                 title=">> siguientes (páginas " + pagina + " a " + pagina_despues + ")",
                 url=texto, thumbnail="", plot=pagina, folder=True))
    itemlist.append(
        Item(channel=__channel__, action="mainlist", title="<< volver al inicio", folder=True))
    return itemlist
def lista(item):
    """
    Build the paginated film list scraped from item.url.

    Each entry carries title, quality and audio labels (falling back to
    "[COLOR orange]MULTI[/COLOR]" when unmapped) plus TMDB-filled infoLabels.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t| |<br>', "", data)
    patron = "peli><a href=([^ ]+) title=(.*?)><img src=([^ ]+) alt=.*?><div class=([^>]+)>.*?<p>.*?<\/p>.*?flags ([^']+)'"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedcalidad, scrapedidioma in matches:
        year = scrapertools.find_single_match(scrapedtitle, '.*?\((\d{4})\)')
        scrapedtitle = scrapertools.find_single_match(scrapedtitle, '(.*?)\(\.*?')
        url = scrapedurl
        thumbnail = scrapedthumbnail
        scrapedcalidad = scrapedcalidad.replace("'", "")
        scrapedcalidad = scrapedcalidad.lower()
        # Map scraped quality/audio to colored labels; default to MULTI
        if scrapedcalidad in tcalidad:
            scrapedcalidad = tcalidad[scrapedcalidad]
        else:
            scrapedcalidad = '[COLOR orange]MULTI[/COLOR]'
        if scrapedidioma in taudio:
            scrapedidioma = taudio[scrapedidioma]
        else:
            scrapedidioma = '[COLOR orange]MULTI[/COLOR]'
        title = scrapedtitle + ' | ' + scrapedcalidad + ' | ' + scrapedidioma + ' | '
        fanart = ''
        plot = ''
        itemlist.append(
            Item(channel=item.channel, action='findvideos', title=title, url=url,
                 thumbnail=thumbnail, plot=plot, fanart=fanart, contentTitle=scrapedtitle,
                 extra=item.extra, infoLabels={'year': year}, show=scrapedtitle,
                 list_language=list_language, context=autoplay.context))

    # Pagination
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    itemlist = fail_tmdb(itemlist)
    if itemlist != []:
        # (removed unused local `actual_page_url`)
        next_page = scrapertools.find_single_match(
            data, "class=previouspostslink' href='([^']+)'>Siguiente ›<\/a>")
        if next_page != '':
            itemlist.append(
                Item(channel=item.channel, action="lista", title='Siguiente >>>',
                     url=next_page,
                     thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png',
                     extra=item.extra))
    return itemlist
def novedades(item):
    """
    Scrape the latest-films listing from item.url.

    Returns one Item per film (action "findvideos") and, when present, a
    "next page" Item (action "novedades").
    """
    logger.info("[pelispekes.py] novedades")
    itemlist = []

    # Extract the entries (folders)
    data = scrapertools.cachePage(item.url)
    patron = 'class="filmgal">(.*?)<strong>Duraci[^<]+</strong>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    logger.info("hay %d matches" % len(matches))

    for match in matches:
        # Each block holds: <a href> + <img src/alt> (link, thumb, title) and a
        # "Sinopsis:" div with the plot text.
        patron = '<a target="_blank" href="([^"]+)"[^<]+'
        patron += '<img width="\d+" height="\d+" border="0" src="([^"]+)" alt="([^"]+)".*?'
        patron += '<strong>Sinopsis:</strong>(.*?)</div>'
        matches2 = re.compile(patron, re.DOTALL).findall(match)
        logger.info("hay %d matches2" % len(matches2))
        for match2 in matches2:
            # Attributes
            scrapedurl = match2[0]
            scrapedtitle = match2[2].replace("Ver pelicula", "").replace("–", "")
            scrapedthumbnail = match2[1]
            scrapedplot = match2[3]
            if (DEBUG):
                logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
            itemlist.append(
                Item(channel=item.channel, action="findvideos", title=scrapedtitle,
                     url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
                     plot=scrapedplot, viewmode="movie_with_plot"))

    # Extract the next page link
    patron = '<a class="nextpostslink" href="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for match in matches:
        scrapedtitle = ">> Pagina siguiente"
        scrapedurl = match
        scrapedthumbnail = ""
        # fix: the next-page item used to reuse the last film's `scrapedplot`
        # (and raised NameError when no film matched); it now gets an empty plot
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action="novedades", title=scrapedtitle,
                 url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist
def findvideos(item):
    """
    Return playable Items for a film page.

    Direct links are extracted from the player config ("label"/"file" pairs),
    tagged with the language inferred from the page keywords, then run through
    FilterTools and AutoPlay; a videolibrary entry is appended when supported.

    (Removed a large commented-out legacy extraction block and the unused
    `pattern`/`matches` regex scan it belonged to.)
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)

    # Language comes from the page's keywords
    lang = lang_from_keywords(data)
    url_list = scrapertools.find_multiple_matches(
        data, 'label:\s?"([^"]+)",\s?file:\s?"([^"]+)"')
    title = ''
    for quality, url in url_list:
        quality += 'p'
        if not config.get_setting('unify'):
            title = '[%s]' % lang
        itemlist.append(
            Item(channel=item.channel, title='Directo %s [%s]' % (title, quality), url=url,
                 action='play', language=lang, quality=quality, server='directo',
                 infoLabels=item.infoLabels))

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url, action="add_pelicula_to_library", extra="findvideos",
                 contentTitle=item.contentTitle))
    return itemlist
def update_json(path, params):
    """Load the Item serialized at `path`, merge `params` into it, write it back."""
    stored = Item().fromjson(filetools.read(path))
    stored.__dict__.update(params)
    filetools.write(path, stored.tojson())
def findvideos(item):
    """Build the list of playable Items for item's page.

    The page hides each player behind encriptando.com: every iframe's
    "data-data" attribute is "<base64 part>+<suffix>"; the decoded base64 plus
    the suffix form a path on https://encriptando.com whose response defines
    either `var k="..."` or `var s="..."`, which decrypt() turns into the
    final URL. Language tabs ("contenedor_tab") are paired positionally with
    the labels returned by get_langs().
    """
    logger.info()
    itemlist = list()
    data = httptools.downloadpage(item.url).data
    data = scrapertools.unescape(data)
    soup = BeautifulSoup(data, "html5lib", from_encoding="utf-8")
    # Language labels come from the page's <b> tags, one per tab
    lang = soup.find_all("b")
    lang_list = get_langs(lang)
    lang_count = 0
    for tab_soup in soup.find_all("div", class_="contenedor_tab"):
        # NOTE(review): assumes get_langs() returned at least one label per tab
        # — IndexError otherwise; confirm against the page layout
        lang = lang_list[lang_count]
        for elem in tab_soup.find_all("iframe"):
            title = ""
            # data-data = "<base64>+<suffix>"
            enc_url = scrapertools.find_single_match(elem["data-data"], '([^\+]+)\+(.+)?')
            s = base64.b64decode(enc_url[0])
            i = enc_url[1]
            hidden_url = "https://encriptando.com" + s + i
            hidden_data = httptools.downloadpage(hidden_url, follow_redirects=False,
                                                 headers={'Referer': host}).data
            # The response defines either var k="..." or var s="..."
            var, val = scrapertools.find_single_match(
                hidden_data.replace("'", '"'), 'var (k|s)="([^"]+)";')
            url = decrypt(var, val)
            if var == "k":
                # "k" URLs get the page URL appended (presumably as a referer
                # hint for playback — TODO confirm against decrypt()/servers)
                url += "|%s" % item.url
            if not config.get_setting('unify'):
                title = ' [%s]' % lang
            # The '%s' placeholder is filled with the server name below
            itemlist.append(
                Item(channel=item.channel, title='%s' + title, url=url, action='play',
                     language=lang, infoLabels=item.infoLabels))
        lang_count += 1
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())
    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
    autoplay.start(itemlist, item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url, action="add_pelicula_to_library", extra="findvideos",
                 contentTitle=item.contentTitle))
    return itemlist
def fichas(item):
    """
    Scrape the film cards from item.url (search results or category listing).

    Returns one Item per film — title decorated with quality and IMDB rating —
    plus a "Successivo >>" Item when a next page exists.
    """
    logger.info("[streamondemand-pureita altadefinizione_pink ] fichas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    # Normalize the markup so a single pattern can capture quality and rating
    data = re.sub(r'<div class="wrapperImage"[^<]+<a',
                  '<div class="wrapperImage"><fix>SD</fix><a', data)
    data = re.sub(r'<h5> </div>', '<fix>IMDB: 0.0</fix>', data)

    is_search = "/?s=" in item.url
    if is_search:
        patron = ('<div class="col-lg-3 col-md-3 col-xs-3">.*?'
                  'href="([^"]+)".*?'
                  '<div class="wrapperImage"[^<]+'
                  '<[^>]+>([^<]+)<.*?'
                  'src="([^"]+)".*?'
                  'class="titleFilm">([^<]+)<.*?'
                  'IMDB: ([^<]+)<')
    else:
        patron = ('<div class="wrapperImage"[^<]+\s*[^>]+>([^<]+).*?\s*<a href="([^"]+)">'
                  '<img width=".*?" height=".*?" src="([^"]+)" class="attachment[^>]+>'
                  '</a>\s*<div class="info">\s*<h2 class="titleFilm"><a href[^>]+>([^<]+)</a></h2>\s*[^>]+>[^>]+>\s*(.*?)<')

    for first, second, thumb, raw_title, rating in re.compile(patron, re.DOTALL).findall(data):
        # The first two capture groups swap meaning between the two layouts
        if is_search:
            page_url, quality = first, second
        else:
            page_url, quality = second, first

        if rating == "":
            rating = "N/A"

        plain_title = scrapertools.decodeHtmlentities(raw_title)
        colored_title = (plain_title +
                         " ([COLOR yellow]" + quality + "[/COLOR])" +
                         " ([COLOR yellow]" + rating + "[/COLOR])")
        plain_title += " (" + quality + ") (" + rating + ")"

        # ------------------------------------------------
        thumb = httptools.get_url_headers(thumb)
        # ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__, action="findvideos", contentType="movie",
                 title=colored_title, url=page_url, thumbnail=thumb,
                 fulltitle=plain_title, show=plain_title),
            tipo='movie'))

    # Pagination
    next_page = scrapertools.find_single_match(
        data, '<a class="next page-numbers" href="([^"]+)">')
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__, action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]", url=next_page,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))
    return itemlist
def mainlist(item):
    """Entry point of the canal22 channel: delegates straight to programas()."""
    logger.info("tvalacarta.channels.canal22 mainlist")
    root_item = Item(channel=CHANNELNAME)
    return programas(root_item)
def list_all(item):
    """
    Paginated listing of film posts.

    item.first is the index of the first post to emit; 20 entries are produced
    per call and a "Siguiente >>" Item is appended while more remain (staying
    on the current page until its posts are exhausted, then jumping to the
    site's next page).
    """
    logger.info()  # fix: was a bare `logger.info` attribute access, never called
    itemlist = list()
    reached_end = False  # renamed from `next` (shadowed the builtin)
    data = httptools.downloadpage(item.url).data
    soup = BeautifulSoup(data, "html5lib", from_encoding="utf-8")
    matches = soup.find_all("div", class_="post")

    first = item.first
    last = first + 20
    if last >= len(matches):
        last = len(matches)
        reached_end = True

    for elem in matches[first:last]:
        title = ""
        url = elem.a["href"]
        thumb = elem.img["src"]
        langs = elem.find("div", "tituloidioma")
        language = languages_from_flags(langs, "png")
        # Title is "Name (year)"; split out the year when present
        full_title = elem.find("div", class_="title").text.split('(')
        cleantitle = full_title[0]
        if len(full_title) > 1:
            year = re.sub('\)', '', full_title[1])
        else:
            year = '-'
        if not config.get_setting('unify'):
            title = '%s [%s] %s' % (cleantitle, year, language)
        itemlist.append(
            Item(channel=item.channel, title='%s' % title, url=url, action='findvideos',
                 contentTitle=cleantitle, thumbnail=thumb, language=language,
                 infoLabels={'year': year}))

    tmdb.set_infoLabels_itemlist(itemlist, True)

    # Pagination
    if not reached_end:
        url_next_page = item.url
        first = last
    else:
        try:
            current = soup.find("span", class_="current").text
        except:
            return itemlist
        url_next_page = '%spage/%s' % (host, str(int(current) + 1))
        first = 0

    if url_next_page and len(matches) > 21:
        itemlist.append(
            Item(channel=item.channel, title="Siguiente >>", url=url_next_page,
                 action='list_all', first=first))
    return itemlist
def menu_video(item): itemlist = [] logger.info("menu_video url=" + item.url + ", server=" + item.server) if item.server == "local": itemlist.append( Item(title=item.title, fulltitle=item.fulltitle, url=item.url, action="play_video", thumbnail=item.thumbnail, plot=item.plot, folder=False)) return itemlist video_urls = [] # Extrae todos los enlaces posibles exec "from servers import " + item.server + " as server_connector" video_urls = server_connector.get_video_url( page_url=item.url, premium=(config.get_setting("megavideopremium") == "true"), user=config.get_setting("megavideouser"), password=config.get_setting("megavideopassword")) if config.get_setting("fileniumpremium") == "true" and item.server not in [ "vk", "fourshared", "directo", "adnstream", "facebook", "megalive", "tutv", "stagevu" ]: exec "from servers import filenium as gen_conector" # Parche para solucionar el problema habitual de que un vídeo http://www.megavideo.com/?d=XXX no está, pero http://www.megaupload.com/?d=XXX si url = url.replace("http://www.megavideo.com/?d", "http://www.megaupload.com/?d") video_gen = gen_conector.get_video_url( page_url=item.url, premium=(config.get_setting("fileniumpremium") == "true"), user=config.get_setting("fileniumuser"), password=config.get_setting("fileniumpassword")) logger.info("[rsstools.py] filenium url=" + video_gen) video_urls.append(["[filenium]", video_gen]) if len(video_urls) == 0: itemlist.append( Item(title="El vídeo no está disponible", channel=item.channel, action=item.action, url=item.url, server=item.server, extra=item.extra, fulltitle=item.fulltitle)) itemlist.append( Item(title="en %s." 
% item.server, channel=item.channel, action=item.action, url=item.url, server=item.server, extra=item.extra, fulltitle=item.fulltitle)) return itemlist for video_url in video_urls: itemlist.append( Item(channel=item.channel, title="Ver " + video_url[0], url=video_url[1], action="play_video", extra=item.extra, fulltitle=item.fulltitle)) refered_item_encoded = urllib.quote(item.title) + "|" + urllib.quote( item.server) + "|" + urllib.quote(item.url) + "|" + urllib.quote( item.extra) + "|" + urllib.quote( item.fulltitle) + "|" + urllib.quote( item.thumbnail) + "|" + urllib.quote(item.plot) itemlist.append( Item(channel=item.channel, title="Descargar", action="descargar", url=refered_item_encoded, extra=item.extra, fulltitle=item.fulltitle)) if item.channel != "favoritos": itemlist.append( Item(channel=item.channel, title="Añadir a favoritos", action="add_to_favorites", url=refered_item_encoded, extra=item.extra, fulltitle=item.fulltitle)) else: itemlist.append( Item(channel=item.channel, title="Quitar de favoritos", action="remove_from_favorites", url=refered_item_encoded, extra=item.extra, fulltitle=item.fulltitle)) if item.channel != "descargas": itemlist.append( Item(channel=item.channel, title="Añadir a la lista de descargas", action="add_to_downloads", url=refered_item_encoded, extra=item.extra, fulltitle=item.fulltitle)) else: if item.category == "errores": itemlist.append( Item(channel=item.channel, title="Quitar definitivamente de la lista de descargas", action="remove_from_error_downloads", url=refered_item_encoded, extra=item.extra, fulltitle=item.fulltitle)) itemlist.append( Item(channel=item.channel, title="Pasar de nuevo a la lista de descargas", action="add_again_to_downloads", url=refered_item_encoded, extra=item.extra, fulltitle=item.fulltitle)) else: itemlist.append( Item(channel=item.channel, title="Quitar de la lista de descargas", action="remove_from_downloads", url=refered_item_encoded, extra=item.extra, fulltitle=item.fulltitle)) itemlist.append( 
Item(channel=item.channel, title="Enviar a pyLoad", action="send_to_pyload", url=refered_item_encoded, extra=item.extra, fulltitle=item.fulltitle)) # itemlist.append( Item(channel=item.channel, title="Enviar a jdownloader",action="send_to_jdownloader",url=refered_item_encoded, extra=item.extra, fulltitle=item.fulltitle ) ) if item.channel != "trailertools" or item.action != "play": itemlist.append( Item(channel=item.channel, title="Buscar trailer", action="search_trailer", url=refered_item_encoded, extra=item.extra, fulltitle=item.fulltitle)) if item.category == "wiideoteca": itemlist.append( Item(channel=item.channel, title="Marcar como Ultimo Episodio Visto", action="UltimoVisto", url=item.extra, fulltitle=item.fulltitle)) return itemlist
def getitems(requestpath):
    """Dispatch an HTTP-style request path to a channel action.

    The path segments (separated by "/") carry channel, action, url, server,
    title, extra, category and fulltitle. Results are cached per request path
    (except for a few interactive channels). Returns (itemlist, channel).
    Python 2 code: uses `exec` statements, `print`, the `md5` module.
    """
    logger.info("getitems")
    itemlist = []

    # The route used in the request
    ruta = requestpath.split("?")[0]
    logger.info("ruta=" + ruta)

    # The parameters are the parts of the route separated by "/"
    rutas = ruta.split("/")
    cadena = " "
    # Print them to the log
    for linea in rutas:
        cadena = cadena + linea + " | "
    logger.info(cadena)

    # Extract the parameters
    channel = rutas[2]
    accion = rutas[3]
    url = rutas[4]
    if url != "none":
        url = urllib.unquote_plus(url)
    server = rutas[5].lower()
    title = urllib.unquote_plus(rutas[6])
    extra = urllib.unquote_plus(rutas[7])
    category = urllib.unquote_plus(rutas[8])
    fulltitle = urllib.unquote_plus(rutas[9])
    logger.info("channel=" + channel + ", accion=" + accion + ", url=" + url + ", server=" + server + ", title=" + title + ", extra=" + extra + ", category=" + category + " fulltitle=" + fulltitle)
    print "channel=" + channel + ", accion=" + accion + ", url=" + url + ", server=" + server + ", title=" + title + ", extra=" + extra + ", category=" + category

    # On mainlist, optionally check for a channel update first
    if accion == "mainlist" and config.get_setting("updatechannels") == "true":
        logger.info("Verificando actualización del canal")
        from core import updater
        actualizado = updater.updatechannel(channel)
        if actualizado:
            itemlist.append(Item(title="¡Canal descargado y actualizado!"))

    # Derive a valid cache file name from the request path
    hashed_url = binascii.hexlify(md5.new(requestpath).digest())
    cached_file = os.path.join(config.get_data_path(), "tmp", "cache", hashed_url)
    logger.info("cached_file=" + cached_file)

    # If the response is already cached (interactive channels are never cached)
    if channel not in ("trailertools", "buscador", "configuracion", "pyload", "wiidoteca") and os.path.exists(cached_file):  # <--
        logger.info("Reading from cache")
        fichero = open(cached_file, "rb")
        itemlist = cerealizer.load(fichero)
        fichero.close()
    else:
        logger.info("Not cached")
        ### TO STUDY
        #if accion not in ("play","findvideos","detail"): titulo = ""
        ###
        # The invoking item matters for obtaining the next one
        senderitem = Item(title=title, channel=channel, action=accion, url=url, server=server, extra=extra, category=category, fulltitle=fulltitle)
        if "|" in url:
            ## <-- unquote after split so "|" inside a field does not interfere
            partes = senderitem.url.split("|")
            decpartes = []
            for parte in partes:
                decpartes.append(urllib.unquote_plus(parte))
            partes = decpartes
            ## <--
            refered_item = Item(title=partes[0], url=partes[2], thumbnail=partes[5], server=partes[1], plot=partes[6], extra=partes[3], fulltitle=partes[4])
            logger.info("refered_item title=" + refered_item.title + ", url=" + refered_item.url + ", server=" + refered_item.server + ", extra=" + refered_item.extra)
        else:
            refered_item = Item()

        # Import the channel module and run the requested function
        if channel in ("configuracion", "trailertools", "buscador"):
            exec "import " + channel
        else:
            try:
                exec "from tvalacarta.channels import " + channel
            except:
                exec "from core import " + channel

        # play - the playback menu of a video
        if accion == "play":
            logger.info("ACCION PLAY")
            print "ACCION PLAY"
            try:
                exec "itemlist = " + channel + ".play(senderitem)"
                print "itemlist = " + channel + ".play(senderitem)"
                senderitem = itemlist[0]
                senderitem.folder = False
            except:
                import sys
                for line in sys.exc_info():
                    logger.error("%s" % line)
                itemlist = menu_video(senderitem)
        # play_video - build a single-entry playlist for wiimc to play
        elif accion == "play_video":
            logger.info("ACCION PLAY_VIDEO")
            senderitem.folder = False
            itemlist.append(senderitem)
        # search - the search engine
        elif accion == "search":
            logger.info("ACCION SEARCH")
            texto = requestpath.split(".rss")[1]
            exec "itemlist = " + channel + "." + accion + "(senderitem,texto)"
        # findvideos - must find playable videos
        elif accion == "findvideos":
            logger.info("ACCION FINDVIDEOS")
            try:
                exec "itemlist = " + channel + "." + accion + "(senderitem)"
            except:
                import sys
                for line in sys.exc_info():
                    logger.error("%s" % line)
                itemlist = findvideos(senderitem, channel)
        elif accion == "descargar":
            itemlist = download_item(senderitem, refered_item)
        elif accion == "downloadall":  ## <--
            itemlist = downloadall(senderitem, refered_item)  ## <--
        elif accion == "add_to_favorites":
            itemlist = add_to_favorites(senderitem, refered_item)
        elif accion == "remove_from_favorites":
            itemlist = remove_from_favorites(senderitem, refered_item)
        elif accion == "add_to_downloads":
            itemlist = add_to_downloads(senderitem, refered_item)
        elif accion == "remove_from_downloads":
            itemlist = remove_from_downloads(senderitem, refered_item)
        elif accion == "remove_from_error_downloads":
            itemlist = remove_from_error_downloads(senderitem, refered_item)
        elif accion == "add_again_to_downloads":
            itemlist = add_again_to_downloads(senderitem, refered_item)
        elif accion == "send_to_jdownloader":
            itemlist = send_to_jdownloader(senderitem, refered_item)
        elif accion == "send_to_pyload":
            itemlist = send_to_pyload(senderitem, refered_item)
        elif accion == "search_trailer":
            itemlist = search_trailer(senderitem, refered_item)
        elif accion == "add_serie_to_wiideoteca":
            itemlist = wiideoteca.AgregarSerie(senderitem)
        elif accion == "UltimoVisto":
            itemlist = wiideoteca.UltimoVisto(senderitem)
        else:
            # Generic channel action, invoked dynamically
            if senderitem.url == "none":
                senderitem.url = ""
            exec "itemlist.extend( " + channel + "." + accion + "(senderitem) )"

        # Store the result in the cache
        fichero = open(cached_file, "wb")
        cerealizer.dump(itemlist, fichero)
        fichero.close()

    logger.info("Items devueltos")
    for item in itemlist:
        logger.info(" " + item.title + " | " + item.url + " | " + item.action)

    return itemlist, channel
def convert_old_to_v4():
    """Migrate the TV-show library from the legacy v1 (series.xml),
    v2 (series.json) or v3 (per-folder tvshow.json) layouts to v4.

    The current SERIES folder is renamed aside (SERIES_OLD_<timestamp>) and a
    fresh one is created; each old entry is re-saved through
    library.save_library_tvshow().

    @return: True when the migration finished (even if some shows failed),
             False when a fatal filesystem/read error aborted it.
    """
    logger.info()
    path_series_xml = filetools.join(config.get_data_path(), "series.xml")
    path_series_json = filetools.join(config.get_data_path(), "series.json")
    series_insertadas = 0
    series_fallidas = 0
    version = 'v?'

    # Rename the current Series folder aside and create a fresh empty one.
    import time
    new_name = "SERIES_OLD_" + str(time.time())
    path_series_old = filetools.join(library.LIBRARY_PATH, new_name)
    if filetools.rename(library.TVSHOWS_PATH, new_name):
        if not filetools.mkdir(library.TVSHOWS_PATH):
            logger.error("ERROR, no se ha podido crear la nueva carpeta de SERIES")
            return False
    else:
        logger.error("ERROR, no se ha podido renombrar la antigua carpeta de SERIES")
        return False

    # Convert a v1 library (series.xml, one "title,url,channel" line per show) to v4.
    if filetools.exists(path_series_xml):
        try:
            data = filetools.read(path_series_xml)
            for line in data.splitlines():
                try:
                    aux = line.rstrip('\n').split(",")
                    tvshow = aux[0].strip()
                    url = aux[1].strip()
                    channel = aux[2].strip()

                    serie = Item(contentSerieName=tvshow, url=url, channel=channel,
                                 action="episodios", title=tvshow, active=True)

                    # Split a trailing "(YYYY)" year out of the show title, if present.
                    patron = "^(.+)[\s]\((\d{4})\)$"
                    matches = re.compile(patron, re.DOTALL).findall(serie.contentSerieName)
                    if matches:
                        serie.infoLabels['title'] = matches[0][0]
                        serie.infoLabels['year'] = matches[0][1]
                    else:
                        serie.infoLabels['title'] = tvshow

                    insertados, sobreescritos, fallidos = library.save_library_tvshow(serie, list())
                    if fallidos == 0:
                        series_insertadas += 1
                        platformtools.dialog_notification("Serie actualizada", serie.infoLabels['title'])
                    else:
                        series_fallidas += 1
                except:
                    # A malformed line only discards that one show, not the migration.
                    series_fallidas += 1

            filetools.rename(path_series_xml, "series.xml.old")
            version = 'v4'
        except EnvironmentError:
            logger.error("ERROR al leer el archivo: %s" % path_series_xml)
            return False

    # Convert a v2 library (series.json) to v4.
    if filetools.exists(path_series_json):
        try:
            data = jsontools.load_json(filetools.read(path_series_json))
            for tvshow in data:
                for channel in data[tvshow]["channels"]:
                    try:
                        serie = Item(contentSerieName=data[tvshow]["channels"][channel]["tvshow"],
                                     url=data[tvshow]["channels"][channel]["url"],
                                     channel=channel, action="episodios",
                                     title=data[tvshow]["name"], active=True)

                        # Keys not prefixed with "t_" look like TMDB ids — presumed; verify.
                        if not tvshow.startswith("t_"):
                            serie.infoLabels["tmdb_id"] = tvshow

                        insertados, sobreescritos, fallidos = library.save_library_tvshow(serie, list())
                        if fallidos == 0:
                            series_insertadas += 1
                            platformtools.dialog_notification("Serie actualizada", serie.infoLabels['title'])
                        else:
                            series_fallidas += 1
                    except:
                        series_fallidas += 1

            filetools.rename(path_series_json, "series.json.old")
            version = 'v4'
        except EnvironmentError:
            logger.error("ERROR al leer el archivo: %s" % path_series_json)
            return False

    # Convert a v3 library to v4: walk the renamed SERIES_OLD folder
    # recursively and re-import every tvshow.json found.
    if version != 'v4':
        for raiz, subcarpetas, ficheros in filetools.walk(path_series_old):
            for f in ficheros:
                if f == "tvshow.json":
                    try:
                        serie = Item().fromjson(filetools.read(filetools.join(raiz, f)))
                        insertados, sobreescritos, fallidos = library.save_library_tvshow(serie, list())
                        if fallidos == 0:
                            series_insertadas += 1
                            platformtools.dialog_notification("Serie actualizada", serie.infoLabels['title'])
                        else:
                            series_fallidas += 1
                    except:
                        series_fallidas += 1

    config.set_setting("library_version", 'v4')
    platformtools.dialog_notification("Biblioteca actualizada al nuevo formato",
                                      "%s series convertidas y %s series descartadas. A continuación se va a "
                                      "obtener la información de todos los episodios" % (series_insertadas, series_fallidas),
                                      time=12000)

    # Finally clean the Kodi library: the old paths no longer exist.
    xbmc_library.clean()
    return True
def episodios(item):
    """List every episode (or the movie) found on the show page.

    Appends videolibrary / download entries at the end when the platform
    supports them and at least one link was found.
    """
    logger.info()
    itemlist = []

    page = get_url_contents(item.url)
    serie_info = __extract_info_from_serie(page)
    plot = serie_info[3] if serie_info[3] else ''

    is_movie = False
    for url, title, date in re.findall(REGEX_EPISODE, page, re.DOTALL):
        ep_number = scrapertools.find_single_match(title, r'Episodio (\d+)')

        if ep_number:
            # The link is an episode: renumber it for scrobbling services.
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, int(ep_number))
            title = "%sx%s %s (%s)" % (season, str(episode).zfill(2),
                                       "Episodio %s" % episode, date)
        else:
            # The link is a movie.
            title = "%s (%s)" % (title, date)
            item.url = url
            is_movie = True

        logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, item.thumbnail))
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url,
                 thumbnail=item.thumbnail, plot=plot, show=item.show,
                 fulltitle="%s %s" % (item.show, title)))

    # The platform supports the videolibrary and at least one episode/movie was found.
    if config.get_videolibrary_support() and itemlist:
        if is_movie:
            add_title = "Añadir película a la videoteca"
            add_action = "add_pelicula_to_library"
            add_extra = ""
        else:
            add_title = "Añadir serie a la videoteca"
            add_action = "add_serie_to_library"
            add_extra = "episodios"

        itemlist.append(
            Item(channel=item.channel, title=add_title, url=item.url,
                 action=add_action, extra=add_extra, show=item.show))

        if not is_movie:
            itemlist.append(
                Item(channel=item.channel, title="Descargar todos los episodios",
                     url=item.url, action="download_all_episodes",
                     extra="episodios", show=item.show))

    return itemlist
try: filetools.mkdir(path) except OSError, exception: if exception.errno != errno.EEXIST: raise tvshow_path = filetools.join(path, "tvshow.nfo") if not filetools.exists(tvshow_path): # Creamos tvshow.nfo, si no existe, con la head_nfo, info de la serie y marcas de episodios vistos logger.info("Creando tvshow.nfo: " + tvshow_path) head_nfo = scraper.get_nfo(item) item_tvshow = Item(title=item.contentTitle, channel="biblioteca", action="get_temporadas", fanart=item.infoLabels['fanart'], thumbnail=item.infoLabels['thumbnail'], infoLabels=item.infoLabels, path=path.replace(TVSHOWS_PATH, "")) item_tvshow.library_playcounts = {} item_tvshow.library_urls = {item.channel: item.url} else: # Si existe tvshow.nfo, pero estamos añadiendo un nuevo canal actualizamos el listado de urls head_nfo, item_tvshow = read_nfo(tvshow_path) item_tvshow.channel = "biblioteca" item_tvshow.action = "get_temporadas" item_tvshow.library_urls[item.channel] = item.url # FILTERTOOLS # si el canal tiene filtro de idiomas, añadimos el canal y el show
def mainlist(item):
    """Build the channel's root menu from a static table of sections."""
    logger.info()

    sections = [
        ("Nuevos", "lista", host + "/videos?o=d"),
        ("Destacado", "lista", host),
        ("Mas vistos", "lista", host + "/videos?o=v7"),
        ("Mejor valorado", "lista", host + "/videos?o=r7"),
        ("Mas popular", "lista", host + "/videos?o=f7"),
        ("Mas largo", "lista", host + "/videos?o=l7"),
        ("Mas comentado", "lista", host + "/videos?o=m7"),
        ("PornStar", "categorias", host + "/pornstars"),
        ("Canal", "categorias", host + "/channels"),
        ("Categorias", "categorias", host + "/categories"),
    ]

    itemlist = [Item(channel=item.channel, title=title, action=action, url=url)
                for title, action, url in sections]

    # The search entry carries no url.
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def save_library_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Save every episode of *episodelist* into the given library folder,
    writing a .strm, .nfo and .json file per episode.

    @type path: str
    @param path: folder where the episodes are written
    @type episodelist: list
    @param episodelist: items representing the episodes to save
    @type serie: item
    @param serie: show the episodes belong to
    @type silent: bool
    @param silent: suppress the progress dialog when True
    @param overwrite: allow overwriting already existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of episodes inserted
    @rtype sobreescritos: int
    @return: number of episodes overwritten
    @rtype fallidos: int
    @return: number of episodes that failed (-1 when everything failed)
    """
    logger.info()

    # No episode list: nothing to save.
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List the show's files once up front, so existence checks are in-memory.
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]

    # "silent" hides the progress dialog (used by library_service).
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('streamondemand', 'Aggiunta episodi...')
        p_dialog.update(0, 'Aggiunta episodio...')

    new_episodelist = []
    # Extract the season/episode numbers from the titles; drop items without them.
    for e in episodelist:
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)
            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            new_episodelist.append(e)
        except:
            continue

    # Nothing matched the SxE pattern: nothing to save.
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    # fix float: plain integer division would truncate in Python 2.x
    t = float(100) / len(new_episodelist)

    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Aggiunta episodio...', e.title)

        season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # Create season_episode.strm when missing.
            item_strm = Item(action='play_from_library', channel='biblioteca',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_idiomas:
                # If tvshow.nfo carries a language filter, propagate it to the new strm item.
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # Create season_episode.nfo when missing.
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)
            item_nfo = e.clone(channel="biblioteca", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))
            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())

        # Only continue when both season_episode.nfo and season_episode.strm exist.
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the episode infoLabels from the .nfo unless already in memory.
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)
                e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentTitle] = 0
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    if news_in_playcounts:
        # New episodes were added: mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            # active == 30 appears to be a legacy sentinel reset to daily updates
            # — presumed; verify against the library updater.
            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1
        else:
            # ... and, on success, refresh the Kodi library.
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_library
                xbmc_library.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
def mainlist(item):
    """Root menu of the channel, built from a static section table."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)

    # (title, url, thumb id) — a None thumb id means the entry has no thumbnail.
    sections = [
        ("Novedades", host + 'canal/novedades/', 'newest'),
        ("Destacadas", host + 'canal/Mejores+Películas/', 'hot'),
        ("Castellano", host + 'canal/Pel%C3%ADculas%20en%20espa%C3%B1ol/', 'cast'),
        ("Subtituladas", host + 'canal/Películas%20VO%20subtitulada/', 'vose'),
        ("VO", host + 'canal/Películas%20en%20VO/', None),
        ("A-Z", host + 'canal/a-z/', 'alphabet'),
    ]

    itemlist = []
    for title, url, thumb_id in sections:
        entry = Item(channel=item.channel, title=title, action="list_all",
                     url=url, first=0)
        if thumb_id:
            entry.thumbnail = get_thumb(thumb_id, auto=True)
        itemlist.append(entry)

    itemlist.append(
        Item(channel=item.channel, title="Buscar", action="search",
             url=host + 'busqueda.php?bus=',
             thumbnail=get_thumb('search', auto=True)))

    autoplay.show_option(item.channel, itemlist)
    return itemlist
def save_library_movie(item):
    """
    Save the movie represented by *item* into the movie library, creating the
    per-movie folder with its .nfo, .strm and per-channel .json files.

    @type item: item
    @param item: element to save.
    @rtype insertados: int
    @return: number of elements inserted
    @rtype sobreescritos: int
    @return: number of elements overwritten
    @rtype fallidos: int
    @return: number of failed elements, or -1 when everything failed
    """
    logger.info()
    # logger.debug(item.tostring('\n'))
    insertados = 0
    sobreescritos = 0
    fallidos = 0
    path = ""

    # Try to get the proper title:
    # 1. contentTitle: the right place — title often holds "Add to library..."
    # 2. fulltitle
    # 3. title
    if not item.contentTitle:
        # Put the proper title where the scraper will look for it.
        if item.fulltitle:
            item.contentTitle = item.fulltitle
        else:
            item.contentTitle = item.title

    # Still no title at this point: leave without saving.
    if not item.contentTitle or not item.channel:
        logger.debug("NO ENCONTRADO contentTitle")
        return 0, 0, -1

    scraper_return = scraper.find_and_set_infoLabels(item)

    # At this point we may have:
    # scraper_return = True: item with up-to-date movie infoLabels
    # scraper_return = False: item without movie info (user cancelled the window)
    # item.infoLabels['code'] == "": the IMDB id needed to continue is missing — leave.
    if not scraper_return or not item.infoLabels['code']:
        # TODO: for now nothing is added when there is no result,
        # though we could open a dialog to type the id/name manually.
        logger.debug("NO ENCONTRADO EN SCRAPER O NO TIENE code")
        return 0, 0, -1

    _id = item.infoLabels['code'][0]

    # progress dialog
    p_dialog = platformtools.dialog_progress('streamondemand', 'Aggiunta film...')

    if config.get_setting("original_title_folder", "biblioteca") == 1 and item.infoLabels['originaltitle']:
        base_name = item.infoLabels['originaltitle']
    else:
        base_name = item.contentTitle

    base_name = unicode(filetools.validate_path(base_name.replace('/', '-')),
                        "utf8").lower().encode("utf8")

    # Reuse an existing "<name> [<code>]" folder when its code matches this movie.
    for raiz, subcarpetas, ficheros in filetools.walk(MOVIES_PATH):
        for c in subcarpetas:
            code = scrapertools.find_single_match(c, '\[(.*?)\]')
            if code and code in item.infoLabels['code']:
                path = filetools.join(raiz, c)
                _id = code
                break

    if not path:
        # Create the folder
        path = filetools.join(MOVIES_PATH, ("%s [%s]" % (base_name, _id)).strip())
        logger.info("Creando directorio pelicula:" + path)
        if not filetools.mkdir(path):
            logger.debug("No se ha podido crear el directorio")
            return 0, 0, -1

    nfo_path = filetools.join(path, "%s [%s].nfo" % (base_name, _id))
    strm_path = filetools.join(path, "%s.strm" % base_name)
    json_path = filetools.join(path, ("%s [%s].json" % (base_name, item.channel.lower())))

    nfo_exists = filetools.exists(nfo_path)
    strm_exists = filetools.exists(strm_path)
    json_exists = filetools.exists(json_path)

    if not nfo_exists:
        # Create the .nfo when missing.
        logger.info("Creando .nfo: " + nfo_path)
        head_nfo = scraper.get_nfo(item)
        item_nfo = Item(title=item.contentTitle, channel="biblioteca", action='findvideos',
                        library_playcounts={"%s [%s]" % (base_name, _id): 0},
                        infoLabels=item.infoLabels, library_urls={})
    else:
        # The .nfo exists but a new channel is being added: load it to update the url list.
        head_nfo, item_nfo = read_nfo(nfo_path)

    if not strm_exists:
        # Create base_name.strm when missing.
        item_strm = Item(channel='biblioteca', action='play_from_library',
                         strm_path=strm_path.replace(MOVIES_PATH, ""),
                         contentType='movie', contentTitle=item.contentTitle)
        strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))
        item_nfo.strm_path = strm_path.replace(MOVIES_PATH, "")

    # Only continue when both item_nfo and the .strm exist.
    if item_nfo and strm_exists:
        if json_exists:
            logger.info("El fichero existe. Se sobreescribe")
            sobreescritos += 1
        else:
            insertados += 1

        if filetools.write(json_path, item.tojson()):
            p_dialog.update(100, 'Aggiunta film...', item.contentTitle)
            item_nfo.library_urls[item.channel] = item.url
            if filetools.write(nfo_path, head_nfo + item_nfo.tojson()):
                # Refresh the Kodi library with the new movie.
                if config.is_xbmc():
                    from platformcode import xbmc_library
                    xbmc_library.update(FOLDER_MOVIES, filetools.basename(path) + "/")
                p_dialog.close()
                return insertados, sobreescritos, fallidos

    # Reaching this point means something went wrong.
    logger.error("No se ha podido guardar %s en la biblioteca" % item.contentTitle)
    p_dialog.update(100, 'Aggiunta fallita...', item.contentTitle)
    p_dialog.close()
    return 0, 0, -1
def mainlist(item):
    """Build the downloads menu from the queued-download JSON files.

    At top level it shows one aggregated row per TV show plus one row per
    movie/video; when item.contentType == "tvshow" it lists the episodes of
    that show/channel instead. Bulk-action entries are prepended depending on
    the download states found.
    """
    logger.info("pelisalacarta.channels.descargas mainlist")
    itemlist = []

    # One JSON file per queued download.
    for file in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)):

        # Skip anything that is not JSON.
        if not file.endswith(".json"):
            continue

        # Load the saved item.
        file = os.path.join(DOWNLOAD_LIST_PATH, file)
        i = Item(path=file).fromjson(filetools.read(file))
        i.thumbnail = i.contentThumbnail

        # Top-level listing
        if not item.contentType == "tvshow":
            # TV shows: aggregate all episodes of a show into a single row.
            if i.contentType == "episode":
                # Only add the show if it is not in itemlist already.
                if not filter(lambda x: x.contentSerieName == i.contentSerieName
                              and x.contentChannel == i.contentChannel, itemlist):
                    title = TITLE_TVSHOW % (STATUS_COLORS[i.downloadStatus],
                                            i.downloadProgress, i.contentSerieName,
                                            i.contentChannel)

                    itemlist.append(Item(title=title, channel="descargas",
                                         action="mainlist", contentType="tvshow",
                                         contentSerieName=i.contentSerieName,
                                         contentChannel=i.contentChannel,
                                         downloadStatus=i.downloadStatus,
                                         downloadProgress=[i.downloadProgress],
                                         fanart=i.fanart, thumbnail=i.thumbnail))
                else:
                    # Show already listed: fold this episode's progress/state in.
                    s = filter(lambda x: x.contentSerieName == i.contentSerieName
                               and x.contentChannel == i.contentChannel, itemlist)[0]
                    s.downloadProgress.append(i.downloadProgress)
                    downloadProgress = sum(s.downloadProgress) / len(s.downloadProgress)

                    # Error/canceled states stick; completed/stopped episodes do not
                    # override the aggregated state.
                    if not s.downloadStatus in [STATUS_CODES.error, STATUS_CODES.canceled] \
                            and not i.downloadStatus in [STATUS_CODES.completed, STATUS_CODES.stoped]:
                        s.downloadStatus = i.downloadStatus

                    s.title = TITLE_TVSHOW % (STATUS_COLORS[s.downloadStatus],
                                              downloadProgress, i.contentSerieName,
                                              i.contentChannel)

            # Movies
            elif i.contentType == "movie" or i.contentType == "video":
                i.title = TITLE_FILE % (STATUS_COLORS[i.downloadStatus],
                                        i.downloadProgress, i.contentTitle)
                itemlist.append(i)

        # Listing inside one TV show
        else:
            if i.contentType == "episode" and i.contentSerieName == item.contentSerieName \
                    and i.contentChannel == item.contentChannel:
                i.title = TITLE_FILE % (STATUS_COLORS[i.downloadStatus],
                                        i.downloadProgress,
                                        "%dx%0.2d: %s" % (i.contentSeason,
                                                          i.contentEpisodeNumber,
                                                          i.contentTitle))
                itemlist.append(i)

    estados = [i.downloadStatus for i in itemlist]

    # Any download completed
    if 2 in estados:
        itemlist.insert(0, Item(channel=item.channel, action="clean_ready",
                                title="Eliminar descargas completadas",
                                contentType=item.contentType,
                                contentChannel=item.contentChannel,
                                contentSerieName=item.contentSerieName))

    # Any download with an error
    if 3 in estados:
        itemlist.insert(0, Item(channel=item.channel, action="restart_error",
                                title="Reiniciar descargas con error",
                                contentType=item.contentType,
                                contentChannel=item.contentChannel,
                                contentSerieName=item.contentSerieName))

    # Any download pending
    if 1 in estados or 0 in estados:
        itemlist.insert(0, Item(channel=item.channel, action="download_all",
                                title="Descargar todo",
                                contentType=item.contentType,
                                contentChannel=item.contentChannel,
                                contentSerieName=item.contentSerieName))

    if len(itemlist):
        itemlist.insert(0, Item(channel=item.channel, action="clean_all",
                                title="Eliminar todo",
                                contentType=item.contentType,
                                contentChannel=item.contentChannel,
                                contentSerieName=item.contentSerieName))

    return itemlist
def findvideos(item):
    """Collect the playable links for *item* and post-process them
    (server detection, link checking, language filter, autoplay, sorting)."""
    logger.info()
    itemlist = []
    page = get_source(item.url)

    for option, video_url in re.findall(r'id=option-(\d+).*?src=([^ ]+) frameborder',
                                        page, re.DOTALL):
        flag = scrapertools.find_single_match(
            page, 'href=#option-%s>.*?/flags/(.*?).png' % option)

        # goo.gl shorteners: grab the redirect target without following it.
        if 'goo.gl' in video_url:
            video_url = httptools.downloadpage(
                video_url, follow_redirects=False).headers['location']

        if flag not in IDIOMAS:
            flag = 'en'

        # title is a template filled in later with (server, language).
        itemlist.append(
            Item(channel=item.channel, url=video_url, title='%s %s', action='play',
                 quality='', language=IDIOMAS[flag], infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % (x.server.capitalize(), x.language))

    # Required for link filtering
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    itemlist.sort(key=lambda it: it.language)

    if (item.contentType != 'episode'
            and config.get_videolibrary_support()
            and itemlist and item.extra != 'findvideos'):
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url, action="add_pelicula_to_library",
                 extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
def list_all(item):
    """List the scraped catalogue page, paginating locally in batches of 25.

    item.first is the index of the first article to show from the scraped
    page; a "Siguiente >>" entry re-enters this action either on the same url
    (next local batch) or on the site's next page.
    """
    logger.info()
    itemlist = list()
    reached_end = False  # True once this batch shows the last scraped article

    soup = create_soup(item.url).find("div", class_="content")
    matches = soup.find_all("article", id=re.compile(r"^post-\d+"))

    first = item.first
    last = first + 25
    if last >= len(matches):
        last = len(matches)
        reached_end = True

    for elem in matches[first:last]:
        info_1 = elem.find("div", class_="poster")
        info_2 = elem.find("div", class_="data")

        thumb = info_1.img["src"]
        title = info_1.img["alt"]
        url = info_1.a["href"]
        try:
            year = info_2.find("span", text=re.compile(r"\d{4}")).text.strip()
        except:
            year = '-'
        lang = languages_from_flags(info_1.find("div", class_="bandera"), "png")

        itemlist.append(
            Item(channel=item.channel, title=title, url=url, action='findvideos',
                 thumbnail=thumb, contentTitle=title, language=lang,
                 infoLabels={'year': year}))

    tmdb.set_infoLabels_itemlist(itemlist, True)

    if not reached_end:
        # More local articles remain: paginate on the same url.
        url_next_page = item.url
        first = last
    else:
        # Batch exhausted: jump to the site's next page, if any.
        try:
            url_next_page = soup.find("a", class_="arrow_pag")["href"]
        except:
            return itemlist
        url_next_page = '%s' % url_next_page
        first = 0

    # Bug fix: the previous gate "len(matches) > 26" also suppressed LOCAL
    # pagination, so with exactly 26 scraped articles the 26th was unreachable.
    # When articles remain locally (not reached_end) the next-page entry must
    # always be offered; the original count heuristic is kept for site pages.
    if url_next_page and (not reached_end or len(matches) > 26):
        itemlist.append(
            Item(channel=item.channel, title="Siguiente >>", url=url_next_page,
                 action='list_all', first=first))
    return itemlist
def channels_list():
    """Return the static catalogue of available channels as a list of Items.

    Each Item carries the display title, the channel module name, its language,
    a category string (F/S/A/D/VOS/HD — presumably film/series/anime/
    documentary/subtitled/HD; verify against the category filter) and an
    optional remote thumbnail.
    """
    itemlist = []

    # Under review / disabled
    #itemlist.append( Item( title="Descarga Cine Clásico" , channel="descargacineclasico" , language="ES" , category="F,S" , type="generic" ))
    #itemlist.append( Item( title="Asia-Team" , channel="asiateam" , language="ES" , category="F,S" , type="generic" ))
    #itemlist.append( Item( title="Buena Isla" , channel="buenaisla" , language="ES" , category="A,VOS" , type="generic" ))

    # Generic "enter a URL" pseudo-channel
    itemlist.append( Item( viewmode="movie", title="Inserisci un URL" , channel="tengourl" , language="" , category="" , type="generic" ))

    # User-defined personal channels, only when enabled in settings
    if config.get_setting("personalchannel")=="true":
        itemlist.append( Item( title=config.get_setting("personalchannelname") , channel="personal" , language="" , category="" , type="generic" ))
    if config.get_setting("personalchannel2")=="true":
        itemlist.append( Item( title=config.get_setting("personalchannelname2") , channel="personal2" , language="" , category="" , type="generic" ))
    if config.get_setting("personalchannel3")=="true":
        itemlist.append( Item( title=config.get_setting("personalchannelname3") , channel="personal3" , language="" , category="" , type="generic" ))
    if config.get_setting("personalchannel4")=="true":
        itemlist.append( Item( title=config.get_setting("personalchannelname4") , channel="personal4" , language="" , category="" , type="generic" ))
    if config.get_setting("personalchannel5")=="true":
        itemlist.append( Item( title=config.get_setting("personalchannelname5") , channel="personal5" , language="" , category="" , type="generic" ))

    # Italian channels
    itemlist.append( Item( title="AltaDefinizione01" , channel="altadefinizione01" , language="IT" , category="F,S,A,HD" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"altadefinizione01.png"))
    itemlist.append( Item( title="Altadefinizione.click" , channel="altadefinizioneclick" , language="IT" , category="F,S,A,VOS,HD" , type="generic", thumbnail=THUMBNAIL_REMOTE+"altadefinizioneclick.png"))
    itemlist.append( Item( title="Anime Sub Ita" , channel="animesubita" , language="IT" , category="A,VOS" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"animesubita.png" ))
    itemlist.append( Item( title="Asian Sub-Ita" , channel="asiansubita" , language="IT" , category="F,S,VOS" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"asiansubita.png"))
    itemlist.append( Item( title="Casa-Cinema" , channel="casacinema" , language="IT" , category="F,S,A,VOS" , type="generic" , thumbnail=THUMBNAIL_REMOTE+"casacinema.png" ))
    itemlist.append( Item( title="CineBlog 01" , channel="cineblog01" , language="IT" , category="F,S,A,VOS,HD" , type="generic" ))
    #itemlist.append( Item( title="CineBlog01.FM" , channel="cineblogfm" , language="IT" , category="F,S" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"cineblogfm.png" ))
    itemlist.append( Item( title="Cinemagratis" , channel="cinemagratis" , language="IT" , category="F" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"cinemagratis.png"))
    #itemlist.append( Item( title="Cinestreaming01" , channel="cinestreaming01" , language="IT" , category="F" , type="generic" , extra="Series" ,thumbnail=THUMBNAIL_REMOTE+"cinestreaming01.png" ))
    itemlist.append( Item( title="Documentari Streaming" , channel="documentaristreaming" , language="IT" , category="D" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"documentaristreaming.png" ))
    itemlist.append( Item( title="Documoo" , channel="documoo" , language="IT" , category="D" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"documoo.png" ))
    itemlist.append( Item( title="Eurostreaming" , channel="eurostreaming" , language="IT" , category="F,S,A" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"eurostreaming.png"))
    itemlist.append( Item( title="Fastvideo.tv" , channel="fastvideotv" , language="IT" , category="F" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"fastvideotv.png"))
    itemlist.append( Item( title="FilmGratis.cc" , channel="filmgratiscc" , language="IT" , category="F" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"filmgratiscc.png" ))
    itemlist.append( Item( title="FilmStream.org" , channel="filmstream" , language="IT" , category="F,S" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"filmstream.png" ))
    itemlist.append( Item( title="FilmStream.to" , channel="filmstreampw" , language="IT" , category="F,S" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"filmstreampw.png" ))
    itemlist.append( Item( title="Film per tutti" , channel="filmpertutti" , language="IT" , category="F,S,A" , type="generic" ))
    itemlist.append( Item( title="Film Senza Limiti" , channel="filmsenzalimiti" , language="IT" , category="F" , type="generic" ))
    itemlist.append( Item( title="FilmSubito" , channel="filmsubitotv" , language="IT" , category="F,S,A,D" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"filmsubitotv.png" ))
    itemlist.append( Item( title="Guardaserie.net" , channel="guardaserie" , language="IT" , category="F,S,A" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"guardaserie.png" ))
    itemlist.append( Item( title="GuardareFilm" , channel="guardarefilm" , language="IT" , category="F,S,A" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"guardarefilm.png"))
    itemlist.append( Item( title="Hubberfilm" , channel="hubberfilm" , language="IT" , category="F,S,A" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"hubberfilm.png"))
    #itemlist.append( Item( title="ildocumento.it" , channel="ildocumento" , language="IT" , category="D" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"ildocumento.png" ))
    itemlist.append( Item( title="ItaFilm.tv" , channel="itafilmtv" , language="IT" , category="F,S,A,D" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"itafilmtv.png" ))
    itemlist.append( Item( title="Italia-Film.co" , channel="italiafilm" , language="IT" , category="F,S,A" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"italiafilm.png" ))
    itemlist.append( Item( title="Italian-Stream" , channel="italianstream" , language="IT" , category="F,S,HD,VOS" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"italianstream.png"))
    itemlist.append( Item( title="Italia Serie" , channel="italiaserie" , language="IT" , category="S,A" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"italiaserie.png"))
    itemlist.append( Item( title="ItaStreaming" , channel="itastreaming" , language="IT" , category="F" , type="generic", thumbnail=THUMBNAIL_REMOTE+"itastreaming.png"))
    itemlist.append( Item( title="LiberoITA" , channel="liberoita" , language="IT" , category="F" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"liberoita.png"))
    itemlist.append( Item( title="LiberoStreaming" , channel="liberostreaming" , language="IT" , category="F,S,A" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"liberostreaming.png"))
    itemlist.append( Item( title="Pianeta Streaming" , channel="pianetastreaming" , language="IT" , category="F" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"pianetastreaming.png" ))
    itemlist.append( Item( title="Pirate Streaming" , channel="piratestreaming" , language="IT" , category="F,S" , type="generic" ))
    #itemlist.append( Item( title="Serie HD" , channel="seriehd" , language="IT" , category="S" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"seriehd.png" ))
    itemlist.append( Item( title="Serie TV Sub ITA" , channel="serietvsubita" , language="IT" , category="S,VOS" , type="generic" , extra="Series" ,thumbnail=THUMBNAIL_REMOTE+"serietvsubita.png" ))
    itemlist.append( Item( title="StreamBlog" , channel="streamblog" , language="IT" , category="S,F,A" , type="generic" , extra="Series" ,thumbnail=THUMBNAIL_REMOTE+"streamblog.png" ))
    itemlist.append( Item( title="Streaming01" , channel="streaming01" , language="IT" , category="F" , type="generic" , extra="Series" ,thumbnail=THUMBNAIL_REMOTE+"streaming01.png" ))
    itemlist.append( Item( title="Tantifilm" , channel="tantifilm" , language="IT" , category="F, HD" , type="generic" ,thumbnail=THUMBNAIL_REMOTE+"tantifilm.png"))

    return itemlist
def filterchannels(category, preferred_thumb=""):
    """Return the list of channel Items matching *category*.

    Reads every ``channels/*.xml`` descriptor, filters by active flag,
    adult mode, configured language and category, sorts by title and —
    when category is "all" — prepends the personal channels and the
    "Tengo una URL" entry.

    Fixes vs. the previous revision:
      * removed a leftover debug ``print`` statement (Py3-incompatible),
      * the joined category string is no longer truncated by ``[:-2]``
        (that chopped the last two characters of the final category).
    """
    logger.info("channelselector.filterchannels")
    channelslist = []

    # Read the list of channel descriptors
    channel_path = os.path.join(config.get_runtime_path(), "channels", '*.xml')
    logger.info("channelselector.filterchannels channel_path=" + channel_path)
    channel_files = glob.glob(channel_path)
    logger.info("channelselector.filterchannels channel_files encontrados " + str(len(channel_files)))

    channel_language = config.get_setting("channel_language")
    logger.info("channelselector.filterchannels channel_language=" + channel_language)
    if channel_language == "":
        channel_language = "all"
        logger.info("channelselector.filterchannels channel_language=" + channel_language)

    for channel in channel_files:
        logger.info("channelselector.filterchannels channel=" + channel)
        if not channel.endswith(".xml"):
            continue
        try:
            channel_parameters = channeltools.get_channel_parameters(channel[:-4])
            logger.info("channelselector.filterchannels channel_parameters=" + repr(channel_parameters))

            # If the banner-menu thumbnail is preferred and the channel has
            # one, use it instead of the regular thumbnail
            if preferred_thumb == "bannermenu" and "bannermenu" in channel_parameters:
                channel_parameters["thumbnail"] = channel_parameters["bannermenu"]

            # Skip inactive channels
            if not channel_parameters["active"] == "true":
                continue

            # Skip adult channels when adult mode is disabled
            if channel_parameters["adult"] == "true" and config.get_setting("adult_mode") != "true":
                continue

            # Skip channels filtered out by language
            if channel_language != "all" and channel_parameters["language"] != config.get_setting("channel_language"):
                continue

            # Skip channels filtered out by category
            if category != "all" and category not in channel_parameters["categories"]:
                continue

            # Hide sports channels (except the updater) in the "all" listing
            if category == "all" and "deportes" in channel_parameters["categories"]:
                if channel_parameters["channel"] != "update_sports":
                    continue

            # Stop any playing media when entering the sports section
            if category == "deportes":
                import xbmc
                xbmc.executebuiltin('xbmc.PlayMedia(Stop)')

            # If we got this far, add the channel
            channelslist.append(Item(title=channel_parameters["title"],
                                     channel=channel_parameters["channel"],
                                     action="mainlist",
                                     thumbnail=channel_parameters["thumbnail"],
                                     fanart=channel_parameters["fanart"],
                                     # BUGFIX: previously truncated with [:-2]
                                     category=", ".join(channel_parameters["categories"]),
                                     language=channel_parameters["language"],
                                     type=channel_parameters["type"]))
        except:
            # Best effort: a broken descriptor must not kill the whole menu
            logger.info("Se ha producido un error al leer los datos del canal " + channel)
            import traceback
            logger.info(traceback.format_exc())

    channelslist.sort(key=lambda item: item.title.lower().strip())

    if category == "all":
        # Inserted in reverse order so personal channel 1 ends up first
        if config.get_setting("personalchannel5") == "true":
            channelslist.insert(0, Item(title=config.get_setting("personalchannelname5"), action="mainlist",
                                        channel="personal5", thumbnail=config.get_setting("personalchannellogo5"),
                                        type="generic"))
        if config.get_setting("personalchannel4") == "true":
            channelslist.insert(0, Item(title=config.get_setting("personalchannelname4"), action="mainlist",
                                        channel="personal4", thumbnail=config.get_setting("personalchannellogo4"),
                                        type="generic"))
        if config.get_setting("personalchannel3") == "true":
            channelslist.insert(0, Item(title=config.get_setting("personalchannelname3"), action="mainlist",
                                        channel="personal3", thumbnail=config.get_setting("personalchannellogo3"),
                                        type="generic"))
        if config.get_setting("personalchannel2") == "true":
            channelslist.insert(0, Item(title=config.get_setting("personalchannelname2"), action="mainlist",
                                        channel="personal2", thumbnail=config.get_setting("personalchannellogo2"),
                                        type="generic"))
        if config.get_setting("personalchannel") == "true":
            channelslist.insert(0, Item(title=config.get_setting("personalchannelname"), action="mainlist",
                                        channel="personal", thumbnail=config.get_setting("personalchannellogo"),
                                        type="generic"))

        # NOTE(review): the original indentation was lost; the "tengourl"
        # entry is placed inside the category=="all" branch, matching the
        # upstream channelselector layout — confirm against callers.
        channel_parameters = channeltools.get_channel_parameters("tengourl")
        # If the banner-menu thumbnail is preferred and available, use it
        if preferred_thumb == "bannermenu" and "bannermenu" in channel_parameters:
            channel_parameters["thumbnail"] = channel_parameters["bannermenu"]
        channelslist.insert(0, Item(title="Tengo una URL", action="mainlist", channel="tengourl",
                                    thumbnail=channel_parameters["thumbnail"], type="generic"))

    return channelslist
def _findvid_append_links(itemlist, item, data, block_patron, log_label, color, quality):
    """Scrape one link section of a cineblog01 film page.

    Extracts the HTML block matched by *block_patron*, finds every
    ``<td><a href=...>`` link inside it and appends one playable Item per
    link to *itemlist* (mutated in place).
    """
    block = scrapertools.find_single_match(data, block_patron)
    patron = '<td><a\s*href="([^"]+)"\s*target="_blank">([^<]+)</a></td>'
    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(block):
        logger.debug("##### findvideos %s ## %s ## %s ##" % (log_label, scrapedurl, scrapedtitle))
        title = ("[COLOR " + color + "]" + log_label + ":[/COLOR] " + item.title +
                 " [COLOR grey]" + quality + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]")
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 url=scrapedurl,
                 fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 folder=False))


def findvid_film(item):
    """Find the streaming/download links of a cineblog01 film page.

    The page groups its links in up to five sections (Streaming,
    Streaming HD, Streaming 3D, Download, Download HD); all five are
    scraped with the same link pattern, so the previous five copy-pasted
    loops are replaced by one table-driven helper.  Falls back to the
    generic server detection when no link is found.
    """
    logger.info("[cineblog01.py] findvideos")
    itemlist = []

    # Download the page (behind Cloudflare) and normalize entities
    data = scrapertools.anti_cloudflare(item.url, headers)
    data = scrapertools.decodeHtmlentities(data)

    # Extract the quality tag; the last match wins, [6:] drops a fixed
    # prefix of the matched text (as in the original code)
    QualityStr = ""
    for match in re.compile('>([^<]+)</strong></div>', re.DOTALL).finditer(data):
        QualityStr = scrapertools.unescape(match.group(1))[6:]

    # (section boundary pattern, label, title colour) for each link group
    sections = [
        ('<strong>Streaming:</strong>(.*?)<table height="30">', "Streaming", "orange"),
        ('<strong>Streaming HD[^<]+</strong>(.*?)<table height="30">', "Streaming HD", "yellow"),
        ('<strong>Streaming 3D[^<]+</strong>(.*?)<table height="30">', "Streaming 3D", "pink"),
        ('<strong>Download:</strong>(.*?)<table height="30">', "Download", "aqua"),
        ('<strong>Download HD[^<]+</strong>(.*?)<table width="100%" height="20">', "Download HD", "azure"),
    ]
    for block_patron, log_label, color in sections:
        _findvid_append_links(itemlist, item, data, block_patron, log_label, color, QualityStr)

    # Nothing matched: fall back to the generic server link detection
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(item=item)

    return itemlist
def gay(item): logger.info("[filesmonster_catalogue.py] mainlist") itemlist = [] itemlist.append( Item( channel=__channel__, action="lista_categoria", title="gay películas completas", url= "http://filesmonster.filesdl.net/posts/category/27/gay_full-length-films", thumbnail= "http://photosex.biz/imager/w_400/h_500/a1acc8a1f273ac8bfae2130802e21188.jpg" )) itemlist.append( Item( channel=__channel__, action="lista_categoria", title="gays secuencias", url="http://filesmonster.filesdl.net/posts/category/29/gays", thumbnail= "http://photosex.biz/imager/w_400/h_500/296f152596d45be35c23b14a54271735.jpg" )) itemlist.append( Item( channel=__channel__, action="lista_categoria", title="gays solo", url="http://filesmonster.filesdl.net/posts/category/61/gay-solo", thumbnail= "http://photosex.biz/imager/w_400/h_500/947ac329d696c7a152f8e7be48eb1a71.jpg" )) itemlist.append( Item( channel=__channel__, action="lista_categoria", title="gay 3D stereo", url= "http://filesmonster.filesdl.net/posts/category/57/gay-3d-stereo", thumbnail= "http://photosex.biz/imager/w_400/h_500/bbf39e2cfc6d773ee5aaa9d99be8a090.jpg" )) itemlist.append( Item( channel=__channel__, action="lista_categoria", title="gay asian", url="http://filesmonster.filesdl.net/posts/category/25/gay-asian", thumbnail= "http://photosex.biz/imager/w_400/h_500/b6fcd3980304cc67bd7d794c46ef7200.jpg" )) itemlist.append( Item( channel=__channel__, action="lista_categoria", title="gay BDSM", url="http://filesmonster.filesdl.net/posts/category/26/gay-bdsm", thumbnail= "http://photosex.biz/imager/w_400/h_500/db31625ff8b6055d406405ff9abdcbf9.jpg" )) itemlist.append( Item( channel=__channel__, action="lista_categoria", title="lesbians", url="http://filesmonster.filesdl.net/posts/category/36/lesbians", thumbnail= "http://photosex.biz/imager/w_400/h_500/769c05b9ae6bfa6c082924a759d66bc3.jpg" )) return itemlist
def peliculas(item):
    """List the films of a cineblog01 index page.

    Scrapes every film entry (thumbnail, url, title, quality line, plot)
    from *item.url* and appends one Item per film, plus a "next page"
    entry and a "back home" entry when pagination markup is found.

    Fixes vs. the previous revision: a stray trailing comma after an
    ``itemlist.append(...)`` call built and discarded a one-element
    tuple; the bare ``except:`` is narrowed to ``except Exception``
    (pagination stays best-effort).
    """
    logger.info("[cineblog01.py] mainlist")
    itemlist = []
    if item.url == "":
        item.url = sito

    # Download the page (behind Cloudflare)
    data = scrapertools.anti_cloudflare(item.url, headers)

    # Extract the entries (one <div class="span4">/<div class="span8"> pair per film)
    patronvideos = '<div class="span4".*?<a.*?<p><img src="([^"]+)".*?'
    patronvideos += '<div class="span8">.*?<a href="([^"]+)"> <h1>([^"]+)</h1></a>.*?'
    patronvideos += '<strong>([^<]*)</strong>.*?<br />([^<+]+)'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)

    for match in matches:
        scrapedtitle = scrapertools.unescape(match.group(3))
        scrapedurl = urlparse.urljoin(item.url, match.group(2))
        scrapedthumbnail = urlparse.urljoin(item.url, match.group(1))
        scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
        # Plot = coloured quality line + cleaned synopsis
        scrapedplot = scrapertools.unescape("[COLOR orange]" + match.group(4) + "[/COLOR]\n" + match.group(5).strip())
        scrapedplot = scrapertools.htmlclean(scrapedplot).strip()
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 extra=item.extra,
                 viewmode="movie_with_plot"),
            tipo='movie'))

    # Next-page mark (best effort: missing pagination is not an error)
    try:
        bloque = scrapertools.get_match(data, "<div id='wp_page_numbers'>(.*?)</div>")
        patronvideos = '<a href="([^"]+)">></a></li>'
        matches = re.compile(patronvideos, re.DOTALL).findall(bloque)
        scrapertools.printMatches(matches)
        if len(matches) > 0:
            scrapedtitle = "[COLOR orange]Successivo>>[/COLOR]"
            scrapedurl = matches[0]
            scrapedthumbnail = ""
            scrapedplot = ""
            if DEBUG:
                logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
            # BUGFIX: the old code had a stray trailing comma here that
            # built and discarded a one-element tuple
            itemlist.append(
                Item(channel=__channel__,
                     action="HomePage",
                     title="[COLOR yellow]Torna Home[/COLOR]",
                     folder=True))
            itemlist.append(
                Item(channel=__channel__,
                     action="peliculas",
                     title=scrapedtitle,
                     url=scrapedurl,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/vari/successivo_P.png",
                     extra=item.extra,
                     plot=scrapedplot))
    except Exception:
        pass

    return itemlist
def hetero(item):
    """Build the menu of hetero categories of the filesmonster catalogue."""
    logger.info("[filesmonster_catalogue.py] mainlist")

    # (title, category url, thumbnail) for every menu entry, in menu order
    categories = (
        ("hetero películas completas",
         "http://filesmonster.filesdl.net/posts/category/24/full-length-films",
         "http://photosex.biz/imager/w_400/h_500/1325b1c8b815adc9284102d794264452.jpg"),
        ("hetero 3D stereo",
         "http://filesmonster.filesdl.net/posts/category/59/3d-stereo",
         "http://photosex.biz/imager/w_400/h_500/10055decd738171dae2a6a28345472dd.jpg"),
        ("hetero comic 3D",
         "http://filesmonster.filesdl.net/posts/category/1/3d-porno",
         "http://photosex.biz/imager/w_400/h_500/941ebaa6185e37357c6156f602413ada.jpg"),
        ("hetero amateurish",
         "http://filesmonster.filesdl.net/posts/category/2/amateurish",
         "http://photosex.biz/imager/w_400/h_500/a91255afe5b24291371049d92c9cdab5.jpg"),
        ("hetero anal",
         "http://filesmonster.filesdl.net/posts/category/3/anal",
         "http://photosex.biz/imager/w_400/h_500/0a23d1fe93f54d82f2d28024861520a4.jpg"),
        ("hetero anime & hentai",
         "http://filesmonster.filesdl.net/posts/category/4/anime-and-hentai",
         "http://photosex.biz/imager/w_400/h_500/af4fb300f92b1105df4c07ad9696f766.jpg"),
        ("hetero BBW",
         "http://filesmonster.filesdl.net/posts/category/5/bbw",
         "http://photosex.biz/imager/w_400/h_500/6bcfac1425b8de6a58ee286b46258748.jpg"),
        ("hetero BDSM",
         "http://filesmonster.filesdl.net/posts/category/6/bdsm",
         "http://photosex.biz/imager/w_400/h_500/70bc7c96aa53462041c7d49e399f432a.jpg"),
        ("hetero Big boobs",
         "http://filesmonster.filesdl.net/posts/category/7/big-boobs",
         "http://photosex.biz/imager/w_400/h_500/9191bc193317636437a4cf455ec965fe.jpg"),
        ("hetero Black",
         "http://filesmonster.filesdl.net/posts/category/9/black",
         "http://photosex.biz/imager/w_400/h_500/0623dffa59ef4fe0e0a1d33aa9ab29bd.jpg"),
        ("hetero Blondes",
         "http://filesmonster.filesdl.net/posts/category/10/blondes",
         "http://photosex.biz/imager/w_400/h_500/034bee14caa0e900c89ae398fcbf3b84.jpg"),
        ("hetero Bukkake",
         "http://filesmonster.filesdl.net/posts/category/11/bukkake",
         "http://photosex.biz/imager/w_400/h_500/816de59d8bdc8d53c44389358497a653.jpg"),
        # NOTE(review): "cartoons" reuses the bukkake category URL in the
        # original code — looks like a copy-paste slip; the correct
        # category id is unknown here, so behavior is preserved.
        ("hetero cartoons",
         "http://filesmonster.filesdl.net/posts/category/11/bukkake",
         "http://photosex.biz/imager/w_400/h_500/ad7d50d20e160ffadce73409cbd1487f.jpg"),
        ("hetero Classic sex",
         "http://filesmonster.filesdl.net/posts/category/15/classic-sex",
         "http://photosex.biz/imager/w_400/h_500/4bc7e7d2dc93151d5a57d7104c9147fd.jpg"),
        ("hetero Erotic & Softcore",
         "http://filesmonster.filesdl.net/posts/category/18/erotic-and-softcore",
         "http://photosex.biz/imager/w_400/h_500/90007536f2c2b512814f8769bb111f3e.jpg"),
        ("hetero Erotic games",
         "http://filesmonster.filesdl.net/posts/category/19/erotic-games",
         "http://photosex.biz/imager/w_400/h_500/94c6115340d1a8ae59dacd2a77764f11.jpg"),
        ("hetero Celebrities",
         "http://filesmonster.filesdl.net/posts/category/13/celebrities",
         "http://photosex.biz/imager/w_400/h_500/56f91731abb52581079e0a16b1c39c2b.jpg"),
        ("hetero Extremals",
         "http://filesmonster.filesdl.net/posts/category/20/extremals",
         "http://photosex.biz/imager/w_400/h_500/04d7dc6ffec80a5f62b1a177bdd654f9.jpg"),
        ("hetero Fisting and dildos",
         "http://filesmonster.filesdl.net/posts/category/22/fisting-and-d***o",
         "http://photosex.biz/imager/w_400/h_500/219460b181092604f5def92218438b49.jpg"),
        ("hetero Gonzo (point of view)",
         "http://filesmonster.filesdl.net/posts/category/30/gonzo-point-of-view",
         "http://photosex.biz/imager/w_400/h_500/6afe15689146c36584d860454f214958.jpg"),
        ("hetero HD Clips",
         "http://filesmonster.filesdl.net/posts/category/32/hd-clips",
         "http://photosex.biz/imager/w_400/h_500/f686c6debbd6ae955759dd4a19b01dd2.jpg"),
        ("hetero Hairy",
         "http://filesmonster.filesdl.net/posts/category/31/hairy",
         "http://photosex.biz/imager/w_400/h_500/635c8d55149d7e605266d13b44c0878d.jpg"),
        ("hetero Hidden Camera",
         "http://filesmonster.filesdl.net/posts/category/33/hidden-camera",
         "http://photosex.biz/imager/w_400/h_500/e6917ab0a62ec31a731d6f25e463e40a.jpg"),
        ("hetero Incest",
         "http://filesmonster.filesdl.net/posts/category/34/incest",
         "http://photosex.biz/imager/w_400/h_500/2c4d105d75997341fc6e0afd57eef356.jpg"),
        ("hetero Interracial",
         "http://filesmonster.filesdl.net/posts/category/56/interracial",
         "http://photosex.biz/imager/w_400/h_500/4fde1e749b6d61873df92f0e3abe92ed.jpg"),
        ("hetero Latino",
         "http://filesmonster.filesdl.net/posts/category/35/latino",
         "http://photosex.biz/imager/w_400/h_500/813b0664b0fe11428cb8c2f8b009ac7b.jpg"),
        ("hetero Masturbation",
         "http://filesmonster.filesdl.net/posts/category/38/masturbation",
         "http://photosex.biz/imager/w_400/h_500/712dcd4f440eb1d0e1b133d871043110.jpg"),
        ("hetero Mature MILF",
         "http://filesmonster.filesdl.net/posts/category/39/mature-milf",
         "http://photosex.biz/imager/w_400/h_500/b7880db52c6e2e37d67b4a2e6f460af6.jpg"),
        ("hetero old and young",
         "http://filesmonster.filesdl.net/posts/category/64/old-and-young",
         "http://photosex.biz/imager/w_400/h_500/d8c0e036d6f45cf8c8ad3928c5421c2e.jpg"),
        ("hetero Oral",
         "http://filesmonster.filesdl.net/posts/category/40/oral",
         "http://photosex.biz/imager/w_400/h_500/ba52125bd04ed774ebb7595a0442e573.jpg"),
        ("hetero Orgies",
         "http://filesmonster.filesdl.net/posts/category/41/orgies",
         "http://photosex.biz/imager/w_400/h_500/77c340bcca3e8b8004b74e8fa7bea399.jpg"),
        ("hetero Public sex",
         "http://filesmonster.filesdl.net/posts/category/46/public-sex",
         "http://photosex.biz/imager/w_400/h_500/4a84ad103e455207c6c5b802059ae403.jpg"),
        ("hetero Retro",
         "http://filesmonster.filesdl.net/posts/category/47/retro",
         "http://photosex.biz/imager/w_400/h_500/746e0d35e98a08e78f55fc3e563e207c.jpg"),
        ("hetero Russian",
         "http://filesmonster.filesdl.net/posts/category/48/russian",
         "http://photosex.biz/imager/w_400/h_500/7accc2b596dd5585cfacba1075151b72.jpg"),
        ("hetero Teens",
         "http://filesmonster.filesdl.net/posts/category/51/teens",
         "http://photosex.biz/imager/w_400/h_500/a3461fcab14e6afb675a7baf6fb322bc.jpg"),
        ("hetero Threesome",
         "http://filesmonster.filesdl.net/posts/category/65/threesome",
         "http://photosex.biz/imager/w_400/h_500/273e59c21ffc9d05a2133b29749efab8.jpg"),
        ("hetero Uncensored asian",
         "http://filesmonster.filesdl.net/posts/category/53/uncensored-asian",
         "http://photosex.biz/imager/w_400/h_500/25a5c2c40cdc625dae96075423a6f2d4.jpg"),
        # NOTE(review): duplicate of the earlier "old and young" entry,
        # present in the original code — kept to preserve behavior.
        ("hetero old and young",
         "http://filesmonster.filesdl.net/posts/category/64/old-and-young",
         "http://photosex.biz/imager/w_400/h_500/d8c0e036d6f45cf8c8ad3928c5421c2e.jpg"),
    )

    itemlist = []
    for cat_title, cat_url, cat_thumb in categories:
        itemlist.append(Item(channel=__channel__,
                             action="lista_categoria",
                             title=cat_title,
                             url=cat_url,
                             thumbnail=cat_thumb))
    return itemlist
def detail_2(item): logger.info() itemlist = [] # descarga la pagina data = scrapertools.downloadpageGzip(item.url) data = data.split( '<span class="filesmonsterdlbutton">Download from Filesmonster</span>') data = data[0] # descubre la url patronvideos = 'href="http://filesmonster.com/download.php(.*?)".(.*?)' matches = re.compile(patronvideos, re.DOTALL).findall(data) for match2 in matches: url = "http://filesmonster.com/download.php" + match2[0] title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle) itemlist.append( Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle, url=url, thumbnail=item.thumbnail, folder=False)) itemlist.append( Item(channel=item.channel, action="anadir_favorito", title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=match2[0], thumbnail=item.thumbnail, plot="el archivo", folder=True)) itemlist.append(Item(channel=item.channel, title="")) patronvideos = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']' matches = re.compile(patronvideos, re.DOTALL).findall(data) for url in matches: if not url == item.url: logger.info(url) logger.info(item.url) title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle) itemlist.append( Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url, thumbnail=item.thumbnail, folder=True)) itemlist.append( Item(channel=item.channel, action="anadir_favorito", title= "(+) Añadir la carpeta a tus favoritos en filesmonster", url=url, thumbnail=item.thumbnail, plot="la carpeta", folder=True)) itemlist.append(Item(channel=item.channel, title="")) return itemlist