def listas(item):
    logger.info()
    # Add lists to the video library, in the CINE folder
    itemlist = []
    data = anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load(data)
    head = header_string + get_cookie_value()
    error = False  # safe default in case the list is empty
    for child in data["sortedRepoChilds"]:
        infolabels = {}
        # Fanart
        if child['hashBackground']:
            fanart = host + "/json/repo/%s/background.jpg" % child["id"]
        else:
            fanart = host + "/json/repo/%s/thumbnail.jpg" % child["id"]
        # Thumbnail
        if child['hasPoster']:
            thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"]
        else:
            thumbnail = fanart
        thumbnail += head
        fanart += head
        url = host + "/json/repo/%s/index.json" % child["id"]
        if child['name'] == "":
            title = scrapertools.slugify(child['id'].rsplit(".", 1)[0])
        else:
            title = scrapertools.slugify(child['name'])
        title = title.replace('-', ' ').replace('_', ' ').capitalize()
        infolabels['title'] = title
        try:
            from core import videolibrarytools
            new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos",
                                  thumbnail=thumbnail, infoLabels=infolabels, category="Cine")
            videolibrarytools.library.add_movie(new_item)
            error = False
        except:
            error = True
            import traceback
            logger.error(traceback.format_exc())

    if not error:
        itemlist.append(Item(channel=item.channel, title='Lista añadida correctamente a la videoteca',
                             action="", folder=False))
    else:
        itemlist.append(Item(channel=item.channel, title='ERROR. Han ocurrido uno o varios errores en el proceso',
                             action="", folder=False))
    return itemlist
def listas(item): logger.info("pelisalacarta.channels.tvvip listas") # Para añadir listas a la biblioteca en carpeta CINE itemlist = [] data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers) data = jsontools.load_json(data) head = header_string + get_cookie_value() for child in data["sortedRepoChilds"]: infolabels = {} infolabels['plot'] = child['description'] infolabels['year'] = data['year'] if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) infolabels['rating'] = child['rate'].replace(',', '.') infolabels['votes'] = child['rateCount'] infolabels['duration'] = child['duration'] if child['cast']: infolabels['cast'] = child['cast'].split(",") infolabels['director'] = child['director'] # Fanart if child['hashBackground']: fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child["id"] else: fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child["id"] # Thumbnail if child['hasPoster']: thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child["id"] else: thumbnail = fanart thumbnail += head fanart += head url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"] if child['name'] == "": title = scrapertools.slugify(child['id'].rsplit(".", 1)[0]) else: title = scrapertools.slugify(child['name']) title = title.replace('-', ' ').replace('_', ' ').capitalize() infolabels['title'] = title try: from platformcode import library new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, thumbnail=thumbnail, infoLabels=infolabels, category="Cine") library.save_library_movie(new_item) error = False except: error = True pass if not error: itemlist.append(Item(channel=item.channel, title='Lista añadida correctamente a la biblioteca', action="", folder=False)) else: itemlist.append(Item(channel=item.channel, title='ERROR. Han ocurrido uno o varios errores en el proceso', action="", folder=False)) return itemlist
def listas(item):
    logger.info()
    # Add lists to the video library, in the CINE folder
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)
    error = False  # safe default in case the list is empty
    for child in data.get("a", []):
        infolabels = {}
        # Fanart
        fanart = host % "movie/%s/background_1080.jpg" % child["id"]
        thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"]
        url = host % "movie/%s/movie.js" % child["id"]
        if child['name'] == "":
            title = scrapertools.slugify(child['id'].rsplit(".", 1)[0])
        else:
            title = scrapertools.slugify(child['name'])
        title = title.replace('-', ' ').replace('_', ' ')
        title = unicode(title, "utf-8").capitalize().encode("utf-8")
        infolabels['title'] = title
        try:
            from core import videolibrarytools
            # Append the User-Agent the player needs to fetch the artwork
            thumbnail += "|User-Agent=%s" % httptools.get_user_agent()
            new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos",
                                  thumbnail=thumbnail, infoLabels=infolabels, category="Cine")
            videolibrarytools.add_movie(new_item)
            error = False
        except:
            error = True
            import traceback
            logger.error(traceback.format_exc())

    if not error:
        itemlist.append(Item(channel=item.channel, title='Lista añadida correctamente a la videoteca', action=""))
    else:
        itemlist.append(Item(channel=item.channel, title='ERROR. Han ocurrido uno o varios errores en el proceso',
                             action=""))
    return itemlist
def listas(item): logger.info("pelisalacarta.channels.tvvip listas") # Para añadir listas a la biblioteca en carpeta CINE itemlist = [] data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers) data = jsontools.load_json(data) head = header_string + get_cookie_value() for child in data["sortedRepoChilds"]: infolabels = {} # Fanart if child['hashBackground']: fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child["id"] else: fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child["id"] # Thumbnail if child['hasPoster']: thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child["id"] else: thumbnail = fanart thumbnail += head fanart += head url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"] if child['name'] == "": title = scrapertools.slugify(child['id'].rsplit(".", 1)[0]) else: title = scrapertools.slugify(child['name']) title = title.replace('-', ' ').replace('_', ' ').capitalize() infolabels['title'] = title try: from core import library new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos", thumbnail=thumbnail, infoLabels=infolabels, category="Cine") library.add_pelicula_to_library(new_item) error = False except: error = True import traceback logger.info(traceback.format_exc()) if not error: itemlist.append(Item(channel=item.channel, title='Lista añadida correctamente a la biblioteca', action="", folder=False)) else: itemlist.append(Item(channel=item.channel, title='ERROR. Han ocurrido uno o varios errores en el proceso', action="", folder=False)) return itemlist
def series(item): logger.info("[mitele.py] series") itemlist = [] # Extrae los programas data = scrapertools.downloadpage("http://cdn-search-mediaset.carbyne.ps.ooyala.com/search/v1/full/providers/104951/mini?q={%22types%22:%22tv_series%22,%22genres%22:[%22_ca_"+item.extra+"%22],%22page_size%22:%221000%22,%22product_id%22:[%22Free_Web%22,%22Free_Web_Mobile%22,%22Register_Web%22,%22Free_Live_Web%22,%22Register_Live_Web%22]}&format=full&size=200&include_titles=Series,Season&&product_name=test&format=full", headers=GLOBAL_HEADERS) data = jsontools.load_json(data)["hits"]["hits"] for child in data: child = child["_source"] emision = "No" for additional_metadata in child["additional_metadata"]: if additional_metadata["key"]=="categoria_principal": category_slug = additional_metadata["value"] if additional_metadata["key"]=="en_emision": emision = additional_metadata["value"] title = child["localizable_titles"][0]["title_long"] emision = child["additional_metadata"][2]["value"] if emision == "Si": title += "[COLOR red] [En emisión][/COLOR]" try: thumbnail = child["images"][0]["url"] except: try: thumbnail = child["thumbnail"]["url"] except: thumbnail = "" try: fanart = child["images"][1]["url"] except: fanart = "" try: plot = child["localizable_titles"][0]["summary_long"] except: plot = "" if category_slug=="_ca_series": category_slug="_ca_series-online" elif category_slug=="_ca_programas": category_slug="_ca_programas-tv" clean_title = re.compile("\[COLOR.*?\[\/COLOR\]",re.DOTALL).sub("",title) clean_title = scrapertools.slugify(clean_title) page = "https://www.mitele.es/"+category_slug[4:]+"/"+clean_title+"/"+child["external_id"] uid = "https://www.mitele.es/"+category_slug[4:]+"/"+clean_title logger.info("page="+page) url = child["external_id"] itemlist.append( Item(channel=__channel__, action="temporadas" , title=title, fulltitle=title, url=url, thumbnail=thumbnail, plot=plot, show=title, category=item.category, page=page, uid=uid, fanart=fanart)) itemlist.sort(key=lambda item: item.title) return itemlist
def generos(item): logger.info("pelisalacarta.channels.oranline generos") itemlist = [] genres = {'Deporte': '3/Sports%20Film.jpg', 'Película de la televisión': '3/Tv%20Movie.jpg', 'Estrenos de cine': '0/New%20Releases.png', 'Estrenos dvd y hd': '0/HDDVD%20Bluray.png'} # Descarga la página data = scrapertools.downloadpage(item.url) bloque = scrapertools.find_single_match(data, '<div class="sub_title">Géneros</div>(.*?)</ul>') # Extrae las entradas patron = '<li><a href="([^"]+)".*?<i>(.*?)</i>.*?<b>(.*?)</b>' matches = scrapertools.find_multiple_matches(bloque, patron) for scrapedurl, scrapedtitle, cuantas in matches: scrapedtitle = scrapedtitle.strip().capitalize() title = scrapedtitle + " (" + cuantas + ")" name_thumb = scrapertools.slugify(scrapedtitle) if scrapedtitle == "Foreign" or scrapedtitle == "Suspense" or scrapedtitle == "Thriller": thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/2/%s.jpg" \ % name_thumb.capitalize() elif scrapedtitle in genres: thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/%s" \ % genres[scrapedtitle] else: thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/1/%s.jpg" \ % name_thumb.replace("-", "%20") if DEBUG: logger.info("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, scrapedurl, thumbnail)) itemlist.append(item.clone(action="peliculas", title=title, url=scrapedurl, thumbnail=thumbnail, text_color=color2)) return itemlist
def savebookmark(canal=CHANNELNAME, titulo="", url="", thumbnail="", server="", plot="", fulltitle="",
                 savepath=BOOKMARK_PATH):
    logger.info("[favoritos.py] savebookmark(path=" + savepath + ")")

    # Create the favorites directory if it does not exist
    if not usingsamba(savepath):
        try:
            os.mkdir(savepath)
        except:
            pass

    # Read all the files
    if usingsamba(savepath):
        ficheros = samba.get_files(savepath)
    else:
        ficheros = os.listdir(savepath)
    ficheros.sort()

    # Work out the last number used
    if len(ficheros) > 0:
        # XRJ: problematic line, replaced by the loop below
        # filenumber = int( ficheros[len(ficheros)-1][0:-4] )+1
        filenumber = 1
        for fichero in ficheros:
            logger.info("[favoritos.py] fichero=" + fichero)
            try:
                tmpfilenumber = int(fichero[0:8]) + 1
                if tmpfilenumber > filenumber:
                    filenumber = tmpfilenumber
            except:
                pass
    else:
        filenumber = 1

    # Build the file content
    filecontent = ""
    filecontent = filecontent + urllib.quote_plus(downloadtools.limpia_nombre_excepto_1(titulo)) + "\n"
    filecontent = filecontent + urllib.quote_plus(url) + "\n"
    filecontent = filecontent + urllib.quote_plus(thumbnail) + "\n"
    filecontent = filecontent + urllib.quote_plus(server) + "\n"
    filecontent = filecontent + urllib.quote_plus(downloadtools.limpia_nombre_excepto_1(plot)) + "\n"
    filecontent = filecontent + urllib.quote_plus(fulltitle) + "\n"
    filecontent = filecontent + urllib.quote_plus(canal) + "\n"

    # Build the file name
    from core import scrapertools
    filename = "%08d-%s.txt" % (filenumber, scrapertools.slugify(fulltitle))
    logger.info("[favoritos.py] savebookmark filename=" + filename)

    # Write the file
    if not usingsamba(savepath):
        fullfilename = os.path.join(savepath, filename)
        bookmarkfile = open(fullfilename, "w")
        bookmarkfile.write(filecontent)
        bookmarkfile.flush()
        bookmarkfile.close()
    else:
        samba.write_file(filename, filecontent, savepath)
def savebookmark(canal=CHANNELNAME, titulo="", url="", thumbnail="", server="", plot="", fulltitle="", savepath=BOOKMARK_PATH): logger.info("streamondemand.channels.favoritos savebookmark(path="+savepath+")") # Crea el directorio de favoritos si no existe if not samba.usingsamba(savepath): try: os.mkdir(savepath) except: pass # Lee todos los ficheros if samba.usingsamba(savepath): ficheros = samba.get_files(savepath) else: ficheros = os.listdir(savepath) ficheros.sort() # Averigua el último número if len(ficheros) > 0: # XRJ: Linea problemática, sustituida por el bucle siguiente # filenumber = int( ficheros[len(ficheros)-1][0:-4] )+1 filenumber = 1 for fichero in ficheros: logger.info("streamondemand.channels.favoritos fichero="+fichero) try: tmpfilenumber = int(fichero[0:8])+1 if tmpfilenumber > filenumber: filenumber = tmpfilenumber except: pass else: filenumber = 1 # Genera el contenido filecontent = "" filecontent = filecontent + urllib.quote_plus(titulo)+'\n' filecontent = filecontent + urllib.quote_plus(url)+'\n' filecontent = filecontent + urllib.quote_plus(thumbnail)+'\n' filecontent = filecontent + urllib.quote_plus(server)+'\n' filecontent = filecontent + urllib.quote_plus(plot)+'\n' filecontent = filecontent + urllib.quote_plus(fulltitle)+'\n' filecontent = filecontent + urllib.quote_plus(canal)+'\n' # Genera el nombre de fichero from core import scrapertools filename = '%08d-%s.txt' % (filenumber, scrapertools.slugify(fulltitle)) logger.info("streamondemand.channels.favoritos savebookmark filename="+filename) # Graba el fichero if not samba.usingsamba(savepath): fullfilename = os.path.join(savepath, filename) bookmarkfile = open(fullfilename, "w") bookmarkfile.write(filecontent) bookmarkfile.flush() bookmarkfile.close() else: samba.store_File(filename, filecontent, savepath)
def search(item, texto): logger.info("pelisalacarta.sipeliculas search") itemlist = [] texto = scrapertools.slugify(texto) item.url = "http://www.sipeliculas.com/ver/" + texto item.extra = "" itemlist.extend(lista1(item)) return itemlist
def search(item,texto): logger.info("pelisalacarta.sipeliculas search") itemlist = [] texto = scrapertools.slugify(texto) item.url="http://www.sipeliculas.com/ver/"+texto item.extra = "" itemlist.extend(lista1(item)) return itemlist
def listas(item):
    logger.info()
    # Add lists to the library, in the CINE folder
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = jsontools.load_json(data)
    error = False  # safe default in case the list is empty
    for child in data.get("a", []):
        infolabels = {}
        # Fanart
        fanart = host % "movie/%s/background_1080.jpg" % child["id"]
        thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"]
        url = host % "movie/%s/movie.js" % child["id"]
        if child['name'] == "":
            title = scrapertools.slugify(child['id'].rsplit(".", 1)[0])
        else:
            title = scrapertools.slugify(child['name'])
        title = title.replace('-', ' ').replace('_', ' ')
        title = unicode(title, "utf-8").capitalize().encode("utf-8")
        infolabels['title'] = title
        try:
            from core import library
            new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos",
                                  thumbnail=thumbnail, infoLabels=infolabels, category="Cine")
            library.add_pelicula_to_library(new_item)
            error = False
        except:
            error = True
            import traceback
            logger.error(traceback.format_exc())

    if not error:
        itemlist.append(Item(channel=item.channel, title='Lista añadida correctamente a la biblioteca', action=""))
    else:
        itemlist.append(Item(channel=item.channel, title='ERROR. Han ocurrido uno o varios errores en el proceso',
                             action=""))
    return itemlist
def categorias(item):
    logger.info()
    itemlist = []
    soup = get_source(item.url, soup=True).find('div', class_='filter-genres')
    matches = soup.find_all('li')
    for elem in matches:
        # id = elem.input['value']
        # post = "action=action_filter&number=10&paged=1&genre[]=%s" % id
        title = elem.text.strip()
        name = scrapertools.slugify(title).lower()
        url = "%s/category/%s" % (host, name.decode("utf8"))
        itemlist.append(item.clone(action="lista", title=title, url=url,
                                   thumbnail=get_thumb("movies", auto=True)))
    return itemlist
def search(item, text):
    logger.info()
    itemlist = []
    if text != '':
        try:
            text = scrapertools.slugify(text)
            text = text.replace('-', '+')
            item.url += text
            item.param = "search"
            return list_all(item)
        except:
            for line in sys.exc_info():
                logger.error("%s" % line)
    return itemlist
def search(item, text):
    logger.info()
    itemlist = []
    if text:
        try:
            text = scrapertools.slugify(text)
            text = text.replace('-', '%20')
            item.url = '{}/buscar/{}/40'.format(apihost, text)
            item.list_type = 'buscar'
            return list_all(item)
        except:
            for line in sys.exc_info():
                logger.error("%s" % line)
    return itemlist
def search(item, text):
    logger.info()
    itemlist = []
    if text:
        try:
            text = scrapertools.slugify(text)
            text = text.replace('-', '%20')
            item.url = '{}/buscar/{}/40'.format(apihost, text)
            item.list_type = 'buscar'
            return list_all(item)
        except Exception:
            import traceback
            logger.error(traceback.format_exc())
    return itemlist
def GuardarDescarga(item, Ruta=DOWNLOAD_LIST_PATH): logger.info("[descargas.py] GuardarDescarga") if usingsamba(Ruta): ficheros = samba.get_files(Ruta) else: ficheros = os.listdir(Ruta) ficheros.sort() # Averigua el último número if len(ficheros) > 0: # XRJ: Linea problemática, sustituida por el bucle siguiente #filenumber = int( ficheros[len(ficheros)-1][0:-4] )+1 filenumber = 1 for fichero in ficheros: logger.info("[favoritos.py] fichero=" + fichero) try: tmpfilenumber = int(fichero[0:8]) + 1 if tmpfilenumber > filenumber: filenumber = tmpfilenumber except: pass else: filenumber = 1 # Genera el nombre de fichero from core import scrapertools filename = '%08d-%s.txt' % (filenumber, scrapertools.slugify(item.title)) fullfilename = os.path.join(Ruta, filename) logger.info("[descargas.py] GuardarDescarga filename=" + fullfilename) # Genera el contenido if Ruta == DOWNLOAD_LIST_PATH: item.category = "pendientes" if Ruta == ERROR_PATH: item.category = "errores" item.channel = "descargas" item.extra = fullfilename item.folder = False filecontent = item.serialize() # Graba el fichero if not usingsamba(Ruta): bookmarkfile = open(fullfilename.decode("utf-8"), "w") bookmarkfile.write(filecontent) bookmarkfile.flush() bookmarkfile.close() else: samba.write_file(filename, filecontent, Ruta)
def GuardarFavorito(item, Ruta=BOOKMARK_PATH): logger.info("[favoritos.py] GuardarFavorito") # Lee todos los ficheros if usingsamba(Ruta): ficheros = samba.get_files(Ruta) else: ficheros = os.listdir(Ruta) ficheros.sort() # Averigua el último número if len(ficheros) > 0: # XRJ: Linea problemática, sustituida por el bucle siguiente #filenumber = int( ficheros[len(ficheros)-1][0:-4] )+1 filenumber = 1 for fichero in ficheros: logger.info("[favoritos.py] fichero=" + fichero) try: tmpfilenumber = int(fichero[0:8]) + 1 if tmpfilenumber > filenumber: filenumber = tmpfilenumber except: pass else: filenumber = 1 # Genera el nombre de fichero from core import scrapertools filename = '%08d-%s.txt' % (filenumber, scrapertools.slugify(item.title)) fullfilename = os.path.join(Ruta, filename) logger.info("[favoritos.py] savebookmark filename=" + filename) # Genera el contenido item.channel = "favoritos" item.extra = fullfilename filecontent = item.serialize() item.folder = False # Graba el fichero if not usingsamba(Ruta): bookmarkfile = open(fullfilename.decode("utf-8"), "w") bookmarkfile.write(filecontent) bookmarkfile.flush() bookmarkfile.close() else: samba.write_file(filename, filecontent, Ruta)
def GuardarFavorito(item, Ruta=BOOKMARK_PATH): logger.info("[favoritos.py] GuardarFavorito") # Lee todos los ficheros if usingsamba(Ruta): ficheros = samba.get_files(Ruta) else: ficheros = os.listdir(Ruta) ficheros.sort() # Averigua el último número if len(ficheros)>0: # XRJ: Linea problemática, sustituida por el bucle siguiente #filenumber = int( ficheros[len(ficheros)-1][0:-4] )+1 filenumber = 1 for fichero in ficheros: logger.info("[favoritos.py] fichero="+fichero) try: tmpfilenumber = int( fichero[0:8] )+1 if tmpfilenumber > filenumber: filenumber = tmpfilenumber except: pass else: filenumber=1 # Genera el nombre de fichero from core import scrapertools filename = '%08d-%s.txt' % (filenumber,scrapertools.slugify(item.title)) fullfilename = os.path.join(Ruta,filename) logger.info("[favoritos.py] savebookmark filename="+filename) # Genera el contenido item.channel = "favoritos" item.extra = fullfilename filecontent = item.serialize() item.folder = False # Graba el fichero if not usingsamba(Ruta): bookmarkfile = open(fullfilename.decode("utf-8"),"w") bookmarkfile.write(filecontent) bookmarkfile.flush(); bookmarkfile.close() else: samba.write_file(filename, filecontent, Ruta)
def GuardarDescarga(item, Ruta=DOWNLOAD_LIST_PATH): logger.info("[descargas.py] GuardarDescarga") if usingsamba(Ruta): ficheros = samba.get_files(Ruta) else: ficheros = os.listdir(Ruta) ficheros.sort() # Averigua el último número if len(ficheros)>0: # XRJ: Linea problemática, sustituida por el bucle siguiente #filenumber = int( ficheros[len(ficheros)-1][0:-4] )+1 filenumber = 1 for fichero in ficheros: logger.info("[favoritos.py] fichero="+fichero) try: tmpfilenumber = int( fichero[0:8] )+1 if tmpfilenumber > filenumber: filenumber = tmpfilenumber except: pass else: filenumber=1 # Genera el nombre de fichero from core import scrapertools filename = '%08d-%s.txt' % (filenumber,scrapertools.slugify(item.title)) fullfilename = os.path.join(Ruta,filename) logger.info("[descargas.py] GuardarDescarga filename="+fullfilename) # Genera el contenido if Ruta==DOWNLOAD_LIST_PATH: item.category="pendientes" if Ruta==ERROR_PATH: item.category="errores" item.channel="descargas" item.file=fullfilename item.folder=False filecontent = item.serialize() # Graba el fichero if not usingsamba(Ruta): bookmarkfile = open(fullfilename.decode("utf-8"),"w") bookmarkfile.write(filecontent) bookmarkfile.flush(); bookmarkfile.close() else: samba.write_file(filename, filecontent, Ruta)
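# The savers above (savebookmark / GuardarFavorito / GuardarDescarga) all share
# one persistence scheme: files named "%08d-<slug>.txt", so a plain
# lexicographic sort is also insertion order, and the next free number is found
# by reading the first eight characters of every existing file. Below is a
# minimal, self-contained sketch of that scheme; next_filename() is a
# hypothetical helper and this slugify() is a simplified stand-in for
# core.scrapertools.slugify.
import os
import re


def slugify(title):
    # Simplified stand-in: lowercase, collapse non-alphanumerics into dashes.
    return re.sub(r"[^a-z0-9]+", "-", title.lower()).strip("-")


def next_filename(folder, title):
    filenumber = 1
    for fichero in sorted(os.listdir(folder)):
        try:
            # The first eight characters hold a zero-padded sequence number
            tmpfilenumber = int(fichero[0:8]) + 1
            if tmpfilenumber > filenumber:
                filenumber = tmpfilenumber
        except ValueError:
            pass  # skip files that do not follow the naming scheme
    return "%08d-%s.txt" % (filenumber, slugify(title))

# Example: in an empty folder, next_filename(".", "My Movie") returns
# "00000001-my-movie.txt"; once that file exists, the next call returns
# "00000002-my-movie.txt".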
def generos(item): logger.info("pelisalacarta.channels.oranline generos") itemlist = [] genres = { 'Deporte': '3/Sports%20Film.jpg', 'Película de la televisión': '3/Tv%20Movie.jpg', 'Estrenos de cine': '0/New%20Releases.png', 'Estrenos dvd y hd': '0/HDDVD%20Bluray.png' } # Descarga la página data = scrapertools.downloadpage(item.url) bloque = scrapertools.find_single_match( data, '<div class="sub_title">Géneros</div>(.*?)</ul>') # Extrae las entradas patron = '<li><a href="([^"]+)".*?<i>(.*?)</i>.*?<b>(.*?)</b>' matches = scrapertools.find_multiple_matches(bloque, patron) for scrapedurl, scrapedtitle, cuantas in matches: scrapedtitle = scrapedtitle.strip().capitalize() title = scrapedtitle + " (" + cuantas + ")" name_thumb = scrapertools.slugify(scrapedtitle) if scrapedtitle == "Foreign" or scrapedtitle == "Suspense" or scrapedtitle == "Thriller": thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/2/%s.jpg" \ % name_thumb.capitalize() elif scrapedtitle in genres: thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/%s" \ % genres[scrapedtitle] else: thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/1/%s.jpg" \ % name_thumb.replace("-", "%20") if DEBUG: logger.info("title=[{0}], url=[{1}], thumbnail=[{2}]".format( title, scrapedurl, thumbnail)) itemlist.append( item.clone(action="peliculas", title=title, url=scrapedurl, thumbnail=thumbnail, text_color=color2)) return itemlist
def search(item, texto):
    logger.info()
    try:
        if texto:
            texto = scrapertools.slugify(texto).replace('-', '+')
            item.url = '{}{}'.format(item.url, texto)
            return list_all(item)
        else:
            return
    except Exception:
        # Catch the exception so a failing channel does not break the global search
        import traceback
        logger.error(traceback.format_exc())
        return []
def list_all(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    matches = scrapertools.find_multiple_matches(data, '<article id="(.*?)</div>')
    for article in matches:
        url = scrapertools.find_single_match(article, ' href="([^"]+)"')
        title = scrapertools.find_single_match(article, ' title="([^"]+)"')
        title = scrapertools.slugify(title)
        title = title.lower().capitalize().replace('-', ' ')
        thumb = scrapertools.find_single_match(article, ' src="([^"]+)')
        itemlist.append(item.clone(action='findvideos', url=url, title=title, thumbnail=thumb,
                                   contentType='movie', contentTitle=title, contentExtra='documentary'))

    next_page_link = scrapertools.find_single_match(data, 'class="next page-numbers" href="([^"]+)">Siguiente')
    if next_page_link:
        itemlist.append(item.clone(title='>> Página siguiente', action='list_all', url=next_page_link))
    return itemlist
def listas(item): logger.info("pelisalacarta.channels.tvvip listas") # Para añadir listas a la biblioteca en carpeta CINE itemlist = [] data, cloudflare = anti_cloudflare(item.url) data = jsontools.load_json(data) for child in data["sortedRepoChilds"]: url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"] title = scrapertools.slugify(child['name']) title = title.replace('-',' ').capitalize() try: from platformcode import library library.savelibrary(titulo=title, url=url, server='', canal="tvvip", category="Cine") error = False except: error = True pass if not error: itemlist.append(Item(title = 'Lista añadida correctamente a la biblioteca')) else: itemlist.append(Item(title = 'ERROR. Han ocurrido uno o varios errores en el proceso')) return itemlist
def controller(plugin_name, port, host, path, headers):
    respuesta = '<?xml version=\'1.0\' encoding="UTF-8" ?>\n<rss version="2.0" xmlns:dc="http://purl.org/dc/elements/1.1/">\n'
    respuesta += "<channel>\n"
    respuesta += "<link>%s</link>\n\n" % path

    if path == "/rss" or path == "/rss/":
        import channelselector
        channelslist = channelselector.getmainlist()
        respuesta += "<title>Menú Principal</title>\n\n"  # TODO: parametrize

        # Automatic channel update: refresh the list
        if config.get_setting("updatechannels") == "true":
            try:
                from core import updater
                actualizado = updater.updatechannel("channelselector")
                if actualizado:
                    respuesta += "<title>¡Lista de canales actualizada!</title>\n"
                    respuesta += "<image></image>\n"
                    respuesta += "<link>http://" + host + "/rss/</link>\n"
                    respuesta += "\n"
            except:
                pass

        for channel in channelslist:
            respuesta += "<item>\n"
            respuesta += "<title>" + channel.title + "</title>\n"
            respuesta += "<image>http://" + plugin_name + ".mimediacenter.info/posters/" + channel.channel + ".png</image>\n"
            if channel.channel == "trailertools":
                enlace = "http://" + host + "/rss/" + channel.channel + "/search/none/none/none/none/none/none/none/playlist.rss"
                respuesta += "<link>rss_command://search</link>"
                respuesta += "<search url=\"" + enlace + "%s\" />"
            else:
                respuesta += "<link>http://" + host + "/rss/" + channel.channel + "/" + channel.action + "/none/none/none/none/none/none/none/playlist.rss</link>\n"
            respuesta += "</item>\n"
            respuesta += "\n"

    elif path.startswith("/rss/channelselector/channeltypes"):
        import channelselector
        channelslist = channelselector.getchanneltypes()
        respuesta += "<title>Tipo de contenido</title>\n\n"  # TODO: parametrize
        for channel in channelslist:
            respuesta += "<item>\n"
            respuesta += "<title>" + channel.title + "</title>\n"
            respuesta += "<link>http://" + host + "/rss/" + channel.channel + "/" + channel.action + "/" + channel.category + "/none/none/none/none/none/playlist.rss</link>\n"
            respuesta += "<image>http://" + plugin_name + ".mimediacenter.info/wiimc/" + channel.thumbnail + ".png</image>\n"
            respuesta += "</item>\n"
            respuesta += "\n"

    elif path.startswith("/rss/channelselector/listchannels"):
        category = path.split("/")[4]
        logger.info("##category=" + category)
        import channelselector
        channelslist = channelselector.filterchannels(category)
        respuesta += "<title>Canales</title>\n\n"  # TODO: parametrize
        for channel in channelslist:
            if channel.type == "generic" or channel.type == "rss":  # or channel.type=="wiimc":
                respuesta += "<item>\n"
                respuesta += "<title>" + channel.title.replace("_generico", "").replace(" (Multiplataforma)", "") + "</title>\n"
                respuesta += "<link>http://" + host + "/rss/" + channel.channel + "/mainlist/none/none/none/none/none/none/playlist.rss</link>\n"
                respuesta += "<image>http://" + plugin_name + ".mimediacenter.info/posters/" + channel.channel + ".png</image>\n"
                respuesta += "</item>\n"
                respuesta += "\n"

    else:
        import rsstools
        itemlist, channel = rsstools.getitems(path)

        # Empty lists are problematic, add a dummy element
        if len(itemlist) == 0:
            itemlist.append(Item(title="(No hay elementos)", action=path.split("/")[3]))  ## <---

        import urllib
        respuesta += "<title>%s</title>\n" % channel.replace("_generico", "").replace(" (Multiplataforma)", "")

        for item in itemlist:
            respuesta += "<item>\n"
            if item.server == "":
                item.server = "none"
            if item.url == "":
                item.url = "none"
            if item.extra == "":
                item.extra = "none"
            if item.title == "":
                item.title = "none"
            if item.fulltitle == "":
                item.fulltitle = "none"
            if item.category == "":
                item.category = "none"
            if item.channel == "":
                item.channel = channel

            if item.action == "search":
                url = "http://%s/rss/%s/%s/%s/%s/%s/%s/%s/%s/playlist.rss" % (
                    host, channel, item.action, urllib.quote_plus(item.url), item.server,
                    urllib.quote_plus(item.title), urllib.quote_plus(item.extra),
                    urllib.quote_plus(item.category), urllib.quote_plus(item.fulltitle))
                respuesta += "<title>%s</title>\n" % entityunescape(item.title)
                if item.fulltitle not in ("", "none"):
                    respuesta += "<fulltitle>%s</fulltitle>\n" % item.fulltitle
                if item.thumbnail != "":
                    respuesta += "<image>%s</image>\n" % item.thumbnail
                respuesta += "<link>rss_command://search</link>\n"
                respuesta += "<search url=\"" + url + "%s\" />\n"
                respuesta += "\n"

            elif item.action == "EXIT":
                respuesta += "<title>%s</title>\n" % entityunescape(item.title)
                if item.thumbnail != "":
                    respuesta += "<image>%s</image>\n" % item.thumbnail
                url = "http://%s/rss/" % host
                respuesta += "<link>%s</link>\n" % url
                respuesta += "\n"

            elif item.folder or item.action == "play" or item.action == "downloadall":
                logger.info(" Nivel intermedio")
                item.fulltitle = DepuraTitulo(item.fulltitle, "false", "false")

                from core.scrapertools import slugify
                play_name = "%s_%s.dat" % (item.channel, urllib.quote(item.fulltitle))
                play_name = slugify(play_name)

                if item.plot not in ("none", ""):
                    item.plot = item.plot.replace("\n", " ")
                    salva_descripcion(play_name, item.fulltitle, item.plot, item.thumbnail)
                else:
                    fulltitle, plot, thumbnail = recupera_descripcion(play_name)
                    if fulltitle != "" and item.fulltitle in ("", "none"):
                        item.fulltitle = fulltitle
                    if plot != "" and item.plot == "":
                        item.plot = plot
                    if thumbnail != "" and item.thumbnail == "":
                        item.thumbnail = thumbnail

                if item.title == "none":
                    item.title = "Ver el video"

                url = "http://%s/rss/%s/%s/%s/%s/%s/%s/%s/%s/playlist.rss" % (
                    host, item.channel, item.action, urllib.quote_plus(item.url), item.server,
                    urllib.quote(item.title), urllib.quote_plus(item.extra),
                    urllib.quote_plus(item.category), urllib.quote_plus(item.fulltitle))
                respuesta += "<title><![CDATA[%s]]></title>\n" % unicode(item.title, "iso-8859-1", errors="ignore").encode("utf-8")
                if item.fulltitle not in ("", "none"):
                    respuesta += "<fulltitle><![CDATA[%s]]></fulltitle>\n" % unicode(item.title, "iso-8859-1", errors="ignore").encode("utf-8")
                if item.plot != "":
                    respuesta += "<description><![CDATA[ %s ]]></description>\n" % unicode(item.plot, "iso-8859-1", errors="ignore").encode("utf-8")
                if item.thumbnail != "":
                    respuesta += "<image>%s</image>\n" % item.thumbnail
                respuesta += "<link>%s</link>\n" % url
                respuesta += "\n"

            else:
                logger.info(" Video")
                from core.scrapertools import slugify
                play_name = "%s_%s.dat" % (item.channel, urllib.quote(item.fulltitle))
                play_name = slugify(play_name)
                fulltitle, plot, thumbnail = recupera_descripcion(play_name)
                if fulltitle != "" and item.fulltitle in ("", "none"):
                    item.fulltitle = fulltitle
                if plot != "" and item.plot == "":
                    item.plot = plot
                if thumbnail != "" and item.thumbnail == "":
                    item.thumbnail = thumbnail
                # respuesta += "<title><![CDATA[%s]]></title>\n" % entityunescape(item.title)
                respuesta += "<title><![CDATA[%s]]></title>\n" % unicode(fulltitle, "iso-8859-1", errors="ignore").encode("utf-8")
                respuesta += "<fulltitle><![CDATA[%s]]></fulltitle>\n" % unicode(item.title, "iso-8859-1", errors="ignore").encode("utf-8")
                respuesta += "<description><![CDATA[%s]]></description>\n" % unicode(plot, "iso-8859-1", errors="ignore").encode("utf-8")
                respuesta += "<enclosure url=\"%s\" type=\"video/x-flv\" />\n" % item.url
                respuesta += "<image>%s</image>\n" % thumbnail

            respuesta += "</item>\n\n"

    respuesta += "</channel>\n"
    respuesta += "</rss>\n"

    print "--------------------------------------------"
    print respuesta
    print "--------------------------------------------"

    return respuesta
def temporadas(item): logger.info("tvalacarta.channels.dplay temporadas") itemlist = [] data = scrapertools.cache_page(item.url) patron = '<div class="episode_carousel"[^<]+' patron += '<h2 class="carousel-title">([^<]+)</h2>' matches = scrapertools.find_multiple_matches(data, patron) for scrapedtitle in matches: itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle, action="episodios", url=urlparse.urljoin(item.url,"episodios/"+scrapertools.slugify(scrapedtitle)), show=item.show, folder=True) ) return itemlist
def controller(plugin_name, port, host, path, headers):
    netplayer_ua = "Mozilla/5.0 (Windows; U; en-US; rv:1.8.1.11; Gecko/20071129; Firefox/2.5.0) Maple 6.0.00067 Navi"

    respuesta = '<?xml version=\'1.0\' encoding="UTF-8" ?>\n<rss version="2.0" xmlns:dc="http://purl.org/dc/elements/1.1/">\n'
    respuesta += "<channel>\n"
    respuesta += "<link>%s</link>\n\n" % path

    if path == "/rss" or path == "/rss/":
        import channelselector
        channelslist = channelselector.getmainlist()
        respuesta += "<title>Menú Principal</title>\n\n"  # TODO: parametrize

        # Automatic channel update: refresh the list
        if config.get_setting("updatechannels") == "true":
            try:
                from core import updater
                actualizado = updater.updatechannel("channelselector")
                if actualizado:
                    respuesta += "<title>¡Lista de canales actualizada!</title>\n"
                    respuesta += "<image></image>\n"
                    if headers.get("User-Agent") == netplayer_ua:
                        respuesta += "<enclosure url=\"http://" + host + "/rss/\" type=\"text/xml\" />\n"
                    else:
                        respuesta += "<link>http://" + host + "/rss/</link>\n"
                    respuesta += "\n"
            except:
                pass

        for channel in channelslist:
            respuesta += "<item>\n"
            respuesta += "<title>" + channel.title + "</title>\n"
            if headers.get("User-Agent") == netplayer_ua:
                respuesta += "<description><![CDATA[<img src=\"http://" + plugin_name + ".mimediacenter.info/posters/" + channel.channel + ".png\" />]]></description>\n"
            else:
                respuesta += "<image>http://" + plugin_name + ".mimediacenter.info/posters/" + channel.channel + ".png</image>\n"
            if channel.channel == "trailertools":
                enlace = "http://" + host + "/rss/" + channel.channel + "/search/none/none/none/none/none/none/none/playlist.rss"
                if headers.get("User-Agent") == netplayer_ua:
                    respuesta += "<textinput>\n"
                    # respuesta += "<title>%s</title>\n" % entityunescape(channel.title)
                    respuesta += "<link>" + enlace + "</link>\n"
                    respuesta += "</textinput>\n"
                else:
                    respuesta += "<link>rss_command://search</link>"
                    respuesta += "<search url=\"" + enlace + "%s\" />"
            else:
                if headers.get("User-Agent") == netplayer_ua:
                    respuesta += "<enclosure url=\"http://" + host + "/rss/" + channel.channel + "/" + channel.action + "/none/none/none/none/none/none/none/playlist.rss\" type=\"text/xml\" />\n"
                else:
                    respuesta += "<link>http://" + host + "/rss/" + channel.channel + "/" + channel.action + "/none/none/none/none/none/none/none/playlist.rss</link>\n"
            respuesta += "</item>\n"
            respuesta += "\n"

    elif path.startswith("/rss/channelselector/channeltypes"):
        import channelselector
        channelslist = channelselector.getchanneltypes()
        respuesta += "<title>Tipo de contenido</title>\n\n"  # TODO: parametrize
        for channel in channelslist:
            respuesta += "<item>\n"
            respuesta += "<title>" + channel.title + "</title>\n"
            if headers.get("User-Agent") == netplayer_ua:
                respuesta += "<description><![CDATA[<img src=\"http://" + plugin_name + ".mimediacenter.info/wiimc/" + channel.thumbnail + ".png\" />]]></description>\n"
                respuesta += "<enclosure url=\"http://" + host + "/rss/" + channel.channel + "/" + channel.action + "/" + channel.category + "/none/none/none/none/none/playlist.rss\" type=\"text/xml\" />\n"
            else:
                respuesta += "<link>http://" + host + "/rss/" + channel.channel + "/" + channel.action + "/" + channel.category + "/none/none/none/none/none/playlist.rss</link>\n"
                respuesta += "<image>http://" + plugin_name + ".mimediacenter.info/wiimc/" + channel.thumbnail + ".png</image>\n"
            respuesta += "</item>\n"
            respuesta += "\n"

    elif path.startswith("/rss/channelselector/listchannels"):
        category = path.split("/")[4]
        logger.info("##category=" + category)
        import channelselector
        channelslist = channelselector.filterchannels(category)
        respuesta += "<title>Canales</title>\n\n"  # TODO: parametrize
        for channel in channelslist:
            if channel.type == "generic" or channel.type == "rss":  # or channel.type=="wiimc":
                respuesta += "<item>\n"
                respuesta += "<title>" + channel.title.replace("_generico", "").replace(" (Multiplataforma)", "") + "</title>\n"
                if headers.get("User-Agent") == netplayer_ua:
                    respuesta += "<description><![CDATA[<img src=\"http://" + plugin_name + ".mimediacenter.info/posters/" + channel.channel + ".png\" />]]></description>\n"
                    respuesta += "<enclosure url=\"http://" + host + "/rss/" + channel.channel + "/mainlist/none/none/none/none/none/none/playlist.rss\" type=\"text/xml\" />\n"
                else:
                    respuesta += "<link>http://" + host + "/rss/" + channel.channel + "/mainlist/none/none/none/none/none/none/playlist.rss</link>\n"
                    respuesta += "<image>http://" + plugin_name + ".mimediacenter.info/posters/" + channel.channel + ".png</image>\n"
                respuesta += "</item>\n"
                respuesta += "\n"

    else:
        import rsstools
        itemlist, channel = rsstools.getitems(path, headers.get("User-Agent"))

        # Empty lists are problematic, add a dummy element
        if len(itemlist) == 0:
            itemlist.append(Item(title="(No hay elementos)", action=path.split("/")[3]))  ## <---

        import urllib
        respuesta += "<title>%s</title>\n" % channel.replace("_generico", "").replace(" (Multiplataforma)", "")

        for item in itemlist:
            respuesta += "<item>\n"
            if item.server == "":
                item.server = "none"
            if item.url == "":
                item.url = "none"
            if item.extra == "":
                item.extra = "none"
            if item.title == "":
                item.title = "none"
            if item.fulltitle == "":
                item.fulltitle = "none"
            if item.category == "":
                item.category = "none"
            if item.channel == "":
                item.channel = channel

            if item.action == "search":
                url = "http://%s/rss/%s/%s/%s/%s/%s/%s/%s/%s/playlist.rss" % (
                    host, channel, item.action, urllib.quote_plus(item.url), item.server,
                    urllib.quote_plus(item.title), urllib.quote_plus(item.extra),
                    urllib.quote_plus(item.category), urllib.quote_plus(item.fulltitle))
                if headers.get("User-Agent") == netplayer_ua:
                    respuesta += "<textinput>\n"
                    respuesta += "<title>%s</title>\n" % entityunescape(item.title)
                    if item.fulltitle not in ("", "none"):
                        respuesta += "<description>%s</description>\n" % item.fulltitle
                    # if item.thumbnail != "": respuesta += "<image>%s</image>\n" % item.thumbnail
                    respuesta += "<link>" + url + "</link>\n"
                    respuesta += "</textinput>\n"
                else:
                    respuesta += "<title>%s</title>\n" % entityunescape(item.title)
                    if item.fulltitle not in ("", "none"):
                        respuesta += "<fulltitle>%s</fulltitle>\n" % item.fulltitle
                    if item.thumbnail != "":
                        respuesta += "<image>%s</image>\n" % item.thumbnail
                    respuesta += "<link>rss_command://search</link>\n"
                    respuesta += "<search url=\"" + url + "%s\" />\n"
                respuesta += "\n"

            elif item.action == "EXIT":
                respuesta += "<title>%s</title>\n" % entityunescape(item.title)
                url = "http://%s/rss/" % host
                if headers.get("User-Agent") == netplayer_ua:
                    if item.thumbnail != "":
                        respuesta += "<description><![CDATA[<img src=\"%s\" />]]></description>\n" % item.thumbnail
                    respuesta += "<enclosure url=\"%s\" type=\"text/xml\" />\n" % url
                else:
                    if item.thumbnail != "":
                        respuesta += "<image>%s</image>\n" % item.thumbnail
                    respuesta += "<link>%s</link>\n" % url
                respuesta += "\n"

            elif item.folder or item.action == "play" or item.action == "downloadall":
                logger.info(" Nivel intermedio")
                item.fulltitle = DepuraTitulo(item.fulltitle, "false", "false")

                from core.scrapertools import slugify
                play_name = "%s_%s.dat" % (item.channel, urllib.quote(item.fulltitle))
                play_name = slugify(play_name)

                if item.plot not in ("none", ""):
                    item.plot = item.plot.replace("\n", " ")
                    salva_descripcion(play_name, item.fulltitle, item.plot, item.thumbnail)
                else:
                    fulltitle, plot, thumbnail = recupera_descripcion(play_name)
                    if fulltitle != "" and item.fulltitle in ("", "none"):
                        item.fulltitle = fulltitle
                    if plot != "" and item.plot == "":
                        item.plot = plot
                    if thumbnail != "" and item.thumbnail == "":
                        item.thumbnail = thumbnail

                if item.title == "none":
                    item.title = "Ver el video"

                url = "http://%s/rss/%s/%s/%s/%s/%s/%s/%s/%s/playlist.rss" % (
                    host, item.channel, item.action, urllib.quote_plus(item.url), item.server,
                    urllib.quote(item.title), urllib.quote_plus(item.extra),
                    urllib.quote_plus(item.category), urllib.quote_plus(item.fulltitle))
                respuesta += "<title><![CDATA[%s]]></title>\n" % unicode(item.title, "iso-8859-1", errors="ignore").encode("utf-8")
                if item.fulltitle not in ("", "none"):
                    respuesta += "<fulltitle><![CDATA[%s]]></fulltitle>\n" % unicode(item.title, "iso-8859-1", errors="ignore").encode("utf-8")
                if headers.get("User-Agent") == netplayer_ua:
                    if item.plot != "" or item.thumbnail != "":
                        respuesta += "<description><![CDATA[ "
                    if item.thumbnail != "":
                        respuesta += "<img src=\"%s\" />" % item.thumbnail
                    if item.plot != "" or item.thumbnail != "":
                        respuesta += "%s ]]></description>\n" % unicode(item.plot, "iso-8859-1", errors="ignore").encode("utf-8")
                    respuesta += "<enclosure url=\"%s\" type=\"text/xml\"/>\n" % url
                else:
                    if item.plot != "":
                        respuesta += "<description><![CDATA[ %s ]]></description>\n" % unicode(item.plot, "iso-8859-1", errors="ignore").encode("utf-8")
                    if item.thumbnail != "":
                        respuesta += "<image>%s</image>\n" % item.thumbnail
                    respuesta += "<link>%s</link>\n" % url
                respuesta += "\n"

            else:
                logger.info(" Video")
                from core.scrapertools import slugify
                play_name = "%s_%s.dat" % (item.channel, urllib.quote(item.fulltitle))
                play_name = slugify(play_name)
                fulltitle, plot, thumbnail = recupera_descripcion(play_name)
                if fulltitle != "" and item.fulltitle in ("", "none"):
                    item.fulltitle = fulltitle
                if plot != "" and item.plot == "":
                    item.plot = plot
                if thumbnail != "" and item.thumbnail == "":
                    item.thumbnail = thumbnail
                if fulltitle == "":
                    respuesta += "<title><![CDATA[%s]]></title>\n" % entityunescape(item.title)
                else:
                    respuesta += "<title><![CDATA[%s]]></title>\n" % unicode(fulltitle, "iso-8859-1", errors="ignore").encode("utf-8")
                respuesta += "<fulltitle><![CDATA[%s]]></fulltitle>\n" % unicode(item.title, "iso-8859-1", errors="ignore").encode("utf-8")
                if headers.get("User-Agent") == netplayer_ua:
                    if plot != "" or thumbnail != "":
                        respuesta += "<description><![CDATA["
                    if thumbnail != "":
                        respuesta += "<img src=\"%s\" />" % thumbnail
                    if plot != "" or thumbnail != "":
                        respuesta += "%s]]></description>\n" % unicode(plot, "iso-8859-1", errors="ignore").encode("utf-8")
                    cad = item.url
                    if cad.find(".flv") != -1:
                        respuesta += "<enclosure url=\"%s\" type=\"video/x-flv\" />\n" % item.url
                    else:
                        respuesta += "<enclosure url=\"%s\" type=\"video/mpeg\" />\n" % item.url
                else:
                    respuesta += "<description><![CDATA[%s]]></description>\n" % unicode(plot, "iso-8859-1", errors="ignore").encode("utf-8")
                    respuesta += "<enclosure url=\"%s\" type=\"video/x-flv\" />\n" % item.url
                    respuesta += "<image>%s</image>\n" % thumbnail

            respuesta += "</item>\n\n"

    respuesta += "</channel>\n"
    respuesta += "</rss>\n"

    print "--------------------------------------------"
    print respuesta
    print "--------------------------------------------"

    return respuesta
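# Both controller() variants above route all state through the URL itself:
# each item link follows "http://<host>/rss/<channel>/<action>/<url>/<server>/
# <title>/<extra>/<category>/<fulltitle>/playlist.rss", with every field
# urlencoded and empty fields replaced by "none". A minimal sketch of encoding
# and decoding that layout; build_rss_path() and parse_rss_path() are
# hypothetical helpers, not the plugin's actual API.
import urllib

FIELDS = ["channel", "action", "url", "server", "title", "extra", "category", "fulltitle"]


def build_rss_path(host, **values):
    # Empty or missing fields become "none", matching the controller above.
    parts = [urllib.quote_plus(values.get(f) or "none") for f in FIELDS]
    return "http://%s/rss/%s/playlist.rss" % (host, "/".join(parts))


def parse_rss_path(path):
    # "/rss/<8 fields>/playlist.rss" -> dict of decoded fields
    segments = path.strip("/").split("/")[1:-1]  # drop "rss" and "playlist.rss"
    return dict(zip(FIELDS, [urllib.unquote_plus(s) for s in segments]))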