def findvideos(item):
    """Build playable YouTube links for a documentary entry.

    The "Cosmos (Carl Sagan)" page carries per-episode titles, so each
    embed is resolved individually; any other page is scanned in bulk.
    """
    logger.info()
    from core import servertools
    itemlist = []
    page = httptools.downloadpage(item.url).data
    page = re.sub(r"\n|\r|\t|\s{2}|-\s", "", page)
    if item.fulltitle == "Cosmos (Carl Sagan)":
        episodes = scrapertools.find_multiple_matches(
            page, '<p><strong>(.*?)</strong><br /><iframe.+?src="(https://www\.youtube\.com/[^?]+)')
        for ep_title, ep_url in episodes:
            cloned = item.clone(title=ep_title, url=ep_url)
            found = servertools.find_video_items(cloned)
            for video in found:
                video.title = cloned.title
                video.fulltitle = cloned.title
                video.channel = item.channel
            itemlist.extend(found)
    else:
        embeds = scrapertools.find_multiple_matches(
            page, '<iframe.+?src="(https://www\.youtube\.com/[^?]+)')
        itemlist.extend(servertools.find_video_items(data=",".join(embeds)))
        for video in itemlist:
            video.fulltitle = item.fulltitle
            video.channel = item.channel
    return itemlist
def search(item, texto):
    """Resolve a user-typed URL into playable items.

    Depending on the menu entry (item.title) the text is treated as a
    direct server link, a raw stream URL, or a page to scan for videos.

    BUG FIX: the original used `item` as the loop variable, shadowing the
    parameter; the final "no compatible video" entry then read `.channel`
    from the last found video instead of the menu item. Also, https URLs
    no longer get a bogus "http://" prefix.
    """
    logger.info("[tengourl.py] search texto=" + texto)
    if not texto.startswith(("http://", "https://")):
        texto = "http://" + texto
    itemlist = []
    if "servidor" in item.title:
        # The text itself is a link on a known video server.
        itemlist = servertools.find_video_items(data=texto)
        for videoitem in itemlist:
            videoitem.channel = "tengourl"
            videoitem.action = "play"
    elif "directo" in item.title:
        # Play the URL as-is, no server resolution.
        itemlist.append(Item(channel=item.channel, action="play", url=texto,
                             server="directo", title="Ver enlace directo"))
    else:
        # Download the page and scan it for any recognised video host.
        data = scrapertools.downloadpage(texto)
        itemlist = servertools.find_video_items(data=data)
        for videoitem in itemlist:
            videoitem.channel = "tengourl"
            videoitem.action = "play"
    if len(itemlist) == 0:
        itemlist.append(Item(channel=item.channel, action="search",
                             title="No hay ningún vídeo compatible en esa URL"))
    return itemlist
def findvideos(item):
    # List playable video items for a GuardaSerieOnline entry.
    # item.extra holds either a regex used to narrow the downloaded page
    # (when item.url is set) or the raw data to scan for video links.
    logger.info("[GuardaSerieOnline.py]==> findvideos")
    try:
        if item.url:
            # Page sits behind Cloudflare; fetch it and cut out the section
            # matched by the regex stored in item.extra.
            data = scrapertools.anti_cloudflare(item.url, headers=headers)
            data = scrapertools.find_single_match(data, item.extra)
            itemlist = servertools.find_video_items(data=data)
        else:
            itemlist = servertools.find_video_items(data=item.extra)
        # I could not find a better way than this; if anyone has a better
        # method to extract the video, please fix it.
        # NOTE(review): this drops only the second detected item — any items
        # past index 1 are kept; presumably only 1-2 links ever appear.
        if len(itemlist) > 1:
            itemlist.remove(itemlist[1])
        # Derive a clean server name from the detected item's title and
        # relabel the single surviving entry with the episode metadata.
        server = re.sub(r'[-\[\]\s]+', '', itemlist[0].title)
        itemlist[0].title = "".join(["[%s] " % color(server, 'orange'), item.title])
        itemlist[0].fulltitle = item.fulltitle
        itemlist[0].show = item.show
        itemlist[0].thumbnail = item.thumbnail
        itemlist[0].channel = __channel__
    except:
        # Broad catch: any scraping failure (no match, empty list, network
        # error) is logged and an empty list is returned.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist
def _decorate_megaforo_items(itemlist, item, show=None):
    # Stamp channel/thumbnail/plot on every detected video and prefix the
    # title with the server name; `show` is set only for series listings.
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.thumbnail = item.thumbnail
        videoitem.plot = item.plot
        # BUG FIX: the original title was missing the closing "] " after the
        # server name, producing "[serverTitle ..." labels.
        videoitem.title = "[" + videoitem.server + "] " + videoitem.title + " " + item.title
        if show is not None:
            videoitem.show = show


def findvideos(item):
    """Find video links on a mega-foro thread.

    On mega-foro pages the post thumbnail is scraped (and a plot section is
    located); otherwise the page is scanned as-is. In both cases every host
    recognised by servertools becomes a play item.
    """
    show = item.title.replace("Añadir esta serie a la biblioteca de XBMC", "")
    logger.info("[megaforo.py] findvideos show " + show)
    itemlist = []
    data = scrapertools.cache_page(item.url)
    from core import servertools
    if 'mega-foro' in data:
        patronimage = '<div class="inner" id="msg_\d{1,9}".*?<img src="([^"]+)".*?mega.co.nz/\#\![A-Za-z0-9\-\_]+\![A-Za-z0-9\-\_]+'
        matches = re.compile(patronimage, re.DOTALL).findall(data)
        if len(matches) > 0:
            thumbnail = scrapertools.htmlclean(matches[0])
            thumbnail = unicode(thumbnail, "iso-8859-1", errors="replace").encode("utf-8")
            item.thumbnail = thumbnail
        patronplot = '<div class="inner" id="msg_\d{1,9}".*?<img src="[^"]+"[^/]+/>(.*?)lgf_facebook_share'
        matches = re.compile(patronplot, re.DOTALL).findall(data)
        if len(matches) > 0:
            plot = matches[0]
            plot = re.sub(' ', '', plot)
            plot = re.sub('\s\s', '', plot)
            plot = scrapertools.htmlclean(plot)
            # NOTE(review): the scraped plot is discarded here, exactly as in
            # the original code — confirm before wiring it through.
            item.plot = ""
        itemlist.extend(servertools.find_video_items(data=data))
        _decorate_megaforo_items(itemlist, item, show=show)
        if config.get_library_support():
            itemlist.append(Item(channel=item.channel,
                                 title="Añadir esta serie a la biblioteca de XBMC",
                                 url=item.url, action="add_serie_to_library",
                                 extra="findvideos"))
        return itemlist
    else:
        item.thumbnail = ""
        item.plot = ""
        itemlist.extend(servertools.find_video_items(data=data))
        _decorate_megaforo_items(itemlist, item)
        return itemlist
def findvideos(item):
    """Build the per-quality / per-language server list for a movie,
    series episode or documentary page.

    item.extra selects the layout: 'series' episodes hide the player
    behind a per-link page, 'documental' pages expose a single player,
    movies list data-video entries directly.
    """
    logger.info()
    itemlist = []
    # Map the page's audio label to the coloured display string.
    audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]',
             'Español': '[COLOR yellow]ESPAÑOL[/COLOR]',
             'Ingles': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]',
             'Latino-Ingles': 'DUAL'}
    data = httptools.downloadpage(item.url).data
    if item.extra != 'series':
        patron = 'data-video="([^"]+)" class="reproductorVideo"><ul><li>([^<]+)<\/li><li>([^<]+)<\/li>'
        tipotitle = item.contentTitle
    elif item.extra == 'series':
        tipotitle = str(item.contentSeasonNumber) + 'x' + str(item.contentEpisodeNumber) + ' ' + item.contentSerieName
        patron = '<li class="enlaces-l"><a href="([^"]+)" target="_blank"><ul><li>([^<]+)<.*?>([^<]+)<.*?>Reproducir<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if item.extra != 'documental':
        n = 0
        for scrapedurl, scrapedcalidad, scrapedaudio in matches:
            if 'series' in item.extra:
                # The episode link leads to an intermediate page embedding
                # the real player iframe.
                datab = httptools.downloadpage(host + scrapedurl).data
                url = scrapertools.find_single_match(datab, 'class="reproductor"><iframe src="([^"]+)"')
                # BUG FIX: replaced a leftover debug `print` with logging.
                logger.info(url + ' esta es la direccion')
            else:
                url = scrapedurl
            # ROBUSTNESS: unknown audio labels no longer raise KeyError —
            # fall back to the raw label scraped from the page.
            idioma = audio.get(scrapedaudio, scrapedaudio)
            itemlist.extend(servertools.find_video_items(data=url))
            # Relabel the item just appended (one per source link).
            if n < len(itemlist):
                itemlist[n].title = tipotitle + ' (' + idioma + ' ) ' + '(' + itemlist[n].server + ' )'
            n = n + 1
    else:
        url = scrapertools.find_single_match(data, 'class="reproductor"><iframe src="([^"]+)"')
        itemlist.extend(servertools.find_video_items(data=url))
    for videoitem in itemlist:
        if item.extra == 'documental':
            videoitem.title = item.title + ' (' + videoitem.server + ')'
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False
    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'series':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library",
                             extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
def _tag_mocosoftx_items(itemlist, item, thumbnail, plot):
    # Stamp channel/plot/thumbnail on every detected video and title each
    # one with the last path segment of its URL plus the server name.
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.plot = plot
        videoitem.thumbnail = thumbnail
        videoitem.fulltitle = item.title
        parsed_url = urlparse.urlparse(videoitem.url)
        partes = parsed_url.path.split("/")
        titulo = partes[len(partes) - 1]
        videoitem.title = titulo + " - [" + videoitem.server + "]"


def findvideos(item):
    """Find video links on a mocosoftx post.

    Scans the post page itself first; if nothing is found, follows the
    highlighted external link (orange bbc_link) and scans that page too.

    REFACTOR: the original duplicated the whole item-decoration loop in
    both branches; it now lives in _tag_mocosoftx_items.
    """
    logger.info("pelisalacarta.channels.mocosoftx findvideos")
    itemlist = []
    # Fetch the post page and try to scrape a thumbnail from it.
    data = scrapertools.cache_page(item.url)
    logger.info("pelisalacarta.channels.mocosoftx data=" + data)
    try:
        thumbnail = scrapertools.get_match(data, '<div class="post">.*?<img src="([^"]+)"')
    except:
        thumbnail = ""
    plot = ""
    # Detect every recognised video host in the page body.
    itemlist = servertools.find_video_items(data=data)
    _tag_mocosoftx_items(itemlist, item, thumbnail, plot)
    if not itemlist:
        # Nothing on the post itself: follow the first highlighted link.
        patron = '<a href="([^"]+)" class="bbc_link" target="_blank"><span style="color: orange;" class="bbc_color">'
        matches = re.compile(patron, re.DOTALL).findall(data)
        if matches:
            data = scrapertools.cache_page(matches[0])
            logger.info(data)
            itemlist = servertools.find_video_items(data=data)
            _tag_mocosoftx_items(itemlist, item, thumbnail, plot)
    return itemlist
def findvideos(item):
    # Resolve video links for an AnimeForce entry.
    # When item.extra is set it is a marker string used to locate the row of
    # the episode table containing the real link; otherwise item.url is used.
    logger.info("streamondemand.animeforce findvideos")
    itemlist = []
    if item.extra:
        data = scrapertools.cache_page(item.url, headers=headers)
        blocco = scrapertools.get_match(data, r'%s(.*?)</tr>' % item.extra)
        scrapedurl = scrapertools.find_single_match(blocco, r'<a href="([^"]+)"[^>]+>')
        url = scrapedurl
    else:
        url = item.url
    # Expand URL shorteners before deciding how to handle the target.
    if 'adf.ly' in url:
        url = adfly.get_long_url(url)
    elif 'bit.ly' in url:
        url = scrapertools.getLocationHeaderFromResponse(url)
    if 'animeforce' in url:
        # Self-hosted video: scrape the <source ... video/mp4> tags directly.
        # NOTE(review): these appends mutate the module-level `headers` list,
        # so Referer entries accumulate across calls — confirm intended.
        headers.append(['Referer', item.url])
        data = scrapertools.cache_page(url, headers=headers)
        itemlist.extend(servertools.find_video_items(data=data))
        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__
        # Strip query parameters before re-fetching the player page.
        url = url.split('&')[0]
        data = scrapertools.cache_page(url, headers=headers)
        patron = """<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
        matches = re.compile(patron, re.DOTALL).findall(data)
        headers.append(['Referer', url])
        for video in matches:
            # The headers are serialised into the URL after '|' so the player
            # sends them when requesting the stream.
            itemlist.append(Item(channel=__channel__,
                                 action="play",
                                 title=item.title,
                                 url=video + '|' + urllib.urlencode(dict(headers)),
                                 folder=False))
    else:
        # External host: let servertools identify it from the URL.
        itemlist.extend(servertools.find_video_items(data=url))
        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__
    return itemlist
def findvideos(item):
    # Find playable links for a casacinema entry.
    logger.info("streamondemand.casacinema findvideos")
    itemlist = []
    # For series, item.url already holds the data; for films, download it.
    data = item.url if item.extra == 'serie' else scrapertools.cache_page(item.url, headers=headers)
    html = scrapertools.cache_page(data)
    patron = '"http:\/\/shrink-service\.it\/[^\/]+\/[^\/]+\/([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(html)
    # NOTE(review): this loop body is a no-op (`data = data`), so the
    # shrink-service URLs extracted above are never used — presumably the
    # intent was to feed each `url` into find_video_items; confirm before
    # changing behaviour.
    for url in matches:
        if url is not None:
            data = data
        else:
            continue
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__
    return itemlist
def findvideos(item):
    """Locate video links on a mocosoftx post page.

    Scrapes the post for a thumbnail, lets servertools detect every known
    video host in the page, and labels each result with the file name
    taken from its URL path.
    """
    logger.info("pelisalacarta.channels.mocosoftx findvideos")
    data = scrapertools.cache_page(item.url)
    logger.info("pelisalacarta.channels.mocosoftx data=" + data)
    try:
        thumbnail = scrapertools.get_match(data, '<div class="post">.*?<img src="([^"]+)"')
    except:
        thumbnail = ""
    plot = ""
    # Detect every embedded/linked video host in the page body.
    itemlist = servertools.find_video_items(data=data)
    for video in itemlist:
        video.channel = item.channel
        video.plot = plot
        video.thumbnail = thumbnail
        video.fulltitle = item.title
        # Label the entry with the last path segment of its URL.
        path = urlparse.urlparse(video.url).path
        titulo = path.split("/")[-1]
        video.title = titulo + " - [" + video.server + "]"
    return itemlist
def download_from_best_server(item, ask=False):
    # Download the content behind `item`, picking a server automatically
    # (ask=False: try servers in sorted order until one completes) or by
    # prompting the user (ask=True). Returns a {"downloadStatus": ...} dict.
    logger.info(
        "contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
    result = {"downloadStatus": STATUS_CODES.error}
    progreso = platformtools.dialog_progress("Download", "Recupero l'elenco dei server disponibili...")
    # Dynamically import the channel module that knows how to list servers.
    channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel])
    progreso.update(50, "Recupero l'elenco dei server disponibili.", "Connessione a %s..." % item.contentChannel)
    # Prefer the channel's own listing function; fall back to generic detection.
    if hasattr(channel, item.contentAction):
        play_items = getattr(channel, item.contentAction)(
            item.clone(action=item.contentAction, channel=item.contentChannel))
    else:
        play_items = servertools.find_video_items(item.clone(action=item.contentAction, channel=item.contentChannel))
    # Keep only directly playable entries.
    play_items = filter(lambda x: x.action == "play", play_items)
    progreso.update(100, "Recupero l'elenco dei server disponibili.", "Server disponibili: %s" % len(play_items),
                    "Identifico i server...")
    for i in play_items:
        if not i.server:
            i.server = servertools.get_server_from_url(i.url)
        if progreso.iscanceled():
            return {"downloadStatus": STATUS_CODES.canceled}
    play_items.sort(key=sort_method)
    if progreso.iscanceled():
        return {"downloadStatus": STATUS_CODES.canceled}
    progreso.close()
    if not ask:
        # Walk the server list until one of them works.
        for play_item in play_items:
            play_item = item.clone(**play_item.__dict__)
            play_item.contentAction = play_item.action
            play_item.infoLabels = item.infoLabels
            result = download_from_server(play_item)
            if progreso.iscanceled():
                result["downloadStatus"] = STATUS_CODES.canceled
            # Whether the download was cancelled or completed, stop trying
            # further servers.
            if result["downloadStatus"] in [STATUS_CODES.canceled, STATUS_CODES.completed]:
                break
    else:
        # Let the user pick the server from a dialog.
        seleccion = platformtools.dialog_select("Selezionare il server", [s.title for s in play_items])
        if seleccion > -1:
            play_item = item.clone(**play_items[seleccion].__dict__)
            play_item.contentAction = play_item.action
            play_item.infoLabels = item.infoLabels
            result = download_from_server(play_item)
        else:
            result["downloadStatus"] = STATUS_CODES.canceled
    return result
def findvideos(item):
    """List torrent qualities and streaming mirrors for an altorrent page.

    Builds one torrent item per quality/size pair found, and when a
    streaming ("dd") link exists either hands it to the library flow or
    expands it into per-server play items.
    """
    logger.info("pelisalacarta.altorrent findvideos")
    itemlist = []
    # BUG FIX: Thread(target=get_art(item)) called get_art synchronously and
    # passed its return value as the thread target; pass the callable and its
    # args so the artwork lookup actually runs in the background.
    th = Thread(target=get_art, args=(item,))
    th.setDaemon(True)
    th.start()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    enlaces = scrapertools.find_multiple_matches(
        data, 'id="modal-quality-\w+"><span>(.*?)</span>.*?class="quality-size">(.*?)</p>.*?href="([^"]+)"')
    for calidad, size, url in enlaces:
        title = ("[COLOR palegreen][B]Torrent[/B][/COLOR]" + " " + "[COLOR chartreuse]" + calidad + "[/COLOR]"
                 + "[COLOR teal] ( [/COLOR]" + "[COLOR forestgreen]" + size + "[/COLOR]" + "[COLOR teal] )[/COLOR]")
        itemlist.append(Item(channel=item.channel, title=title, url=url, action="play", server="torrent",
                             fanart=item.fanart, thumbnail=item.thumbnail, extra=item.extra,
                             InfoLabels=item.infoLabels, folder=False))
    # Optional streaming link next to the torrent buttons.
    dd = scrapertools.find_single_match(data, 'button-green-download-big".*?href="([^"]+)"><span class="icon-play">')
    if dd:
        if item.library:
            itemlist.append(Item(channel=item.channel, title="[COLOR floralwhite][B]Online[/B][/COLOR]",
                                 url=dd, action="dd_y_o", thumbnail="http://imgur.com/mRmBIV4.png",
                                 fanart=item.extra.split("|")[0], contentType=item.contentType,
                                 extra=item.extra, folder=True))
        else:
            videolist = servertools.find_video_items(data=str(dd))
            for video in videolist:
                # Use the bundled per-server icon when one exists.
                icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers",
                                           "server_" + video.server + ".png")
                if not os.path.exists(icon_server):
                    icon_server = ""
                itemlist.append(Item(channel=item.channel, url=video.url, server=video.server,
                                     title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]",
                                     thumbnail=icon_server, fanart=item.extra.split("|")[1],
                                     action="play", folder=False))
    if item.library and config.get_library_support() and itemlist:
        infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'title': item.infoLabels['title']}
        itemlist.append(Item(channel=item.channel, title="Añadir esta película a la biblioteca",
                             action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
                             text_color="0xFFe5ffcc", thumbnail='http://imgur.com/DNCBjUB.png',
                             extra="library"))
    return itemlist
def findvideos(item):
    """Extract playable streams from a vertelenovelas episode page.

    Tries three flash-player patterns (plain file URL, flashvars with an
    rtmp streamer, bare file/streamer pair) and finally lets servertools
    pick up any other recognised host embedded in the page.
    """
    logger.info("pelisalacarta.channels.vertelenovelas findvideos")
    data = scrapertools.cache_page(item.url)
    itemlist = []

    def direct(url):
        # Shortcut for a "directo" play item carrying the episode metadata.
        return Item(channel=item.channel, action="play", server="directo", title=item.title,
                    url=url, thumbnail=item.thumbnail, plot=item.plot, folder=False)

    # Pattern 1: player.swf embed with a plain file URL.
    patron = '<embed type="application/x-shockwave-flash" src="http://vertelenovelas.net/player.swf".*?file=([^\&]+)&'
    for url in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(direct(url))
    # Pattern 2: flashvars with an rtmp streamer prefix.
    patron = '<embed width="[^"]+" height="[^"]+" flashvars="file=([^\&]+)&.*?streamer=(rtmp[^\&]+)&'
    for final, principio in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(direct(principio + final))
    # Pattern 3: bare file=...&streamer=rtmp... pair (slash-joined).
    patron = 'file=([^\&]+)&streamer=(rtmp[^\&]+)&'
    for final, principio in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(direct(principio + "/" + final))
    # Any other recognised video host in the page.
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "[" + videoitem.server + "]"
    return itemlist
def findvideos(item):
    """Collect video links for a Tantifilm entry.

    Uses the pre-fetched HTML in item.extra when present (otherwise
    downloads the page), then adds both the hosts detected by servertools
    and any direct jwplayer file entries.
    """
    logger.info("fusionse.tantifilm findvideos")
    # item.extra carries the page HTML for entries resolved earlier.
    data = item.extra if item.extra != '' else scrapertools.cache_page(item.url, headers=headers)
    itemlist = servertools.find_video_items(data=data)
    for found in itemlist:
        found.title = item.title + found.title
        found.fulltitle = item.fulltitle
        found.thumbnail = item.thumbnail
        found.show = item.show
        found.plot = item.plot
        found.channel = __channel__
    # Direct jwplayer sources: {"file": ..., "type": ..., "label": ...}
    patron = r'\{"file":"([^"]+)","type":"[^"]+","label":"([^"]+)"\}'
    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=__channel__,
                             action="play",
                             title=item.title + " " + scrapedtitle + " quality",
                             url=scrapedurl.replace(r'\/', '/').replace('%3B', ';'),
                             thumbnail=item.thumbnail,
                             fulltitle=item.title,
                             show=item.title,
                             server='',
                             folder=False))
    return itemlist
# Hand-rolled shortlink parameters used by the site, tried in the original
# order: (regex, menu title, server name). "vk" codes are goo.gl shorteners
# that must be expanded; Putlocker codes map straight to an embed URL.
_SHORTLINK_PATTERNS = [
    ("vklat\=([a-zA-Z0-9]+)", "Ver en VK (Latino)", "vk"),
    ("plat\=([A-Z0-9]+)", "Ver en Putlocker (Latino)", "putlocker"),
    ("vksub\=([a-zA-Z0-9]+)", "Ver en VK (Subtitulado)", "vk"),
    ("plsub\=([A-Z0-9]+)", "Ver en Putlocker (Subtitulado)", "putlocker"),
    ("vk\=([a-zA-Z0-9]+)", "Ver en VK", "vk"),
    ("put\=([A-Z0-9]+)", "Ver en Putlocker", "putlocker"),
]


def findvideos(item):
    """Find playable links on a peliculasonlineflv page.

    First lets servertools detect any embedded host, then scans the page
    for the site's own VK / Putlocker shortlink parameters.

    REFACTOR: the original repeated six near-identical try/except blocks;
    they are now driven by the _SHORTLINK_PATTERNS table.
    """
    logger.info("[peliculasonlineflv.py] findvideos")
    itemlist = []
    # Download the page.
    data = scrapertools.cachePage(item.url)
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "Ver en " + videoitem.server
        videoitem.fulltitle = item.fulltitle
    # Now scan for the manual patterns.
    for patron, title, server in _SHORTLINK_PATTERNS:
        try:
            code = scrapertools.get_match(data, patron)
            if server == "vk":
                # goo.gl shortener: follow the redirect to the real VK URL.
                url = scrapertools.get_header_from_response("http://goo.gl/" + code, header_to_get="location")
            else:
                url = "http://www.putlocker.com/embed/" + code
            itemlist.append(Item(channel=item.channel, action="play", title=title,
                                 server=server, url=url, folder=False))
        except:
            logger.info("No encontrado enlace " + server.upper())
    return itemlist
def findvideos(item):
    """List the servers for a qserie episode plus previous/next navigation.

    BUG FIX: the original removed YouTube entries from `itemlist` while
    iterating over it, which skips the element following every removal;
    filtering is now done with a comprehension. Empty previous/next URLs
    are also no longer fetched.
    """
    logger.info("pelisalacarta.channels.qserie findvideos")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    anterior = scrapertools.find_single_match(data, '<a class="left" href="([^"]+)" title="Cap.tulo Anterior"></a>')
    siguiente = scrapertools.find_single_match(data, '<a class="right" href="([^"]+)" title="Cap.tulo Siguiente"></a>')
    titulo = scrapertools.find_single_match(data, '<h1 class="tithd bold fs18px lnht30px ico_b pdtop10px">([^<]+)</h1> ')
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    # Drop YouTube hits (trailers), keeping real servers only.
    itemlist = [videoitem for videoitem in itemlist if 'youtube' not in videoitem.url]
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.fanart = item.fanart
        videoitem.title = titulo + " " + videoitem.server
    # Previous-episode link, only when the target page really exists.
    if anterior:
        data = scrapertools.cache_page(anterior)
        existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>')
        if not existe:
            itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Anterior',
                                 url=anterior, thumbnail='https://s31.postimg.org/k5kpwyrgb/anterior.png',
                                 folder="true"))
    # Next-episode link, same existence check.
    if siguiente:
        data = scrapertools.cache_page(siguiente)
        existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>')
        if not existe:
            itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Siguiente',
                                 url=siguiente, thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png',
                                 folder="true"))
    return itemlist
def play(item):
    """Follow the page's JS redirect to the real video URL and wrap it in
    playable server items."""
    logger.info("play: {0}".format(item.url))
    page = httptools.downloadpage(item.url).data
    video_url = scrapertools.find_single_match(page, "location.href='([^']+)")
    logger.debug("Video URL = {0}".format(video_url))
    return servertools.find_video_items(data=video_url)
def play(item):
    """Resolve the embedded player for an item into playable entries.

    For regular items the player JSON is parsed for label/file pairs; for
    item.extra == "yes" the page redirects via window.location to a URL
    that servertools can identify.

    BUG FIX: the fallback pattern captures only 2 groups, but the original
    loop unconditionally unpacked 3 values and crashed with ValueError
    whenever the fallback was used. Also stopped shadowing builtin `type`.
    """
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\\', '', data)
    # The real player lives in an iframe; fetch it.
    item.url = scrapertools.find_single_match(data, 'src="([^"]+)"')
    data = httptools.downloadpage(item.url).data
    if item.extra != "yes":
        patron = '"label":(.*?),.*?"type":"(.*?)",.*?"file":"(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        if not matches:
            patron = '"label":(.*?),.*?"file":"(.*?)"'
            matches = re.compile(patron, re.DOTALL).findall(data)
        for match in matches:
            if len(match) == 3:
                dato_a, _mime, dato_b = match
            else:
                dato_a, dato_b = match
            # The JSON sometimes lists (label, file) and sometimes
            # (file, label); detect which capture holds the URL.
            if 'http' in dato_a:
                url = dato_a
            else:
                url = dato_b
            url = url.replace('\\', '')
            itemlist.append(Item(channel=item.channel, url=url, action="play",
                                 title=item.fulltitle + " (" + dato_a + ")", folder=False))
    else:
        url = scrapertools.find_single_match(data, 'window.location="([^"]+)"')
        videolist = servertools.find_video_items(data=url)
        for video in videolist:
            itemlist.append(Item(channel=item.channel, url=video.url, server=video.server,
                                 title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]",
                                 action="play", folder=False))
    return itemlist
def findvideos(item):
    """List servers for the language selected in item.extra on mundoflv.

    Follows the language-specific link whose label matches item.extra,
    then detects video hosts on the resulting page.
    """
    logger.info("pelisalacarta.channels.mundoflv findvideos")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    patron = 'href="([^"]+)".*?color="gold">([^<]+)<'
    for scrapedurl, scrapedidioma in re.compile(patron, re.DOTALL).findall(data):
        if scrapedidioma == item.extra:
            # The fetched HTML replaces `data`, so host detection below
            # scans the language-specific page.
            data = scrapertools.cache_page(scrapedurl)
    from core import servertools
    itemlist.extend(servertools.find_video_items(item=item, data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.folder = False
        videoitem.extra = item.thumvid
        videoitem.fulltitle = item.fulltitle
    return itemlist
def findvideo(item):
    """Resolve an episode link into playable items.

    Expands .ly shorteners first; animeforce-hosted pages are scraped for
    their direct mp4 sources, anything else is handed to servertools.
    """
    logger.info("streamondemand.animeinstreaming play")
    itemlist = []
    target = item.url
    if '.ly' in item.url:
        target = adfly.get_long_url(item.url)
    if 'animeforce' in target:
        # Self-hosted: strip query params and scrape the <source> tags.
        target = target.split('&')[0]
        headers.append(['Referer', item.url])
        page = scrapertools.cache_page(target, headers=headers)
        patron = """<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
        sources = re.compile(patron, re.DOTALL).findall(page)
        headers.append(['Referer', target])
        for video in sources:
            # Headers are serialised after '|' so the player sends them.
            itemlist.append(Item(channel=__channel__, action="play", title=item.title,
                                 url=video + '|' + urllib.urlencode(dict(headers)),
                                 folder=False))
    else:
        itemlist.extend(servertools.find_video_items(data=target))
        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__
    return itemlist
def findvideos(item):
    """Fetch the server links (or streams) table for a PelisDanko film and
    turn every detected host into a play item."""
    logger.info("pelisalacarta.channels.pelisdanko findvideos")
    itemlist = []
    # A trailing "ss" in the URL selects streams; otherwise download links.
    prefix = "strms" if item.url[-2:] == "ss" else "lnks"
    data = scrapertools.downloadpage(item.url)
    # Redirection parameters embedded in the page markup.
    data_slug = scrapertools.find_single_match(data, '<div id="ad" data-id="[^"]+" data-slug="([^"]+)"')
    data_id = scrapertools.find_single_match(data, '<tr class="rip hover" data-id="([^"]+)"')
    url = "http://pelisdanko.com/%s/%s/%s/%s" % (prefix, data_id, item.id_enlaces, data_slug)
    data = scrapertools.downloadpage(url, post="")
    from core import servertools
    for video_item in servertools.find_video_items(data=data):
        title = "[COLOR green]%s[/COLOR] | [COLOR darkorange][%s][/COLOR]" % (video_item.server, item.calidad)
        itemlist.append(item.clone(title=bbcode_kodi2html(title), url=video_item.url,
                                   action="play", server=video_item.server, text_color=""))
    # "Add this movie to the XBMC library" entry (not for cinema listings).
    if config.get_library_support() and len(itemlist) > 0 and item.category != "Cine":
        itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
                             url=item.url, infoLabels={'title': item.fulltitle},
                             action="add_pelicula_to_library", fulltitle=item.fulltitle,
                             text_color="green", id_enlaces=item.id_enlaces))
    return itemlist
def findvideos(item):
    """List per-quality streams and external hosts for a pelisplus page.

    BUG FIX: the original condition `'elreyxhd' or 'pelisplus.biz' in
    scrapedurl` is always true (a non-empty string is truthy), so every
    iframe was treated as a direct-host embed; each host is now tested
    against the URL explicitly.
    """
    logger.info("pelisalacarta.channels.pelisplus findvideos")
    itemlist = []
    datas = scrapertools.cache_page(item.url)
    patron = "<iframe.*?src='([^']+)' frameborder='0' allowfullscreen.*?"
    for scrapedurl in re.compile(patron, re.DOTALL).findall(datas):
        if 'elreyxhd' in scrapedurl or 'pelisplus.biz' in scrapedurl:
            # Direct-host embed: scrape its jwplayer sources per quality.
            data = scrapertools.cachePage(scrapedurl, headers=headers)
            patron = 'file":"([^"]+)","label":"([^"]+)","type":".*?","default":".*?"'
            for fileurl, scrapedcalidad in re.compile(patron, re.DOTALL).findall(data):
                url = fileurl
                title = item.title + ' (' + scrapedcalidad + ')'
                thumbnail = item.thumbnail
                fanart = item.fanart
                if (DEBUG):
                    logger.info("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "])")
                itemlist.append(Item(channel=item.channel, action="play", title=title,
                                     url=url, thumbnail=thumbnail, fanart=fanart))
    # Any other recognised host embedded in the listing page.
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=datas))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.thumbnail = item.thumbnail
        videoitem.action = 'play'
        videoitem.fulltitle = item.title
    return itemlist
def download_url(url,titulo,server): url = url.replace("\\","") print "Analizando enlace "+url # Averigua el servidor if server=="": itemlist = servertools.find_video_items(data=url) if len(itemlist)==0: print "No se puede identificar el enlace" return item = itemlist[0] print "Es un enlace en "+item.server else: item = Item() item.server = server # Obtiene las URL de descarga video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(item.server,url) if len(video_urls)==0: print "No se ha encontrado nada para descargar" return # Descarga el de mejor calidad, como hace pelisalacarta print "Descargando..." print video_urls devuelve = downloadtools.downloadbest(video_urls,titulo,continuar=True)
def play(item):
    """Expand URL shorteners (bit.ly / goo.gl / adfly j.gs) recursively,
    then resolve the final URL into playable server items."""
    logger.info("[somosmovies.py] play(item.url=" + item.url + ")")
    itemlist = []
    if "bit.ly" in item.url:
        logger.info("Acortador bit.ly")
        location = scrapertools.get_header_from_response(item.url, header_to_get="location")
        logger.info("[somosmovies.py] location=" + location)
        item.url = location
        return play(item)
    if "goo.gl" in item.url:
        logger.info("Acortador goo.gl")
        item.url = scrapertools.get_header_from_response(item.url, header_to_get="location")
        return play(item)
    elif "j.gs" in item.url:
        # adf.ly-family shortener.
        logger.info("Acortador j.gs (adfly)")
        from servers import adfly
        item.url = adfly.get_long_url(item.url)
        return play(item)
    else:
        from core import servertools
        itemlist = servertools.find_video_items(data=item.url)
        for videoitem in itemlist:
            videoitem.channel = item.channel
            videoitem.folder = False
    return itemlist
def play(item):
    """Resolve the hoster URL (following the site's 'Ver o Descargar'
    button when the link points back to the site) and relabel the
    resulting play items."""
    logger.info("{0} - {1} = {2}".format(item.show, item.title, item.url))
    if item.url.startswith(HOST):
        page = scrapertools.cache_page(item.url)
        patron = "<input type='button' value='Ver o Descargar' onclick='window.open\(\"([^\"]+)\"\);'/>"
        url = scrapertools.find_single_match(page, patron)
    else:
        url = item.url
    itemlist = servertools.find_video_items(data=url)
    # Build "Title [language]" when fulltitle carries a bracketed suffix.
    titulo = scrapertools.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$")
    if titulo:
        titulo += " [{language}]".format(language=item.language)
    for videoitem in itemlist:
        videoitem.title = titulo if titulo else item.title
        videoitem.channel = item.channel
    return itemlist
def findvideos(item):
    """Scrape a tusnovelas episode page for flash-player streams plus any
    other recognised hosts."""
    logger.info("pelisalacarta.channels.tusnovelas findvideos")
    data = scrapertools.cache_page(item.url)
    itemlist = []

    def direct(url):
        # Shortcut for a "directo" play item carrying the episode metadata.
        return Item(channel=item.channel, action="play", server="directo", title=item.title,
                    url=url, thumbnail=item.thumbnail, plot=item.plot, folder=False)

    # Pattern 1: todoanimes player.swf embed with a plain file URL.
    patron = '<embed type="application/x-shockwave-flash" src="http://www.todoanimes.com/reproductor/player.swf".*?file=([^\&]+)&'
    for match in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(direct(match))
    # Pattern 2: flashvars with an rtmp streamer prefix.
    patron = '<embed width="[^"]+" height="[^"]+" flashvars="file=([^\&]+)&.*?streamer=(rtmp[^\&]+)&'
    for final, principio in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(direct(principio + final))
    # Any other recognised video host in the page.
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "[" + videoitem.server + "]"
    return itemlist
def findepisodevideo(item):
    """Locate the single episode link for the season/episode pair stored in
    item.extra[0] and return it as a one-item list (empty list on any error).

    item.extra[0][0] is the season id, item.extra[0][1] the zero-padded
    episode number — assumed from the indexing below; TODO confirm caller.
    """
    logger.info("[SerieTVU.py]==> findepisodevideo")
    try:
        # Download the page (Cloudflare-protected).
        data = scrapertools.anti_cloudflare(item.url, headers=headers)
        # Take the block specific to the requested season.
        patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
        blocco = scrapertools.find_single_match(data, patron)
        # Extract the episode (leading zeros stripped to match the site's ids).
        patron = r'<a data-id="%s[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">' % item.extra[0][1].lstrip("0")
        matches = re.compile(patron, re.DOTALL).findall(blocco)
        itemlist = servertools.find_video_items(data=matches[0][0])
        # (translated) Couldn't find a better way than this to extract the
        # video; if anyone has a better method please fix it.
        # Keeps only the first detected server item.
        if len(itemlist) > 1:
            itemlist.remove(itemlist[1])
        server = re.sub(r'[-\[\]\s]+', '', itemlist[0].title)
        itemlist[0].title = "".join(["[%s] " % color(server, 'orange'), item.title])
        itemlist[0].fulltitle = item.fulltitle
        itemlist[0].show = item.show
        # matches[0][1] is the episode thumbnail scraped above.
        itemlist[0].thumbnail = matches[0][1]
        itemlist[0].channel = __channel__
    except:
        # Broad catch is deliberate here: any scrape failure (no match,
        # empty itemlist) is logged and reported as "no video".
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist
def findvideos(item):
    """Collect direct mp4 sources from the page plus any servers recognized
    by servertools, copying the item's metadata onto every result."""
    logger.info("streamondemand.iluvanime play")
    itemlist = []
    # Rebuild the URL from the 'www' part onwards (assumes 'www' is present).
    page_url = 'http://www' + item.url.split('www')[1]
    request_headers = [
        ['Upgrade-Insecure-Requests', '1'],
        ['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0'],
    ]
    data = scrapertools.cache_page(page_url, headers=request_headers)
    # Direct <source ... type='video/mp4'> links.
    for video in re.findall('<source.*?src="(.*?)".*?type=\'video/mp4\'', data, re.DOTALL):
        itemlist.append(Item(action="play", url=video))
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__
    return itemlist
def play(item):
    """Decode the item's URL (optional "###id;type" suffix, base64 payload,
    VideoMega special case), resolve servers, and notify the site of the
    watch status when an id/type pair was supplied.

    Fixes: in the original, `id`/`type` were only assigned inside the "###"
    branch but the status POST ran unconditionally, raising NameError for
    URLs without the marker. The POST is now only sent when both are known.
    (Also renamed to avoid shadowing the `id`/`type` builtins.)
    """
    logger.info("pelisalacarta.channels.hdfull play")
    target_id = None
    target_type = None
    if "###" in item.url:
        # URL carries "...###<id>;<type>" used for the status callback below.
        target_id = item.url.split("###")[1].split(";")[0]
        target_type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]
    if "aHR0c" in item.url:
        # "aHR0c" is base64 for "http" — last path segment is an encoded URL.
        import base64
        item.url = base64.decodestring(item.url.split("/")[-1])
    if "VideoMega" in item.title and not "videomega" in item.url:
        item.url = "http://videomega.tv/cdn.php?" + item.url
    itemlist = servertools.find_video_items(data=item.url)
    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    # Best-effort "watched" status ping; only possible when the URL carried
    # the id/type marker (previously crashed with NameError otherwise).
    if target_id is not None and target_type is not None:
        post = "target_id=%s&target_type=%s&target_status=1" % (target_id, target_type)
        data = scrapertools.cache_page(host + "/a/status", post=post)
    return itemlist
def play(item):
    """Resolve the playable URL, fetching ajax-loaded link blocks for on-site
    pages, then tag the resulting items with a language-suffixed title."""
    logger.info("{0} - {1} = {2}".format(item.show, item.title, item.url))
    if item.url.startswith(HOST):
        data = httptools.downloadpage(item.url).data
        # Links are injected via loadEnlace(serie, temp, cap, id) ajax calls.
        ajax_chunks = []
        for serie, temp, cap, linkID in re.findall("loadEnlace\((\d+),(\d+),(\d+),(\d+)\)", data):
            logger.debug("Ajax link request: Sherie = {0} - Temp = {1} - Cap = {2} - Link = {3}".format(serie, temp, cap, linkID))
            ajax_chunks.append(httptools.downloadpage(
                HOST + '/ajax/load_enlace.php?serie=' + serie + '&temp=' + temp
                + '&cap=' + cap + '&id=' + linkID).data)
        if ajax_chunks:
            data = "".join(ajax_chunks)
        url = scrapertools.find_single_match(data, "onclick='window.open\(\"([^\"]+)\"\);'/>")
    else:
        url = item.url
    itemlist = servertools.find_video_items(data=url)
    # Strip any trailing "[...]" tag from fulltitle, re-append the language.
    titulo = scrapertools.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$")
    if titulo:
        titulo += " [{language}]".format(language=item.language)
    for videoitem in itemlist:
        videoitem.title = titulo if titulo else item.title
        videoitem.channel = item.channel
    return itemlist
def play(item):
    """Return servers found in item.url plus a trailer entry for every server
    servertools detects in the downloaded page."""
    logger.info("pelisalacarta.bricocine findvideos")
    itemlist = servertools.find_video_items(data=item.url)
    page = scrapertools.cache_page(item.url)
    for video in servertools.findvideos(page):
        videotitle = scrapertools.unescape(video[0])
        # NOTE(review): item.url (not the scraped video URL video[1]) is kept
        # as the entry's url — presumably resolved again at play time; confirm.
        itemlist.append(Item(
            channel=item.channel,
            action="play",
            server=video[2],
            title="Trailer - " + videotitle,
            url=item.url,
            thumbnail=item.thumbnail,
            plot=item.plot,
            fulltitle=item.title,
            fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg",
            folder=False,
        ))
    return itemlist
def findvideos(item):
    """Scrape the embedded player page for gvideo links and other servers,
    normalize titles/thumbnails, push YouTube trailers to the end, and add
    the 'add to videolibrary' entry for movies."""
    logger.info()
    itemlist = []
    duplicados = []  # gvideo URLs already added, to avoid duplicates
    data = httptools.downloadpage(item.url).data
    logger.debug('data: %s' % data)
    # The real sources live inside a full-width iframe.
    video_page = scrapertools.find_single_match(
        data, "<iframe width='100%' height='500' src='(.*?)' frameborder='0'")
    data = httptools.downloadpage(video_page).data
    patron = '<li data-id=".*?">\s+<a href="(.*?)" >'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        # Links containing 'tipo' are indirections to a Google Video page.
        if 'tipo' in scrapedurl:
            server = 'gvideo'
            gvideo_data = httptools.downloadpage(scrapedurl).data
            video_url = scrapertools.find_single_match(
                gvideo_data, '<div id="player">.*?border: none" src="\/\/(.*?)" ')
            video_url = 'http://%s' % video_url
            gvideo_url = httptools.downloadpage(video_url).data
            videourl = servertools.findvideosbyserver(gvideo_url, server)
            logger.debug('videourl: %s' % videourl)
            language = 'latino'
            quality = 'default'
            url = videourl[0][1]
            title = '%s (%s)' % (item.contentTitle, server)
            thumbnail = item.thumbnail
            fanart = item.fanart
            if video_url not in duplicados:
                itemlist.append(
                    item.clone(action="play", title=title, url=url,
                               thumbnail=thumbnail, fanart=fanart, show=title,
                               extra='gvideo', language=language,
                               quality=quality, server=server))
                duplicados.append(video_url)
    # Add whatever other servers servertools recognizes in the page.
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        # videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        if videoitem.quality == '' or videoitem.language == '':
            videoitem.quality = 'default'
            videoitem.language = 'Latino'
        if videoitem.server != '':
            videoitem.thumbnail = servertools.guess_server_thumbnail(
                videoitem.server)
        else:
            videoitem.thumbnail = item.thumbnail
            videoitem.server = 'directo'
        videoitem.action = 'play'
        videoitem.fulltitle = item.title
        if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
    # Retitle YouTube trailers and swap them towards the end of the list.
    # NOTE(review): indentation reconstructed — swap assumed inside the
    # 'youtube' branch with the counter at loop level; confirm upstream.
    n = 0
    for videoitem in itemlist:
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]'
            itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n]
        n = n + 1
    # NOTE(review): membership test on an Item instance — presumably relies
    # on Item.__contains__ or similar; verify, and note itemlist may be empty.
    if item.extra == 'findvideos' and 'youtube' in itemlist[-1]:
        itemlist.pop(1)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    if 'serie' not in item.url:
        # Movies get an 'add to videolibrary' tail entry.
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))
    return itemlist
def play(item):
    """Delegate server resolution for the item's URL to servertools."""
    logger.info(item)
    playable = item.clone(url=item.url, contentTitle=item.title)
    return servertools.find_video_items(playable)
def play(item):
    """Follow cb01's chain of wrappers/redirectors (film wrapper, /goto/
    base64, go.php redirect page, /link/ packed JS) until a page or URL that
    servertools can resolve into playable items."""
    logger.info("[thegroove360.cb1] play")
    itemlist = []
    ### Handling new cb01 wrapper
    if host[9:] + "/film/" in item.url:
        # Wrapper answers with a redirect; take the Location header.
        iurl = httptools.downloadpage(item.url, only_headers=True,
                                      follow_redirects=False).headers.get("location", "")
        logger.info("/film/ wrapper: %s" % iurl)
        if iurl:
            item.url = iurl
    if '/goto/' in item.url:
        # Target URL is base64-encoded after /goto/.
        item.url = item.url.split('/goto/')[-1].decode('base64')
    # NOTE(review): reconstructed indentation — domain rewrite assumed
    # unconditional (applies to every URL); confirm upstream.
    item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw')
    logger.debug("##############################################################")
    if "go.php" in item.url:
        data = httptools.downloadpage(item.url, headers=headers).data
        try:
            data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";')
        except IndexError:
            try:
                # data = scrapertools.get_match(data, r'<a href="([^"]+)">clicca qui</a>')
                # (translated) Alternatively, since sometimes the text is
                # "Clicca qui per proseguire":
                data = scrapertools.get_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
            except IndexError:
                # Last resort: follow the redirect header.
                data = httptools.downloadpage(item.url, only_headers=True,
                                              follow_redirects=False).headers.get("location", "")
        # while 'vcrypt' in data:
        #     data = httptools.downloadpage(data, only_headers=True, follow_redirects=False).headers.get("location", "")
        logger.debug("##### play go.php data ##\n%s\n##" % data)
    elif "/link/" in item.url:
        data = httptools.downloadpage(item.url, headers=headers).data
        from lib import jsunpack
        try:
            # The link is hidden inside p.a.c.k.e.d JavaScript.
            data = scrapertools.get_match(data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
            data = jsunpack.unpack(data)
            logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
        except IndexError:
            logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)
        data = scrapertools.find_single_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
        # while 'vcrypt' in data:
        #     data = httptools.downloadpage(data, only_headers=True, follow_redirects=False).headers.get("location", "")
        if data.startswith('/'):
            # Relative link: resolve against the swzz.xyz host and fetch it.
            data = urlparse.urljoin("http://swzz.xyz", data)
            data = httptools.downloadpage(data, headers=headers).data
        logger.debug("##### play /link/ data ##\n%s\n##" % data)
    else:
        # No wrapper recognized: hand the URL itself to servertools.
        data = item.url
        logger.debug("##### play else data ##\n%s\n##" % data)
    logger.debug("##############################################################")
    try:
        itemlist = servertools.find_video_items(data=data)
        for videoitem in itemlist:
            videoitem.title = item.show
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__
    except AttributeError:
        # data ended up None/non-string somewhere in the chain above.
        logger.error("vcrypt data doesn't contain expected URL")
    return itemlist
def play(item):
    """Look up known video servers directly in the item's URL."""
    logger.info("[crimenes] play url=" + item.url)
    return servertools.find_video_items(data=item.url)
def findvideos(item):
    """Locate the hdpass player iframe, walk every resolution/mirror option,
    decode the embedded media URLs and return them as server items."""
    logger.info("[thegroove360.seriehd] findvideos")
    itemlist = []
    # Download the page; newlines stripped so the regexes match in one pass.
    data = httptools.downloadpage(item.url).data.replace('\n', '')
    patron = r'<iframe id="iframeVid" width=".+?" height=".+?" src="([^"]+)" allowfullscreen'
    url = scrapertools.find_single_match(data, patron)
    if not url.startswith("https:"):
        url = "https:" + url
    if 'hdpass' in url:
        data = httptools.downloadpage(url, headers=headers).data
        # Trim to the section between the resolution picker and the player.
        start = data.find('<div class="row mobileRes">')
        end = data.find('<div id="playerFront">', start)
        data = data[start:end]
        patron_res = '<div class="row mobileRes">(.*?)</div>'
        patron_mir = '<div class="row mobileMirrs">(.*?)</div>'
        patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed" value="([^"]+)".*?>'
        res = scrapertools.find_single_match(data, patron_res)
        urls = []
        # For every resolution option, fetch the page and enumerate mirrors.
        for res_url, res_video in scrapertools.find_multiple_matches(
                res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):
            data = httptools.downloadpage(urlparse.urljoin(url, res_url),
                                          headers=headers).data.replace('\n', '')
            mir = scrapertools.find_single_match(data, patron_mir)
            # NOTE(review): '</value>' closing tag looks like a typo for
            # '</option>' — lazy quantifier may still match; confirm on site.
            for mir_url in scrapertools.find_multiple_matches(
                    mir, '<option.*?value="([^"]+?)">[^<]+?</value>'):
                data = httptools.downloadpage(urlparse.urljoin(url, mir_url),
                                              headers=headers).data.replace('\n', '')
                for media_label, media_url in re.compile(patron_media).findall(data):
                    # media URLs are encoded; url_decode is a channel helper.
                    urls.append(url_decode(media_url))
        itemlist = servertools.find_video_items(data='\n'.join(urls))
    for videoitem in itemlist:
        # Derive a clean server name from the detected item title.
        servername = re.sub(r'[-\[\]\s]+', '', videoitem.title)
        videoitem.title = "".join([
            '[COLOR azure][[COLOR orange]' + servername.capitalize() + '[/COLOR]] - ',
            item.fulltitle
        ])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__
    return itemlist
def run():
    """Plugin entry point: decode the Item from sys.argv, then dispatch to
    channelselector, the updater, or the requested channel action (play,
    findvideos, search, or any other channel method). Python 2 code."""
    logger.info("dss.platformcode.launcher run")
    # Extract item from sys.argv
    if sys.argv[2]:
        item = Item().fromurl(sys.argv[2])
    # If no item, this is mainlist
    else:
        item = Item(channel="channelselector", action="getmainlist",
                    viewmode="movie")
    if item.action != "actualiza":
        logger.info("dss.platformcode.launcher " + item.tostring())
    try:
        if item.action == "editor_keymap":
            from platformcode import editor_keymap
            return editor_keymap.start()
        # If item has no action, stops here
        if item.action == "":
            logger.info("dss.platformcode.launcher Item sin accion")
            return
        # Action for main menu in channelselector
        if item.action == "getmainlist":
            import channelselector
            itemlist = channelselector.getmainlist()
            # Check for updates only on first screen
            if config.get_setting("updatecheck") == "true":
                logger.info("Check for plugin updates enabled")
                from core import updater
                try:
                    update, version_publicada, message, url_repo, serv = updater.check()
                    if update:
                        new_item = Item(
                            title="Descargar versión " + version_publicada,
                            channel="updater",
                            action="actualiza",
                            thumbnail=channelselector.get_thumbnail_path() + "Crystal_Clear_action_info.png",
                            version=version_publicada,
                            url=url_repo,
                            server=serv)
                        if config.get_setting("updateauto") == "true":
                            # Auto-update, then show an informational entry.
                            updater.actualiza(new_item)
                            new_item = Item(
                                title="Info para ver los cambios en la nueva versión instalada",
                                plot=message,
                                action="",
                                channel="",
                                thumbnail=channelselector.get_thumbnail_path() + "Crystal_Clear_action_info.png",
                                text_color="red")
                            itemlist.insert(0, new_item)
                        else:
                            # Manual mode: notify and offer the update entry.
                            platformtools.dialog_ok(
                                "Versión " + version_publicada + " disponible",
                                message)
                            itemlist.insert(0, new_item)
                except:
                    # Update check is best-effort; never block the main menu.
                    import traceback
                    logger.info(traceback.format_exc())
                    logger.info("dss.platformcode.launcher Fallo al verificar la actualización")
            else:
                logger.info("dss.platformcode.launcher Check for plugin updates disabled")
            # One-time MatchCenter activation notice.
            if not config.get_setting("primer_uso_matchcenter"):
                config.set_setting("primer_uso_matchcenter", "true")
                platformtools.dialog_ok(
                    "MatchCenter activado",
                    "Reinicia Kodi para usarlo (pulsar tecla U)",
                    "La tecla, botones y otras opciones pueden cambiarse en Configuración -> Preferencias -> MatchCenter")
            # Install or remove the MatchCenter keymap file as configured.
            file_keyboard = xbmc.translatePath(
                "special://profile/keymaps/deportesalacarta.xml")
            if config.get_setting("matchcenter_enabled") == "true" and not os.path.exists(file_keyboard):
                tecla = "61525"  # default key code; may be overridden below
                tecla_guardada = config.get_setting("keymap_edit", "editor_keymap")
                if tecla_guardada:
                    tecla = tecla_guardada
                from core import filetools
                data = '<keymap><global><keyboard><key id="%s">' % tecla + 'runplugin(plugin://plugin.video.dss/?ewogICAgImFjdGlvbiI6ICJzdGFydCIsIAogICAgImNoYW5uZWwiOiAibWF0Y2hjZW50ZXIiLCAKICAgICJpbmZvTGFiZWxzIjoge30KfQ%3D%3D))</key></keyboard></global></keymap>'
                filetools.write(file_keyboard, data)
            elif config.get_setting("matchcenter_enabled") == "false" and os.path.exists(file_keyboard):
                from core import filetools
                try:
                    filetools.remove(file_keyboard)
                except:
                    pass  # best-effort removal
            platformtools.render_items(itemlist, item)
        # Action for updating plugin
        elif item.action == "actualiza":
            from core import updater
            updater.actualiza(item)
            xbmc.executebuiltin("Container.Refresh")
        # Action for channel listing on channelselector
        elif item.action == "filterchannels":
            import channelselector
            itemlist = channelselector.filterchannels(item.channel_type)
            platformtools.render_items(itemlist, item)
        # Action in certain channel specified in "action" and "channel" parameters
        else:
            can_open_channel = True  # NOTE(review): set but never read here
            # Checks if channel exists
            channel_file = os.path.join(config.get_runtime_path(), 'channels',
                                        item.channel + ".py")
            logger.info("dss.platformcode.launcher channel_file=%s" % channel_file)
            channel = None
            if os.path.exists(channel_file):
                try:
                    channel = __import__('channels.%s' % item.channel, None, None,
                                         ["channels.%s" % item.channel])
                except ImportError:
                    # Python 2 exec-statement fallback import.
                    exec "import channels." + item.channel + " as channel"
            logger.info("deportesalacarta.platformcode.launcher running channel " + channel.__name__ + " " + channel.__file__)
            # Special play action
            if item.action == "play":
                logger.info("dss.platformcode.launcher play")
                # logger.debug("item_toPlay: " + "\n" + item.tostring('\n'))
                # First checks if channel has a "play" function
                if hasattr(channel, 'play'):
                    logger.info("dss.platformcode.launcher executing channel 'play' method")
                    itemlist = channel.play(item)
                    b_favourite = item.isFavourite
                    # Play should return a list of playable URLS
                    if len(itemlist) > 0 and isinstance(itemlist[0], Item):
                        item = itemlist[0]
                        if b_favourite:
                            item.isFavourite = True
                        platformtools.play_video(item)
                    # (translated) Allow multiple qualities from the channel's play
                    elif len(itemlist) > 0 and isinstance(itemlist[0], list):
                        item.video_urls = itemlist
                        platformtools.play_video(item)
                    # If not, shows user an error message
                    else:
                        platformtools.dialog_ok("plugin", "There is nothing to play")
                # If player don't have a "play" function, not uses the standard play from platformtools
                else:
                    logger.info("dss.platformcode.launcher executing core 'play' method")
                    platformtools.play_video(item)
            # Special action for findvideos, where the plugin looks for known urls
            elif item.action == "findvideos":
                # First checks if channel has a "findvideos" function
                if hasattr(channel, 'findvideos'):
                    itemlist = getattr(channel, item.action)(item)
                # If not, uses the generic findvideos function
                else:
                    logger.info("dss.platformcode.launcher no channel 'findvideos' method, "
                                "executing core method")
                    from core import servertools
                    itemlist = servertools.find_video_items(item)
                platformtools.render_items(itemlist, item)
            # Special action for searching, first asks for the words then call the "search" function
            elif item.action == "search":
                logger.info("dss.platformcode.launcher search")
                tecleado = platformtools.dialog_input("")
                if tecleado is not None:
                    tecleado = tecleado.replace(" ", "+")
                    # TODO (translated) review 'personal.py': it has no search
                    # function and would break here
                    itemlist = channel.search(item, tecleado)
                else:
                    itemlist = []
                platformtools.render_items(itemlist, item)
            # For all other actions
            else:
                logger.info("dss.platformcode.launcher executing channel '" + item.action + "' method")
                itemlist = getattr(channel, item.action)(item)
                platformtools.render_items(itemlist, item)
    except urllib2.URLError, e:
        import traceback
        logger.error("dss.platformcode.launcher " + traceback.format_exc())
        # Grab inner and third party errors
        if hasattr(e, 'reason'):
            logger.info("dss.platformcode.launcher Razon del error, codigo: " + str(e.reason[0]) + ", Razon: " + str(e.reason[1]))
            texto = config.get_localized_string(30050)  # "No se puede conectar con el sitio web"
            platformtools.dialog_ok("plugin", texto)
        # Grab server response errors
        elif hasattr(e, 'code'):
            logger.info("dss.platformcode.launcher codigo de error HTTP : %d" % e.code)
            # "El sitio web no funciona correctamente (error http %d)"
            platformtools.dialog_ok(
                "plugin", config.get_localized_string(30051) % e.code)
def play(item):
    """Resolve pelispedia media per hosting variant (html5 pages, the
    protected.php player API, netu/openload/raptu embeds, legacy flash) into
    a list of [description, url, seek, subtitle] entries, falling back to
    servertools detection."""
    logger.info("url=%s" % item.url)
    itemlist = []
    subtitle = ""
    # html5 - http://www.pelispedia.vip
    if item.url.startswith("http://www.pelispedia.vip"):
        headers = dict()
        headers["Referer"] = item.referer
        data = httptools.downloadpage(item.url, headers=headers).data
        data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
        from lib import jsunpack
        # Player config is inside packed "rocketscript" JS.
        match = scrapertools.find_single_match(
            data, '\.</div><script type="text/rocketscript">(.*?)</script>')
        data = jsunpack.unpack(match)
        data = data.replace("\\'", "'")
        subtitle = scrapertools.find_single_match(
            data, "tracks:\[{file:'([^']+)',label:'Spanish'")
        media_urls = scrapertools.find_multiple_matches(
            data, "{file:'(.+?)',label:'(.+?)',type:'video/mp4'")
        # (translated) lowest quality has to come first
        media_urls = sorted(media_urls, key=lambda k: k[1])
        if len(media_urls) > 0:
            for url, desc in media_urls:
                itemlist.append([desc, url, 0, subtitle])
    # another html5 variant - https://pelispedia.co/ver/f.php
    elif item.url.startswith("https://pelispedia.co/ver/f.php"):
        headers = dict()
        headers["Referer"] = item.referer
        data = httptools.downloadpage(item.url, headers=headers).data
        sub = scrapertools.find_single_match(data, "subtitulo='([^']+)'")
        data_sub = httptools.downloadpage(sub).data
        subtitle = save_sub(data_sub)
        from lib import jsunpack
        match = scrapertools.find_single_match(
            data, '<script type="text/rocketscript">(.*?)</script>')
        data = jsunpack.unpack(match)
        data = data.replace("\\'", "'")
        media_urls = scrapertools.find_multiple_matches(
            data, "{file:'(.+?)',label:'(.+?)'")
        # (translated) lowest quality has to come first
        media_urls = sorted(media_urls, key=lambda k: k[1])
        if len(media_urls) > 0:
            for url, desc in media_urls:
                itemlist.append([desc, url, 0, subtitle])
    # NEW
    # another html5 variant - http://player.pelispedia.tv/ver?v=
    elif item.url.startswith("http://player.pelispedia.tv/ver?v="):
        _id = scrapertools.find_single_match(item.url, 'ver\?v=(.+?)$')
        headers = dict()
        headers["Referer"] = item.referer
        data = httptools.downloadpage(item.url, headers=headers).data
        sub = scrapertools.find_single_match(
            data, 'var parametros = "\?pic=20&id=([^&]+)&sub=ES";')
        sub = "http://player.pelispedia.tv/cdn" + sub
        data_sub = httptools.downloadpage(sub).data
        subtitle = save_sub(data_sub)
        csrf_token = scrapertools.find_single_match(
            data, '<meta name="csrf-token" content="([^"]+)">')
        # NOTE(review): ct/iv/s are sent empty — presumably the API tolerates
        # an empty crypto envelope; confirm against the site.
        ct = ""
        iv = ""
        s = ""
        pre_token = '{"ct": %s,"iv": %s,"s":%s}' % (ct, iv, s)
        import base64
        token = base64.b64encode(pre_token)
        url = "http://player.pelispedia.tv/template/protected.php"
        post = "fv=%s&url=%s&sou=%s&token=%s" % ("0", _id, "pic", token)
        # eyJjdCI6IkVNYUd3Z2IwS2szSURzSGFGdkxGWlE9PSIsIml2IjoiZDI0NzhlYzU0OTZlYTJkNWFlOTFkZjAzZTVhZTNlNmEiLCJzIjoiOWM3MTM3MjNhMTkyMjFiOSJ9
        data = httptools.downloadpage(url, post=post).data
        logger.debug("datito %s " % data)
        media_urls = scrapertools.find_multiple_matches(
            data, '"url":"([^"]+)".*?"width":([^,]+),')
        # (translated) lowest quality has to come first (numeric width sort)
        media_urls = sorted(media_urls, key=lambda k: int(k[1]))
        if len(media_urls) > 0:
            for url, desc in media_urls:
                itemlist.append([desc, url, 0, subtitle])
    # netu
    elif item.url.startswith("http://www.pelispedia.tv/netu.html?"):
        url = item.url.replace("http://www.pelispedia.tv/netu.html?url=", "")
        from servers import netutv
        media_urls = netutv.get_video_url(urllib.unquote(url))
        itemlist.append(media_urls[0])
    # flash (legacy)
    elif item.url.startswith("http://www.pelispedia.tv"):
        key = scrapertools.find_single_match(
            item.url, 'index.php\?id=([^&]+).+?sub=([^&]+)&.+?imagen=([^&]+)')
        # if len(key) > 2:
        #     thumbnail = key[2]
        if key[1] != "":
            url_sub = "http://www.pelispedia.tv/sub/%s.srt" % key[1]
            data_sub = httptools.downloadpage(url_sub).data
            subtitle = save_sub(data_sub)
        url = "http://www.pelispedia.tv/gkphp_flv/plugins/gkpluginsphp.php"
        post = "link=" + urllib.quote(key[0])
        data = httptools.downloadpage(url, post=post).data
        media_urls = scrapertools.find_multiple_matches(
            data, 'link":"([^"]+)","type":"([^"]+)"')
        # (translated) lowest quality has to come first
        media_urls = sorted(media_urls, key=lambda k: k[1])
        if len(media_urls) > 0:
            for url, desc in media_urls:
                url = url.replace("\\", "")  # unescape JSON slashes
                itemlist.append([desc, url, 0, subtitle])
    # openload
    elif item.url.startswith("https://load.pelispedia.co/embed/openload.co"):
        url = item.url.replace("/embed/", "/stream/")
        data = httptools.downloadpage(url).data
        url = scrapertools.find_single_match(
            data, '<meta name="og:url" content="([^"]+)"')
        from servers import openload
        media_urls = openload.get_video_url(url)
        itemlist.append(media_urls[0])
    # raptu
    elif item.url.startswith("https://load.pelispedia.co/embed/raptu.com"):
        url = item.url.replace("/embed/", "/stream/")
        data = httptools.downloadpage(url).data
        url = scrapertools.find_single_match(
            data, '<meta property="og:url" content="([^"]+)"')
        from servers import raptu
        media_urls = raptu.get_video_url(url)
        if len(media_urls) > 0:
            for desc, url, numero, subtitle in media_urls:
                itemlist.append([desc, url, numero, subtitle])
    # Fallback: let servertools detect a known server from the URL itself.
    else:
        itemlist = servertools.find_video_items(data=item.url)
        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.channel = __channel__
    return itemlist
def findvideos(item): logger.info("streamondemand-pureita [tantifilm findvideos]") # Descarga la página data = item.extra if item.extra != '' else httptools.downloadpage( item.url, headers=headers).data if 'protectlink.stream' in data: urls = scrapertools.find_multiple_matches( data, r'<iframe src=".*?//.*?=([^"]+)"') for url in urls: url = url.decode('base64') data += '\t' + url url = httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get( "location", "") data += '\t' + url itemlist = servertools.find_video_items(data=data) for videoitem in itemlist: servername = re.sub(r'[-\[\]\s]+', '', videoitem.title) videoitem.title = "".join([ "[COLOR azure]" + item.title, ' - [[COLOR orange]' + servername.capitalize() + '[/COLOR]]' ]) videoitem.fulltitle = item.fulltitle videoitem.show = item.show videoitem.thumbnail = item.thumbnail videoitem.plot = item.plot videoitem.channel = __channel__ patron = r'\{"file":"([^"]+)","type":"[^"]+","label":"([^"]+)"\}' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: title = item.title + " " + scrapedtitle + " quality" itemlist.append( Item(channel=__channel__, action="play", title=title, url=scrapedurl.replace(r'\/', '/').replace('%3B', ';'), thumbnail=item.thumbnail, fulltitle=item.title, show=item.title, server='', folder=False)) patron = '<span>([^<]+)</span>\s*' patron += '<iframe width="298" height="220" frameborder="0" allowfullscreen="" src="([^"]+)"></iframe>' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedtitle, scrapedurl in matches: scrapedtitle = scrapedtitle.replace("Film", "") title = item.title + " [[COLOR yellow]" + scrapedtitle + "[/COLOR]]" itemlist.append( Item(channel=__channel__, action="play", title=title, url=scrapedurl.replace(r'\/', '/').replace('%3B', ';'), thumbnail=item.thumbnail, fulltitle=item.title, show=item.title, server='', folder=False)) return itemlist
def findvideos(item):
    """Resolve animeflv video links: s3.animeflv.com embeds go through the
    check.php API (with one retry on throttling), everything else is handed
    to servertools; Cldup links get a direct-play entry."""
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "",
                  httptools.downloadpage(item.url).data)
    # Embedded players are registered as video[N] = '<iframe ... src="...">'.
    list_videos = scrapertools.find_multiple_matches(
        data, 'video\[\d\]\s=\s\'<iframe.+?src="([^"]+)"')
    # Download links hidden behind the ouo.io shortener (?s= carries the URL).
    download_list = scrapertools.find_multiple_matches(
        data, 'href="http://ouo.io/s/y0d65LCP\?s=([^"]+)"')
    for i in download_list:
        list_videos.append(urllib.unquote_plus(i))
    aux_url = []
    cldup = False  # only the first cldup.com link gets its own entry
    for e in list_videos:
        url_api = "https://s3.animeflv.com/check.php?server=%s&v=%s"
        # izanagi, yourupload, hyperion
        if e.startswith("https://s3.animeflv.com/embed"):
            server, v = scrapertools.find_single_match(
                e, 'server=([^&]+)&v=(.*?)$')
            data = httptools.downloadpage(url_api % (server, v)).data.replace(
                "\\", "")
            # API throttling: wait the advertised 3 seconds and retry once.
            if '{"error": "Por favor intenta de nuevo en unos segundos", "sleep": 3}' in data:
                time.sleep(3)
                data = httptools.downloadpage(
                    url_api % (server, v)).data.replace("\\", "")
            if server != "hyperion":
                url = scrapertools.find_single_match(data, '"file":"([^"]+)"')
                if url:
                    itemlist.append(
                        item.clone(title="Enlace encontrado en %s" % server,
                                   url=url, action="play"))
            else:
                # hyperion returns several labeled mp4 qualities.
                # pattern = '"direct":"([^"]+)"'
                # url = scrapertools.find_single_match(data, pattern)
                # itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))
                pattern = '"label":([^,]+),"type":"video/mp4","file":"([^"]+)"'
                matches = scrapertools.find_multiple_matches(data, pattern)
                video_urls = []
                for label, url in matches:
                    video_urls.append([label, "mp4", url])
                if video_urls:
                    video_urls.sort(key=lambda u: int(u[0]))
                    itemlist.append(
                        item.clone(title="Enlace encontrado en %s" % server,
                                   action="play", video_urls=video_urls))
        else:
            if e.startswith("https://cldup.com") and not cldup:
                itemlist.append(
                    item.clone(title="Enlace encontrado en Cldup",
                               action="play", url=e))
                cldup = True
            # NOTE(review): reconstructed indentation — every non-embed URL is
            # assumed to be queued for servertools detection; confirm upstream.
            aux_url.append(e)
    itemlist.extend(servertools.find_video_items(data=",".join(aux_url)))
    for videoitem in itemlist:
        videoitem.fulltitle = item.fulltitle
        videoitem.channel = item.channel
        videoitem.thumbnail = item.thumbnail
    return itemlist
def findvideos_tv(item):
    """List captcha-protected pass links, expand /stream/links/ pages and
    keeplinks.co protectors, then add every server servertools detects."""
    logger.info("streamondemand.mondolunatico findvideos")
    itemlist = []
    # Download the page; for series the caller passes the HTML in item.url.
    data = item.url if item.extra == 'serie' else httptools.downloadpage(
        item.url).data
    # Extract captcha-protected pass links (played via the 'captcha' server).
    patron = r'noshade>(.*?)<br>.*?<a href="(%s/pass/index\.php\?ID=[^"]+)"' % host
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl in matches:
        scrapedtitle = scrapedtitle.replace('*', '').replace('Streaming',
                                                             '').strip()
        title = '%s - [%s]' % (item.title, scrapedtitle)
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show,
                 server='captcha',
                 folder=False))
    # Inline every /stream/links/ page so its servers are detected too.
    patron = 'href="(%s/stream/links/\d+/)"' % host
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        data += httptools.downloadpage(scrapedurl).data
    ### robalo fix obfuscator - start ####
    # keeplinks.co protector: fetch with the bypass cookie and append the
    # revealed links to the data blob.
    patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p92/([^"]+))"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for keeplinks, id in matches:
        headers = [[
            'Cookie',
            'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))
        ], ['Referer', keeplinks]]
        html = httptools.downloadpage(keeplinks, headers=headers).data
        data += str(
            scrapertools.find_multiple_matches(
                html, '</lable><a href="([^"]+)" target="_blank"'))
    ### robalo fix obfuscator - end ####
    # Inline iframe targets as well before running server detection.
    patron = 'src="([^"]+)" frameborder="0"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        data += httptools.downloadpage(scrapedurl).data
    for videoitem in servertools.find_video_items(data=data):
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__
        itemlist.append(videoitem)
    return itemlist
def findvideos(item):
    """Scrape iframe embeds; elreyxhd/pelisplus pages are parsed for their
    JSON sources list, everything else goes through servertools detection.
    YouTube trailers are retitled and moved to the end, and movies get an
    'add to library' tail entry.

    Fixes vs. original:
    - `patronr` was only assigned when the sources block used single or
      double quotes; an empty/unrecognized `quote` left it undefined and
      raised NameError at re.compile. Unrecognized pages are now skipped.
    - Guarded the `itemlist[-1]` access against an empty list.
    """
    logger.info()
    itemlist = []
    duplicados = []  # direct URLs already added, to avoid duplicates
    datas = httptools.downloadpage(item.url).data
    patron = "<iframe.*?src='(.*?)' frameborder.*?"
    matches = re.compile(patron, re.DOTALL).findall(datas)
    for scrapedurl in matches:
        if 'elreyxhd' in scrapedurl or 'pelisplus.biz' in scrapedurl:
            data = httptools.downloadpage(scrapedurl, headers=headers).data
            # Peek at the sources block to see which quote style it uses.
            quote = scrapertools.find_single_match(data, 'sources.*?file.*?http')
            if quote and "'" in quote:
                patronr = "file:'([^']+)',label:'([^.*?]+)',type:.*?'.*?}"
            elif '"' in quote:
                patronr = '{file:"(.*?)",label:"(.*?)"}'
            else:
                # No recognizable sources block on this embed page.
                # (Original raised NameError on patronr here.)
                continue
            matchesr = re.compile(patronr, re.DOTALL).findall(data)
            for scrapedurl, scrapedcalidad in matchesr:
                url = scrapedurl
                title = item.contentTitle + ' (' + str(scrapedcalidad) + ')'
                thumbnail = item.thumbnail
                fanart = item.fanart
                if url not in duplicados:
                    itemlist.append(
                        Item(channel=item.channel, action="play", title=title,
                             url=url, thumbnail=thumbnail, fanart=fanart,
                             extra='directo'))
                    duplicados.append(url)
    # Add whatever other servers servertools recognizes in the page.
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=datas))
    for videoitem in itemlist:
        videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        if videoitem.server != '':
            videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
        else:
            videoitem.thumbnail = item.thumbnail
        videoitem.action = 'play'
        videoitem.fulltitle = item.title
        if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
    # Retitle YouTube trailers and swap them towards the end of the list.
    n = 0
    for videoitem in itemlist:
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]'
            itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n]
        n = n + 1
    # Guard added: original crashed on itemlist[-1] when the list was empty.
    if item.extra == 'findvideos' and itemlist and 'youtube' in itemlist[-1]:
        itemlist.pop(1)
    if 'serie' not in item.url:
        # Movies get an 'add to library' tail entry.
        if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel,
                     title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                     url=item.url,
                     action="add_pelicula_to_library",
                     extra="findvideos",
                     contentTitle=item.contentTitle))
    return itemlist
def findvideos(item):
    # Scrape the movie-details section of a documentari-streaming-db page.
    # The site uses three different HTML markups for its link lists; each is
    # tried in turn and accumulated into `links` as [title, url, server].
    # If nothing matches, fall back to the generic server detector.
    logger.info("[streamondemand-pureita documentaristreamingdb] findvideos")
    data = scrapertools.cache_page(item.url, headers=headers)

    links = []  # each entry: [title, url, server-name-or-'unknown']
    begin = data.find('<div class="moview-details-text">')
    if begin != -1:
        end = data.find('<!-- //movie-details -->', begin)
        mdiv = data[begin:end]

        # Markup 1: bold colored headers; links live between consecutive headers
        items = [[m.end(), m.group(1)] for m in re.finditer('<b style="color:#333333;">(.*?)<\/b>', mdiv)]
        if items:
            for idx, val in enumerate(items):
                # Slice from this header up to the next one (or end of div)
                if idx == len(items) - 1:
                    _data = mdiv[val[0]:-1]
                else:
                    _data = mdiv[val[0]:items[idx + 1][0]]
                for link in re.findall('<a.*?href="([^"]+)"[^>]+>.*?<b>(.*?)<\/b><\/a>+', _data):
                    # Skip URLs already collected
                    if not link[0].strip() in [l[1] for l in links]:
                        links.append([val[1], link[0].strip(), link[1].strip()])

        # Markup 2: <p><strong> headers; server unknown, title from anchor text
        items = [[m.end(), m.group(1)] for m in re.finditer('<p><strong>(.*?)<\/strong><\/p>', mdiv)]
        if items:
            _title = ''
            for idx, val in enumerate(items):
                if idx == len(items) - 1:
                    _data = mdiv[val[0]:-1]
                else:
                    _data = mdiv[val[0]:items[idx + 1][0]]
                for link in re.findall('<a\s.*?href="([^"]+)".*?>(?:<span[^>]+>)*(?:<strong>)*([^<]+)', _data):
                    if not link[0].strip() in [l[1] for l in links]:
                        # Reuse the previous title when the anchor text is
                        # just (part of) the URL itself
                        if not link[1].strip() in link[0]:
                            _title = link[1].strip()
                        links.append([_title, link[0].strip(), 'unknown'])

        # Markup 3: <li><strong> headers
        items = [[m.start(), m.group(1)] for m in re.finditer('<li><strong>([^<]+)<', mdiv)]
        if items:
            for idx, val in enumerate(items):
                if idx == len(items) - 1:
                    _data = mdiv[val[0]:-1]
                else:
                    _data = mdiv[val[0]:items[idx + 1][0]]
                for link in re.findall('<a\s.*?href="([^"]+)".*?>(?:<span[^>]+>)*(?:<strong>)*([^<]+)', _data):
                    if not link[0].strip() in [l[1] for l in links]:
                        links.append([val[1], link[0].strip(), link[1].strip()])

    itemlist = []
    if links:
        for l in links:
            # Clean the title (Py2 unicode, non-breaking spaces, site boilerplate)
            title = unicode(l[0], 'utf8', 'ignore')
            title = title.replace(u'\xa0', ' ').replace(
                'Documentario ', '').replace(' doc ', ' ').replace(' streaming', '').replace(' Streaming', '')
            url = l[1]
            action = "play"
            server = "unknown"
            folder = False
            if url == '#' or not title:
                continue
            logger.info('server: %s' % l[2])
            if l[2] != 'unknown':
                server = unicode(l[2], 'utf8', 'ignore')
            else:
                logger.info(url)
                # Guess the server name from the URL's domain
                match = re.search('https?:\/\/(?:www\.)*([^\.]+)\.', url)
                if match:
                    server = match.group(1)
                    # Links back to the site itself open another listing page
                    if server == "documentari-streaming-db":
                        action = "findvideos"
                        folder = True
            logger.info('server: %s, action: %s' % (server, action))
            logger.info(title + ' - [COLOR blue]' + server + '[/COLOR]')
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=title + ' - [COLOR blue]' + server + '[/COLOR]',
                    action=action,
                    server=server,  # servertools.get_server_from_url(url),
                    url=url,
                    thumbnail=item.thumbnail,
                    fulltitle=title,
                    show=item.show,
                    plot=item.plot,
                    parentContent=item,
                    folder=folder))
    else:
        # Fallback: let servertools detect embedded players in the raw page
        itemlist = servertools.find_video_items(data=data)
        for videoitem in itemlist:
            videoitem.title = "".join([
                item.title, '[COLOR orange][B]' + videoitem.title + '[/B][/COLOR]'
            ])
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__
    return itemlist
def findvideos(item):
    """Collect playable links: direct hoster streams plus embedded servers.

    Scrapes every <iframe> on item.url; 'elreyxhd'/'pelisplus.biz' iframes
    are resolved to direct stream URLs with quality labels, everything else
    goes through servertools.find_video_items().  The list is then filtered
    (filtertools), wired for AutoPlay, and movies get an "add to
    videolibrary" entry.
    """
    logger.info()
    itemlist = []
    duplicados = []  # direct URLs already appended
    datas = httptools.downloadpage(item.url).data
    patron = "<iframe.*?src='([^']+)' frameborder='0' allowfullscreen.*?"
    matches = re.compile(patron, re.DOTALL).findall(datas)

    for scrapedurl in matches:
        # BUGFIX: original was "if 'elreyxhd' or 'pelisplus.biz' in scrapedurl",
        # which is always True because the literal 'elreyxhd' is truthy.
        if 'elreyxhd' in scrapedurl or 'pelisplus.biz' in scrapedurl:
            patronr = ''
            data = httptools.downloadpage(scrapedurl, headers=headers).data
            quote = scrapertools.find_single_match(data, 'sources.*?file.*?http')
            # The player JS uses either single or double quotes; pick the
            # matching extraction pattern.
            if quote and "'" in quote:
                patronr = "file:'([^']+)',label:'([^.*?]+)',type:.*?'.*?}"
            elif '"' in quote:
                patronr = '{file:"(.*?)",label:"(.*?)"}'
            if patronr != '':
                matchesr = re.compile(patronr, re.DOTALL).findall(data)
                for scrapedurl, scrapedcalidad in matchesr:
                    url = scrapedurl
                    language = 'latino'
                    quality = scrapedcalidad.decode('cp1252').encode('utf8')
                    title = item.contentTitle + ' (' + str(scrapedcalidad) + ')'
                    thumbnail = item.thumbnail
                    fanart = item.fanart
                    if url not in duplicados:
                        itemlist.append(
                            item.clone(
                                action="play",
                                title=title,
                                url=url,
                                thumbnail=thumbnail,
                                fanart=fanart,
                                show=title,
                                extra='directo',
                                language=language,
                                quality=quality,
                                server='directo',
                            ))
                        duplicados.append(url)

    # Generic detection of every other embedded server on the page
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=datas))

    for videoitem in itemlist:
        # videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        if videoitem.quality == '' or videoitem.language == '':
            videoitem.quality = 'default'
            videoitem.language = 'Latino'
        if videoitem.server != '':
            videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
        else:
            videoitem.thumbnail = item.thumbnail
            videoitem.server = 'directo'
        videoitem.action = 'play'
        videoitem.fulltitle = item.title
        if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'

    # Move any YouTube trailer to the end of the list
    n = 0
    for videoitem in itemlist:
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]'
            itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n]
        n = n + 1

    # BUGFIX: original tested "'youtube' in itemlist[-1]" (membership on the
    # Item object); test the URL and guard short lists before pop(1).
    if item.extra == 'findvideos' and len(itemlist) > 1 and 'youtube' in itemlist[-1].url:
        itemlist.pop(1)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if 'serie' not in item.url:
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))
    return itemlist
def play(item):
    """Build the embedded player URL for this item and resolve it to playable items."""
    logger.info(item)
    embed_url = urljoin(item.url, "/v/%s" % item.vID)
    return servertools.find_video_items(item.clone(url=embed_url))
def get_next_items(item):
    # Central dispatcher: route `item` to the main menu, the channel selector
    # or the appropriate channel module, and return the next itemlist to show.
    plugintools.log("navigation.get_next_items item=" + item.tostring())
    try:
        # ----------------------------------------------------------------
        # Main menu
        # ----------------------------------------------------------------
        if item.channel == "navigation":
            if item.action == "mainlist":
                plugintools.log("navigation.get_next_items Main menu")
                itemlist = channelselector.getmainlist("bannermenu")
        elif item.channel == "channelselector":
            if item.action == "getchanneltypes":
                plugintools.log("navigation.get_next_items Channel types menu")
                itemlist = channelselector.getchanneltypes("bannermenu")
            elif item.action == "filterchannels":
                plugintools.log(
                    "navigation.get_next_items Channel list menu, channel_type="
                    + item.channel_type)
                itemlist = channelselector.filterchannels(
                    item.channel_type, "bannermenu")
        else:
            # A channel item: default action is "mainlist"
            if item.action == "":
                item.action = "mainlist"
            plugintools.log("navigation.get_next_items Channel code (" +
                            item.channel + "." + item.action + ")")
            # Dynamic import of the channel module (Python 2 exec statement);
            # falls back to the "core" package when not under "channels"
            try:
                exec "import channels." + item.channel + " as channel"
            except:
                exec "import core." + item.channel + " as channel"
            from platformcode import platformtools
            if item.action == "findvideos":
                plugintools.log("navigation.get_next_items findvideos")
                # If the channel has its own "findvideos" action it takes priority
                if hasattr(channel, 'findvideos'):
                    plugintools.log(
                        "navigation.get_next_items play Channel has its own 'findvideos' method"
                    )
                    itemlist = channel.findvideos(item)
                else:
                    itemlist = []
                # Fall back to the generic server detector when the channel
                # produced nothing
                if len(itemlist) == 0:
                    from core import servertools
                    itemlist = servertools.find_video_items(item)
                if len(itemlist) == 0:
                    itemlist = [
                        Item(
                            title="No se han encontrado vídeos",
                            thumbnail=
                            "https://raw.githubusercontent.com/pelisalacarta-ce/media/master/pelisalacarta/thumb_error.png"
                        )
                    ]
            else:
                if item.action == "search":
                    # Ask the user for the search words first
                    tecleado = plugintools.keyboard_input()
                    if tecleado != "":
                        tecleado = tecleado.replace(" ", "+")
                        itemlist = channel.search(item, tecleado)
                elif item.channel == "novedades" and item.action == "mainlist":
                    itemlist = channel.mainlist(item, "bannermenu")
                elif item.channel == "buscador" and item.action == "mainlist":
                    itemlist = channel.mainlist(item)
                else:
                    # Generic dispatch: call channel.<action>(item)
                    exec "itemlist = channel." + item.action + "(item)"
                if itemlist is None:
                    itemlist = []
                # Give every returned item a default thumbnail
                for loaded_item in itemlist:
                    if loaded_item.thumbnail == "":
                        if loaded_item.folder:
                            loaded_item.thumbnail = "https://raw.githubusercontent.com/pelisalacarta-ce/media/master/pelisalacarta/thumb_folder.png"
                        else:
                            loaded_item.thumbnail = "https://raw.githubusercontent.com/pelisalacarta-ce/media/master/pelisalacarta/thumb_nofolder.png"
                if len(itemlist) == 0:
                    itemlist = [
                        Item(
                            title="No hay elementos para mostrar",
                            thumbnail=
                            "https://raw.githubusercontent.com/pelisalacarta-ce/media/master/pelisalacarta/thumb_error.png"
                        )
                    ]
    except:
        # Any failure anywhere above becomes a single "error" entry
        import traceback
        plugintools.log("navigation.get_next_items " + traceback.format_exc())
        itemlist = [
            Item(
                title="Se ha producido un error",
                thumbnail=
                "https://raw.githubusercontent.com/pelisalacarta-ce/media/master/pelisalacarta/thumb_error.png"
            )
        ]
    plugintools.log("navigation.get_next_items " + str(len(itemlist)) + " channels")
    return itemlist
def findvideos(item):
    # Build playable links for a movie page: iframes containing "stream" are
    # resolved to a direct file URL (plus optional .srt subtitle); everything
    # else goes through the generic server detector.
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
    #logger.debug(data)
    patron = '<iframe.*?rptss src=(.*?) (?:width.*?|frameborder.*?) allowfullscreen><\/iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for video_url in matches:
        logger.debug('video_url: %s' % video_url)
        if 'stream' in video_url:
            # Protocol-relative URL: prepend https
            data = httptools.downloadpage('https:' + video_url).data
            logger.debug(data)
            if not 'iframe' in video_url:
                # Follow the inner iframe to the real player page
                new_url = scrapertools.find_single_match(
                    data, 'iframe src="(.*?)"')
                new_data = httptools.downloadpage(new_url).data
                logger.debug('new_data %s' % new_data)
                url = ''
                try:
                    url, quality = scrapertools.find_single_match(
                        new_data, 'file:.*?(?:\"|\')(https.*?)(?:\"|\'),'
                        'label:.*?(?:\"|\')(.*?)(?:\"|\'),')
                except:
                    # No file/label pair found; url stays '' and is skipped
                    pass
                if url != '':
                    # The player requires a Referer header, appended in
                    # Kodi's "|Header=value" URL syntax
                    headers_string = '|Referer=%s' % url
                    url = url.replace('download', 'preview') + headers_string
                    sub = scrapertools.find_single_match(new_data, 'file:.*?"(.*?srt)"')
                    new_item = (Item(title=item.title, url=url, quality=quality,
                                     subtitle=sub, server='directo'))
                    itemlist.append(new_item)
        else:
            itemlist.extend(servertools.find_video_items(data=video_url))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = 'play'
        videoitem.thumbnail = item.thumbnail
        videoitem.infoLabels = item.infoLabels
        videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en Youtube[/COLOR]'
    itemlist = servertools.get_servers_itemlist(itemlist)
    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))
    return itemlist
def download_all_episodes(item, channel, first_episode="", preferred_server="vidspot", filter_language=""):
    """Download every pending episode of a show, best mirror first.

    item             -- series Item; item.extra names the channel action that
                        lists the episodes (optionally "action###real_extra")
    channel          -- imported channel module
    first_episode    -- "SxE" tag; episodes sorted before it are skipped
    preferred_server -- server whose mirrors are tried first in each language
    filter_language  -- if set ("it"/"es"/"lat"/...), only mirrors in that
                        language are downloaded
    """
    logger.info("show=" + item.show)
    show_title = item.show

    # Get the listing the download was launched from
    action = item.extra
    # The "extra" attribute may carry both the action and a payload
    if "###" in item.extra:
        action = item.extra.split("###")[0]
        item.extra = item.extra.split("###")[1]
    episode_itemlist = getattr(channel, action)(item)

    # Sort episodes so the first_episode filter works
    episode_itemlist = sorted(episode_itemlist, key=lambda it: it.title)

    from core import servertools
    from core import scrapertools

    best_server = preferred_server
    # worst_server = "moevideos"

    # Process each episode
    if first_episode == "":
        empezar = True
    else:
        empezar = False

    for episode_item in episode_itemlist:
        try:
            logger.info("episode=" + episode_item.title)
            episode_title = scrapertools.find_single_match(
                episode_item.title, "(\d+x\d+)")
            logger.info("episode=" + episode_title)
        except:
            import traceback
            logger.error(traceback.format_exc())
            continue

        if first_episode != "" and episode_title == first_episode:
            empezar = True

        if episodio_ya_descargado(show_title, episode_title):
            continue

        if not empezar:
            continue

        # Extract the mirrors
        try:
            mirrors_itemlist = channel.findvideos(episode_item)
        except:
            mirrors_itemlist = servertools.find_video_items(episode_item)
        logger.info(str(mirrors_itemlist))  # was a stray print(); keep it in the log

        descargado = False

        # Reorder mirrors: preferred languages first, and within each language
        # bucket the preferred server first.
        new_mirror_itemlist_1 = []
        new_mirror_itemlist_2 = []
        new_mirror_itemlist_3 = []
        new_mirror_itemlist_4 = []
        new_mirror_itemlist_5 = []
        new_mirror_itemlist_6 = []
        for mirror_item in mirrors_itemlist:
            # BUGFIX: "(Español)" was a second independent "if", so an
            # "(Italiano)" mirror also fell through the elif chain into the
            # final else and was queued twice.  The chain is now exclusive.
            if "(Italiano)" in mirror_item.title:
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_1.append(mirror_item)
                else:
                    new_mirror_itemlist_2.append(mirror_item)
            elif "(Español)" in mirror_item.title:
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_1.append(mirror_item)
                else:
                    new_mirror_itemlist_2.append(mirror_item)
            elif "(Latino)" in mirror_item.title:
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_3.append(mirror_item)
                else:
                    new_mirror_itemlist_4.append(mirror_item)
            elif "(VOS)" in mirror_item.title:
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_3.append(mirror_item)
                else:
                    new_mirror_itemlist_4.append(mirror_item)
            else:
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_5.append(mirror_item)
                else:
                    new_mirror_itemlist_6.append(mirror_item)

        mirrors_itemlist = (new_mirror_itemlist_1 + new_mirror_itemlist_2 +
                            new_mirror_itemlist_3 + new_mirror_itemlist_4 +
                            new_mirror_itemlist_5 + new_mirror_itemlist_6)

        for mirror_item in mirrors_itemlist:
            logger.info("mirror=" + mirror_item.title)

            # BUGFIX: same independent-"if" problem here — "(Italiano)"
            # mirrors always ended up relabelled "(Desconocido)" by the else.
            if "(Italiano)" in mirror_item.title:
                idioma = "(Italiano)"
                codigo_idioma = "it"
            elif "(Español)" in mirror_item.title:
                idioma = "(Español)"
                codigo_idioma = "es"
            elif "(Latino)" in mirror_item.title:
                idioma = "(Latino)"
                codigo_idioma = "lat"
            elif "(VOS)" in mirror_item.title:
                idioma = "(VOS)"
                codigo_idioma = "vos"
            elif "(VO)" in mirror_item.title:
                idioma = "(VO)"
                codigo_idioma = "vo"
            else:
                idioma = "(Desconocido)"
                codigo_idioma = "desconocido"

            logger.info("filter_language=#" + filter_language +
                        "#, codigo_idioma=#" + codigo_idioma + "#")
            if filter_language == "" or (filter_language != "" and
                                         filter_language == codigo_idioma):
                logger.info("downloading mirror")
            else:
                logger.info("language " + codigo_idioma + " filtered, skipping")
                continue

            # Resolve the mirror to concrete video items
            if hasattr(channel, 'play'):
                video_items = channel.play(mirror_item)
            else:
                video_items = [mirror_item]

            if len(video_items) > 0:
                video_item = video_items[0]

                # Check availability without showing a dialog
                video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(
                    video_item.server, video_item.url, video_password="",
                    muestra_dialogo=False)

                # Queue the download
                if puedes:
                    logger.info("downloading mirror started...")
                    # The highest-quality video is the last one
                    # mediaurl = video_urls[len(video_urls) - 1][1]
                    devuelve = downloadbest(video_urls,
                                            show_title + " " + episode_title + " " + idioma +
                                            " [" + video_item.server + "]",
                                            continuar=False)
                    if devuelve == 0:
                        logger.info("download ok")
                        descargado = True
                        break
                    elif devuelve == -1:
                        # User aborted: notify and stop the whole batch
                        try:
                            from platformcode import platformtools
                            platformtools.dialog_ok("plugin", "Descarga abortada")
                        except:
                            pass
                        return
                    else:
                        logger.info("download error, try another mirror")
                        continue
                else:
                    logger.info("downloading mirror not available... trying next")

        if not descargado:
            logger.info("UNDOWNLOADED EPISODE " + episode_title)
def findvideos(item):
    """Generic findvideos: delegate all link discovery to servertools."""
    logger.info()
    return servertools.find_video_items(item)
def findvid_film(item):
    """Scrape all link sections of a cineblog01 film page.

    The page lists links under five <table> sections (Streaming,
    Streaming HD, Streaming 3D, Download, Download HD) that share the same
    anchor markup; each section only differs in its boundary pattern and
    title color, so they are processed by one local helper.  Falls back to
    servertools.find_video_items() when nothing is found.
    """
    logger.info("[cineblog01.py] findvideos")
    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    data = scrapertools.decodeHtmlentities(data)

    # Extract the quality label shown next to the title
    QualityStr = ""
    for match in re.compile('>([^<]+)</strong></div>', re.DOTALL).finditer(data):
        QualityStr = scrapertools.unescape(match.group(1))[6:]

    # All sections share this anchor markup
    link_patron = '<td><a\s*href="([^"]+)"\s*target="_blank">([^<]+)</a></td>'

    def _add_section(label, color, section_patron):
        # Scrape one link section and append a play Item per anchor.
        section = scrapertools.find_single_match(data, section_patron)
        for scrapedurl, scrapedtitle in re.compile(link_patron, re.DOTALL).findall(section):
            logger.debug("##### findvideos %s ## %s ## %s ##" % (label, scrapedurl, scrapedtitle))
            title = "[COLOR " + color + "]" + label + ":[/COLOR] " + item.title + \
                    " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=title,
                     url=scrapedurl,
                     fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     folder=False))

    _add_section("Streaming", "orange",
                 '<strong>Streaming:</strong>(.*?)<table height="30">')
    _add_section("Streaming HD", "yellow",
                 '<strong>Streaming HD[^<]+</strong>(.*?)<table height="30">')
    _add_section("Streaming 3D", "pink",
                 '<strong>Streaming 3D[^<]+</strong>(.*?)<table height="30">')
    _add_section("Download", "aqua",
                 '<strong>Download:</strong>(.*?)<table height="30">')
    _add_section("Download HD", "azure",
                 '<strong>Download HD[^<]+</strong>(.*?)<table width="100%" height="20">')

    # Fallback: generic server detection when no section yielded links
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(item=item)

    return itemlist
def run(item=None):
    # Plugin entry point: decode the Item from sys.argv (or build the main
    # menu item), then dispatch to the selector menus or the target channel's
    # action.  Network errors are caught and reported via dialogs.
    logger.info()
    if not item:
        # Extract item from sys.argv
        if sys.argv[2]:
            item = Item().fromurl(sys.argv[2])
        # If no item, this is mainlist
        else:
            item = Item(channel="channelselector", action="getmainlist", viewmode="movie")
    logger.info(item.tostring())
    try:
        # If item has no action, stops here
        if item.action == "":
            logger.info("Item sin accion")
            return
        # Action for main menu in channelselector
        elif item.action == "getmainlist":
            import channelselector
            # # Check for updates only on first screen
            # if config.get_setting("check_for_plugin_updates") == True:
            #     logger.info("Check for plugin updates enabled")
            #     from core import updater
            #
            #     try:
            #         config.set_setting("plugin_updates_available", 0)
            #         new_published_version_tag, number_of_updates = updater.get_available_updates()
            #
            #         config.set_setting("plugin_updates_available", number_of_updates)
            #         itemlist = channelselector.getmainlist()
            #
            #         if new_published_version_tag != "":
            #             platformtools.dialog_notification(new_published_version_tag + " disponible",
            #                                               "Ya puedes descargar la nueva versión del plugin\n"
            #                                               "desde el listado principal")
            #
            #             itemlist = channelselector.getmainlist()
            #             itemlist.insert(0, Item(title="Descargar version " + new_published_version_tag,
            #                                     version=new_published_version_tag, channel="updater",
            #                                     action="update",
            #                                     thumbnail=channelselector.get_thumb("update.png")))
            #     except:
            #         import traceback
            #         logger.error(traceback.format_exc())
            #         platformtools.dialog_ok("No se puede conectar", "No ha sido posible comprobar",
            #                                 "si hay actualizaciones")
            #         logger.error("Fallo al verificar la actualización")
            #         config.set_setting("plugin_updates_available", 0)
            #         itemlist = channelselector.getmainlist()
            #
            # else:
            #     logger.info("Check for plugin updates disabled")
            #     config.set_setting("plugin_updates_available", 0)
            #     itemlist = channelselector.getmainlist()
            itemlist = channelselector.getmainlist()
            platformtools.render_items(itemlist, item)
        # # Action for updating plugin
        # elif item.action == "update":
        #
        #     from core import updater
        #     updater.update(item)
        #     config.set_setting("plugin_updates_available", 0)
        #
        #     import xbmc
        #     xbmc.executebuiltin("Container.Refresh")
        # Action for channel types on channelselector: movies, series, etc.
        elif item.action == "getchanneltypes":
            import channelselector
            itemlist = channelselector.getchanneltypes()
            platformtools.render_items(itemlist, item)
        # Action for channel listing on channelselector
        elif item.action == "filterchannels":
            import channelselector
            itemlist = channelselector.filterchannels(item.channel_type)
            platformtools.render_items(itemlist, item)
        # Special action for playing a video from the library
        elif item.action == "play_from_library":
            play_from_library(item)
            return
        elif item.action == "keymap":
            from platformcode import keymaptools
            if item.open:
                return keymaptools.open_shortcut_menu()
            else:
                return keymaptools.set_key()
        elif item.action == "script":
            # Maintenance helper: drop the TMDB cache database
            from core import tmdb
            if tmdb.drop_bd():
                platformtools.dialog_notification("Alfa", "caché eliminada", time=2000, sound=False)
        # Action in certain channel specified in "action" and "channel" parameters
        else:
            # Entry point for a channel is the "mainlist" action, so here we check parental control
            if item.action == "mainlist":
                # Parental control
                # If it is an adult channel, and user has configured pin, asks for it
                if channeltools.is_adult(item.channel) and config.get_setting(
                        "adult_request_password"):
                    tecleado = platformtools.dialog_input(
                        "", "Contraseña para canales de adultos", True)
                    if tecleado is None or tecleado != config.get_setting(
                            "adult_password"):
                        return
            # # Update the individual channel
            # if (item.action == "mainlist" and item.channel != "channelselector" and
            #         config.get_setting("check_for_channel_updates") == True):
            #     from core import updater
            #     updater.update_channel(item.channel)
            # Checks if channel exists
            channel_file = os.path.join(config.get_runtime_path(), 'channels',
                                        item.channel + ".py")
            logger.info("channel_file=%s" % channel_file)
            channel = None
            if os.path.exists(channel_file):
                try:
                    channel = __import__('channels.%s' % item.channel, None, None,
                                         ["channels.%s" % item.channel])
                except ImportError:
                    # Python 2 exec statement as a last-resort import
                    exec "import channels." + item.channel + " as channel"
            logger.info("Running channel %s | %s" % (channel.__name__, channel.__file__))
            # Special play action
            if item.action == "play":
                logger.info("item.action=%s" % item.action.upper())
                # logger.debug("item_toPlay: " + "\n" + item.tostring('\n'))
                # First checks if channel has a "play" function
                if hasattr(channel, 'play'):
                    logger.info("Executing channel 'play' method")
                    itemlist = channel.play(item)
                    b_favourite = item.isFavourite
                    # Play should return a list of playable URLS
                    if len(itemlist) > 0 and isinstance(itemlist[0], Item):
                        item = itemlist[0]
                        if b_favourite:
                            item.isFavourite = True
                        platformtools.play_video(item)
                    # Allow several qualities returned from the channel's play
                    elif len(itemlist) > 0 and isinstance(itemlist[0], list):
                        item.video_urls = itemlist
                        platformtools.play_video(item)
                    # If not, shows user an error message
                    else:
                        platformtools.dialog_ok("alfa", "No hay nada para reproducir")
                # If player don't have a "play" function, not uses the standard play from platformtools
                else:
                    logger.info("Executing core 'play' method")
                    platformtools.play_video(item)
            # Special action for findvideos, where the plugin looks for known urls
            elif item.action == "findvideos":
                # First checks if channel has a "findvideos" function
                if hasattr(channel, 'findvideos'):
                    itemlist = getattr(channel, item.action)(item)
                    itemlist = servertools.filter_servers(itemlist)
                # If not, uses the generic findvideos function
                else:
                    logger.info("No channel 'findvideos' method, "
                                "executing core method")
                    itemlist = servertools.find_video_items(item)
                if config.get_setting("max_links", "videolibrary") != 0:
                    itemlist = limit_itemlist(itemlist)
                from platformcode import subtitletools
                subtitletools.saveSubtitleName(item)
                platformtools.render_items(itemlist, item)
            # Special action for adding a movie to the library
            elif item.action == "add_pelicula_to_library":
                videolibrarytools.add_movie(item)
            # Special action for adding a serie to the library
            elif item.action == "add_serie_to_library":
                videolibrarytools.add_tvshow(item, channel)
            # Special action for downloading all episodes from a serie
            elif item.action == "download_all_episodes":
                from channels import downloads
                item.action = item.extra
                del item.extra
                downloads.save_download(item)
            # Special action for searching, first asks for the words then call the "search" function
            elif item.action == "search":
                logger.info("item.action=%s" % item.action.upper())
                last_search = ""
                last_search_active = config.get_setting(
                    "last_search", "search")
                if last_search_active:
                    # Pre-fill the dialog with the most recent saved search
                    try:
                        current_saved_searches_list = list(
                            config.get_setting("saved_searches_list", "search"))
                        last_search = current_saved_searches_list[0]
                    except:
                        pass
                tecleado = platformtools.dialog_input(last_search)
                if tecleado is not None:
                    if last_search_active and not tecleado.startswith("http"):
                        from channels import search
                        search.save_search(tecleado)
                    itemlist = channel.search(item, tecleado)
                else:
                    return
                platformtools.render_items(itemlist, item)
            # For all other actions
            else:
                logger.info("Executing channel '%s' method" % item.action)
                itemlist = getattr(channel, item.action)(item)
                platformtools.render_items(itemlist, item)
    except urllib2.URLError, e:
        import traceback
        logger.error(traceback.format_exc())
        # Grab inner and third party errors
        if hasattr(e, 'reason'):
            logger.error("Razon del error, codigo: %s | Razon: %s" %
                         (str(e.reason[0]), str(e.reason[1])))
            texto = config.get_localized_string(
                30050)  # "No se puede conectar con el sitio web"
            platformtools.dialog_ok("alfa", texto)
        # Grab server response errors
        elif hasattr(e, 'code'):
            logger.error("Codigo de error HTTP : %d" % e.code)
            # "El sitio web no funciona correctamente (error http %d)"
            platformtools.dialog_ok(
                "alfa", config.get_localized_string(30051) % e.code)
def play(item):
    # Resolve the final media URL for pelispedia players (flash and html5),
    # downloading the .srt subtitle when one is referenced; any other URL is
    # handed to the generic server detector.
    logger.info("pelisalacarta.channels.pelispedia play url={0}".format(
        item.url))
    itemlist = []
    # For flash and html5 videos
    if item.url.startswith("http://www.pelispedia.tv"):
        # URL carries id, subtitle id and thumbnail as query parameters
        key = scrapertools.find_single_match(
            item.url, 'index.php\?id=([^&]+)&sub=([^&]+)&.+?imagen=([^&]+)')
        subtitle = ""
        thumbnail = ""
        if len(key) > 2:
            thumbnail = key[2]
        if key[1] != "":
            # Fetch and store the subtitle file locally
            url_sub = "http://www.pelispedia.tv/sub/%s.srt" % key[1]
            data_sub = scrapertools.anti_cloudflare(url_sub, host=CHANNEL_HOST)
            subtitle = save_sub(data_sub)
        # Two different backend endpoints depending on the player type
        if "Player_Html5" in item.url:
            url = "http://www.pelispedia.tv/Pe_Player_Html5/pk/pk_2/plugins/protected.php"
            post = "fv=21&url=" + urllib.quote(key[0]) + "&sou=pic"
        else:
            url = "http://www.pelispedia.tv/Pe_flsh/plugins/gkpluginsphp.php"
            post = "link=" + urllib.quote(key[0])
        data = scrapertools.cache_page(url, post=post, headers=CHANNEL_DEFAULT_HEADERS)
        media_urls = scrapertools.find_multiple_matches(
            data, '(?:link|url)":"([^"]+)"')
        # If several urls are returned, the last one has the highest quality
        if len(media_urls) > 0:
            url = media_urls[len(media_urls) - 1].replace("\\", "")
            itemlist.append(
                Item(channel=__channel__, title=item.title, url=url, server="directo",
                     action="play", subtitle=subtitle, thumbnail=thumbnail))
    elif item.url.startswith("http://www.pelispedia.biz"):
        logger.info("estoy en el otro html5")
        key = scrapertools.find_single_match(item.url, 'v=([^&]+).+?imagen=([^&]+)')
        thumbnail = ""
        if len(key) > 1:
            thumbnail = key[1]
        data = scrapertools.anti_cloudflare(item.url, host=CHANNEL_HOST,
                                            headers=CHANNEL_DEFAULT_HEADERS)
        media_url = scrapertools.find_single_match(data, '"file":"([^"]+)"').replace(
            "\\", "")
        # Spanish subtitle track, if declared in the player config
        sub = scrapertools.find_single_match(
            data, 'file:\s"([^"]+)".+?label:\s"Spanish"')
        itemlist.append(
            Item(channel=__channel__, title=item.title, url=media_url, server="directo",
                 action="play", subtitle=sub, thumbnail=thumbnail))
    else:
        # Unknown URL: let servertools identify the hoster
        itemlist = servertools.find_video_items(data=item.url)
        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.channel = __channel__
    return itemlist
def download_from_best_server(item, ask=False):
    # Download `item` from the best available server, or (ask=True) from the
    # one the user selects.  Returns a dict carrying "downloadStatus".
    logger.info("contentAction: %s | contentChannel: %s | url: %s" %
                (item.contentAction, item.contentChannel, item.url))
    result = {"downloadStatus": STATUS_CODES.error}
    progreso = platformtools.dialog_progress(
        "Descargas", "Obteniendo lista de servidores disponibles...")
    # Import the channel module that knows how to list this content's mirrors
    channel = __import__('channels.%s' % item.contentChannel, None, None,
                         ["channels.%s" % item.contentChannel])
    progreso.update(50, "Obteniendo lista de servidores disponibles.",
                    "Conectando con %s..." % item.contentChannel)
    if hasattr(channel, item.contentAction):
        play_items = getattr(channel, item.contentAction)(item.clone(
            action=item.contentAction, channel=item.contentChannel))
    else:
        # Fall back to generic detection when the channel lacks the action
        play_items = servertools.find_video_items(
            item.clone(action=item.contentAction, channel=item.contentChannel))
    # Only playable entries are candidate mirrors
    play_items = filter(lambda x: x.action == "play", play_items)
    progreso.update(100, "Obteniendo lista de servidores disponibles.",
                    "Servidores disponibles: %s" % len(play_items),
                    "Identificando servidores...")
    for i in play_items:
        # Fill in the server when the channel didn't set it
        if not i.server:
            i.server = servertools.get_server_from_url(i.url)
        if progreso.iscanceled():
            return {"downloadStatus": STATUS_CODES.canceled}
    play_items.sort(key=sort_method)
    if progreso.iscanceled():
        return {"downloadStatus": STATUS_CODES.canceled}
    progreso.close()
    if not ask:
        # Walk the server list until one download succeeds
        for play_item in play_items:
            play_item = item.clone(**play_item.__dict__)
            play_item.contentAction = play_item.action
            play_item.infoLabels = item.infoLabels
            result = download_from_server(play_item)
            if progreso.iscanceled():
                result["downloadStatus"] = STATUS_CODES.canceled
            # Whether canceled or completed, stop trying further options
            if result["downloadStatus"] in [
                    STATUS_CODES.canceled, STATUS_CODES.completed
            ]:
                break
    else:
        seleccion = platformtools.dialog_select("Selecciona el servidor",
                                                [s.title for s in play_items])
        if seleccion > -1:
            play_item = item.clone(**play_items[seleccion].__dict__)
            play_item.contentAction = play_item.action
            play_item.infoLabels = item.infoLabels
            result = download_from_server(play_item)
        else:
            result["downloadStatus"] = STATUS_CODES.canceled
    return result
def findvideos(item):
    """Extract playable video links from an f1fullraces entry page.

    Scrapes the page's entry-content block, pairs each iframe with its
    preceding caption text, and builds play Items per hoster (Google Drive /
    yourvideohost / filepup direct, nosvideo, pcloud, or generic servers via
    servertools). Falls back to centered-paragraph "directo" links when
    nothing was found.
    """
    logger.info("deportesalacarta.channels.f1fullraces findvideos")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = re.sub(r"\n|\r|\t", '', data)
    bloque = scrapertools.find_single_match(
        data, '<div class="entry-content">(.*?)</div>')
    # Strip inline formatting tags so captions come out as plain text.
    bloque = bloque.replace("<p>","").replace("</p>","").replace("<em>","").replace("<br />","") \
        .replace("</em>","").replace("<strong>","").replace("</strong>","") \
        .replace("<b>","").replace("</b>","").replace("<i>","").replace("</i>","") \
        .replace("<del>","").replace("</del>","").replace("<center>","").replace("</center>","")
    bloque = re.sub(r'(?i)(<font[^>]+>)|</font>', " ", bloque)
    # First pass: iframe urls with no caption; if there are several iframes,
    # re-match against the cleaned block to capture the caption before each.
    urls = scrapertools.find_multiple_matches(
        data, '(?i)(>)<iframe.*?src="([^"]+)"')
    if len(urls) > 1:
        urls = scrapertools.find_multiple_matches(
            bloque, '(?i)(.*?)<iframe.*?src="([^"]+)".*?</iframe>')
    for title, url in urls:
        # Drop hoster names and separators from the caption.
        title = re.sub(
            r'(?i)google:|pcloud:|nosvideo:|google|nosvideo|pcloud|_', '', title)
        title = title.strip()
        if "drive.google" in url or "yourvideohost" in url or "filepup" in url:
            # ">" means the caption was the bare first-pass placeholder.
            if title != ">":
                scrapedtitle = "[COLOR orange]" + title + " [/COLOR]Enlace encontrado en " + scrapertools.find_single_match(
                    url, '//(?:www.|)(\w+)')
            else:
                scrapedtitle = "Enlace encontrado en " + scrapertools.find_single_match(
                    url, '//(?:www.|)(\w+)')
            itemlist.append(
                Item(channel=__channel__, action="play", title=scrapedtitle,
                     url=url, thumbnail=item.thumbnail, fanart=item.fanart,
                     folder=False))
        elif 'nosvideo' in url:
            scrapedtitle = "[COLOR orange]" + title + " [/COLOR]Enlace encontrado en nosvideo"
            itemlist.append(
                Item(channel=__channel__, action="play", title=scrapedtitle,
                     server="nosvideo", url=url, thumbnail=item.thumbnail,
                     fanart=item.fanart, folder=False))
        elif 'pcloud' in url:
            scrapedtitle = "[COLOR orange]" + title + " [/COLOR]Enlace encontrado en pCloud"
            itemlist.append(
                Item(channel=__channel__, action="play", title=scrapedtitle,
                     server="pcloud", url=url, thumbnail=item.thumbnail,
                     fanart=item.fanart, folder=False))
        else:
            # Unknown hoster: let servertools identify the server(s).
            video_itemlist = servertools.find_video_items(data=url)
            for video_item in video_itemlist:
                if title != ">":
                    if title.count(':') > 1:
                        scrapedtitle = "[COLOR orange]" + title.rsplit(
                            ':', 2)[1] + ": [/COLOR]" + video_item.title
                    else:
                        scrapedtitle = "[COLOR orange]" + title + " [/COLOR]" + video_item.title
                else:
                    scrapedtitle = video_item.title
                itemlist.append(
                    Item(channel=__channel__, action="play",
                         server=video_item.server, title=scrapedtitle,
                         url=video_item.url, thumbnail=item.thumbnail,
                         fanart=item.fanart, folder=False))
    # Fallback: centered paragraphs followed by a src attribute ("directo").
    if len(itemlist) == 0:
        urls_f1gp = scrapertools.find_multiple_matches(
            data, '<p style="text-align: center;">(.*?)</p>.*?src="([^"]+)"')
        for title, url in urls_f1gp:
            scrapedtitle = "[COLOR orange]" + title + " [/COLOR]Enlace encontrado en directo"
            itemlist.append(
                Item(channel=__channel__, action="play", server="directo",
                     title=scrapedtitle, url=url, thumbnail=item.thumbnail,
                     fanart=item.fanart, folder=False))
    return itemlist
def findvid_film(item):
    """Extract streaming/download links for a cineblog01 movie page.

    Parses the page into sections (Streaming HD, Streaming 3D, Streaming,
    Download, Download HD), building a play Item per link, and falls back to
    the generic servertools finder when nothing is found.
    """
    logger.info("[cineblog01.py] findvid_film")
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    data = scrapertools.decodeHtmlentities(data)

    # Extract the quality label (last matching <strong> wins).
    patronvideos = '>([^<]+)</strong></div>'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)
    QualityStr = ""
    for match in matches:
        QualityStr = scrapertools.unescape(match.group(1))[6:]

    # STREAMANGO — kept in its own list so later sections cannot clobber it.
    # FIX: the original stored this in `matches`, which the Streaming-HD
    # findall immediately overwrote; by the time the plain Streaming section
    # did `findall(streaming) + matches`, `matches` held the 3D results, so
    # the 3D links were duplicated as SD entries and the Streamango link lost.
    streamango_matches = []
    u = scrapertools.find_single_match(
        data, '(?://|\.)streamango\.com/(?:f/|embed/)?[0-9a-zA-Z]+')
    if u:
        streamango_matches.append((u, 'Streamango'))

    # Same link pattern for every section; hoisted out of the sections.
    patron = '<td><a[^h]href="([^"]+)"[^>]+>([^<]+)<'

    # Streaming HD section
    streaming_hd = scrapertools.find_single_match(
        data, '<strong>Streaming HD[^<]+</strong>(.*?)<table height="30">')
    matches = re.compile(patron, re.DOTALL).findall(streaming_hd)
    for scrapedurl, scrapedtitle in matches:
        logger.debug("##### findvideos Streaming HD ## %s ## %s ##" % (scrapedurl, scrapedtitle))
        title = "[COLOR yellow]" + scrapedtitle + " HD:[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR]"
        itemlist.append(
            Item(channel=__channel__, action="play", title=title,
                 url=scrapedurl, fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail, plot=item.plot, show=item.show,
                 folder=False))

    # Streaming 3D section
    streaming_3D = scrapertools.find_single_match(
        data, '<strong>Streaming 3D[^<]+</strong>(.*?)<table height="30">')
    matches = re.compile(patron, re.DOTALL).findall(streaming_3D)
    for scrapedurl, scrapedtitle in matches:
        logger.debug("##### findvideos Streaming 3D ## %s ## %s ##" % (scrapedurl, scrapedtitle))
        title = "[COLOR pink]" + scrapedtitle + " 3D:[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR]"
        itemlist.append(
            Item(channel=__channel__, action="play", title=title,
                 url=scrapedurl, fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail, plot=item.plot, show=item.show,
                 folder=False))

    # Plain Streaming section, plus the Streamango link (if any).
    streaming = scrapertools.find_single_match(
        data, '<strong>Streaming:</strong>(.*?)<table height="30">')
    matches = re.compile(patron, re.DOTALL).findall(streaming) + streamango_matches
    for scrapedurl, scrapedtitle in matches:
        logger.debug("##### findvideos Streaming ## %s ## %s ##" % (scrapedurl, scrapedtitle))
        title = "[COLOR orange]" + scrapedtitle + " SD:[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR]"
        itemlist.append(
            Item(channel=__channel__, action="play", title=title,
                 url=scrapedurl, fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail, plot=item.plot, show=item.show,
                 folder=False))

    # Download section
    download = scrapertools.find_single_match(
        data, '<strong>Download:</strong>(.*?)<table height="30">')
    matches = re.compile(patron, re.DOTALL).findall(download)
    for scrapedurl, scrapedtitle in matches:
        logger.debug("##### findvideos Download ## %s ## %s ##" % (scrapedurl, scrapedtitle))
        title = "[COLOR orange]" + scrapedtitle + "[/COLOR] " + "[COLOR aqua]Download:[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR]"
        itemlist.append(
            Item(channel=__channel__, action="play", title=title,
                 url=scrapedurl, fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail, show=item.show, folder=False))

    # Download HD section
    download_hd = scrapertools.find_single_match(
        data,
        '<strong>Download HD[^<]+</strong>(.*?)<table width="100%" height="20">'
    )
    matches = re.compile(patron, re.DOTALL).findall(download_hd)
    for scrapedurl, scrapedtitle in matches:
        logger.debug("##### findvideos Download HD ## %s ## %s ##" % (scrapedurl, scrapedtitle))
        title = "[COLOR orange]" + scrapedtitle + "[/COLOR] " + "[COLOR azure]Download HD:[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR]"
        itemlist.append(
            Item(channel=__channel__, action="play", title=title,
                 url=scrapedurl, fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail, show=item.show, folder=False))

    # Nothing matched: let the generic finder scan the whole page.
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(item=item)

    return itemlist
def run(item=None):
    """Main entry point of the alfa plugin.

    Builds (or receives) the Item to execute from sys.argv, then dispatches on
    item.action: channelselector views, play, findvideos, videolibrary adds,
    downloads, search, or a generic channel method. Wraps execution with
    user-facing error dialogs for URL errors, WebErrorException and any other
    exception.
    """
    logger.info()
    if not item:
        # Extract item from sys.argv
        if sys.argv[2]:
            sp = sys.argv[2].split('&')
            url = sp[0]
            item = Item().fromurl(url)
            # Extra "key=value" pairs after the serialized item override/extend it.
            if len(sp) > 1:
                for e in sp[1:]:
                    key, val = e.split('=')
                    item.__setattr__(key, val)
        # If no item, this is mainlist
        else:
            if config.get_setting("start_page"):
                if not config.get_setting("custom_start"):
                    category = config.get_setting("category").lower()
                    item = Item(channel="news", action="novedades", extra=category, mode = 'silent')
                else:
                    from channels import side_menu
                    item= Item()
                    item = side_menu.check_user_home(item)
                item.start = True;
            else:
                item = Item(channel="channelselector", action="getmainlist", viewmode="movie")
            # One-time prompt to configure the Kodi videolibrary content type.
            if not config.get_setting('show_once'):
                from platformcode import xbmc_videolibrary
                xbmc_videolibrary.ask_set_content(1)
                config.set_setting('show_once', True)

    logger.info(item.tostring())

    try:
        if not config.get_setting('tmdb_active'):
            config.set_setting('tmdb_active', True)

        # If item has no action, stops here
        if item.action == "":
            logger.info("Item sin accion")
            return

        # Action for main menu in channelselector
        elif item.action == "getmainlist":
            import channelselector
            itemlist = channelselector.getmainlist()
            platformtools.render_items(itemlist, item)

        # Action for channel types on channelselector: movies, series, etc.
        elif item.action == "getchanneltypes":
            import channelselector
            itemlist = channelselector.getchanneltypes()
            platformtools.render_items(itemlist, item)

        # Action for channel listing on channelselector
        elif item.action == "filterchannels":
            import channelselector
            itemlist = channelselector.filterchannels(item.channel_type)
            platformtools.render_items(itemlist, item)

        # Special action for playing a video from the library
        elif item.action == "play_from_library":
            play_from_library(item)
            return

        elif item.action == "keymap":
            from platformcode import keymaptools
            if item.open:
                return keymaptools.open_shortcut_menu()
            else:
                return keymaptools.set_key()

        elif item.action == "script":
            from core import tmdb
            if tmdb.drop_bd():
                platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(60011), time=2000, sound=False)

        # Action in certain channel specified in "action" and "channel" parameters
        else:
            # Entry point for a channel is the "mainlist" action, so here we check parental control
            if item.action == "mainlist":
                # Parental control
                # If it is an adult channel, and user has configured pin, asks for it
                if channeltools.is_adult(item.channel) and config.get_setting("adult_request_password"):
                    tecleado = platformtools.dialog_input("", config.get_localized_string(60334), True)
                    if tecleado is None or tecleado != config.get_setting("adult_password"):
                        return

            # # Actualiza el canal individual
            # if (item.action == "mainlist" and item.channel != "channelselector" and
            #         config.get_setting("check_for_channel_updates") == True):
            #     from core import updater
            #     updater.update_channel(item.channel)

            # Checks if channel exists
            channel_file = os.path.join(config.get_runtime_path(), 'channels', item.channel + ".py")
            logger.info("channel_file=%s" % channel_file)

            channel = None
            if os.path.exists(channel_file):
                try:
                    channel = __import__('channels.%s' % item.channel, None, None, ["channels.%s" % item.channel])
                except ImportError:
                    # Fallback import path when __import__ fails.
                    exec("import channels." + item.channel + " as channel")

            # NOTE(review): if the channel file does not exist, `channel` is
            # still None here and this line raises AttributeError — confirm
            # that is the intended failure mode.
            logger.info("Running channel %s | %s" % (channel.__name__, channel.__file__))

            # Special play action
            if item.action == "play":
                # Set up trakt info; best-effort, failures are ignored.
                try:
                    trakt_tools.set_trakt_info(item)
                except:
                    pass
                logger.info("item.action=%s" % item.action.upper())
                # logger.debug("item_toPlay: " + "\n" + item.tostring('\n'))

                # First checks if channel has a "play" function
                if hasattr(channel, 'play'):
                    logger.info("Executing channel 'play' method")
                    itemlist = channel.play(item)
                    b_favourite = item.isFavourite
                    # Play should return a list of playable URLS
                    if len(itemlist) > 0 and isinstance(itemlist[0], Item):
                        item = itemlist[0]
                        if b_favourite:
                            item.isFavourite = True
                        platformtools.play_video(item)
                    # Allow multiple qualities from the channel's play
                    elif len(itemlist) > 0 and isinstance(itemlist[0], list):
                        item.video_urls = itemlist
                        platformtools.play_video(item)
                    # If not, shows user an error message
                    else:
                        platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(60339))
                # If player don't have a "play" function, not uses the standard play from platformtools
                else:
                    logger.info("Executing core 'play' method")
                    platformtools.play_video(item)

            # Special action for findvideos, where the plugin looks for known urls
            elif item.action == "findvideos":
                # First checks if channel has a "findvideos" function
                if hasattr(channel, 'findvideos'):
                    itemlist = getattr(channel, item.action)(item)
                    itemlist = servertools.filter_servers(itemlist)
                # If not, uses the generic findvideos function
                else:
                    logger.info("No channel 'findvideos' method, " "executing core method")
                    itemlist = servertools.find_video_items(item)

                if config.get_setting("max_links", "videolibrary") != 0:
                    itemlist = limit_itemlist(itemlist)

                from platformcode import subtitletools
                subtitletools.saveSubtitleName(item)
                platformtools.render_items(itemlist, item)

            # Special action for adding a movie to the library
            elif item.action == "add_pelicula_to_library":
                videolibrarytools.add_movie(item)

            # Special action for adding a serie to the library
            elif item.action == "add_serie_to_library":
                videolibrarytools.add_tvshow(item, channel)

            # Special action for downloading all episodes from a serie
            elif item.action == "download_all_episodes":
                from channels import downloads
                item.action = item.extra
                del item.extra
                downloads.save_download(item)

            # Special action for searching, first asks for the words then call the "search" function
            elif item.action == "search":
                logger.info("item.action=%s" % item.action.upper())

                # last_search = ""
                # last_search_active = config.get_setting("last_search", "search")
                # if last_search_active:
                #     try:
                #         current_saved_searches_list = list(config.get_setting("saved_searches_list", "search"))
                #         last_search = current_saved_searches_list[0]
                #     except:
                #         pass

                last_search = channeltools.get_channel_setting('Last_searched', 'search', '')
                tecleado = platformtools.dialog_input(last_search)
                if tecleado is not None:
                    channeltools.set_channel_setting('Last_searched', tecleado, 'search')
                    itemlist = channel.search(item, tecleado)
                else:
                    return
                platformtools.render_items(itemlist, item)

            # For all other actions
            else:
                logger.info("Executing channel '%s' method" % item.action)
                itemlist = getattr(channel, item.action)(item)
                # Optional trakt synchronization over the produced itemlist.
                if config.get_setting('trakt_sync'):
                    token_auth = config.get_setting("token_trakt", "trakt")
                    if not token_auth:
                        trakt_tools.auth_trakt()
                    else:
                        import xbmc
                        if not xbmc.getCondVisibility('System.HasAddon(script.trakt)') and config.get_setting(
                                'install_trakt'):
                            trakt_tools.ask_install_script()
                        itemlist = trakt_tools.trakt_check(itemlist)
                else:
                    config.set_setting('install_trakt', True)
                platformtools.render_items(itemlist, item)

    except urllib2.URLError as e:
        import traceback
        logger.error(traceback.format_exc())
        # Grab inner and third party errors
        if hasattr(e, 'reason'):
            logger.error("Razon del error, codigo: %s | Razon: %s" % (str(e.reason[0]), str(e.reason[1])))
            texto = config.get_localized_string(30050)  # "No se puede conectar con el sitio web"
            platformtools.dialog_ok("alfa", texto)
        # Grab server response errors
        elif hasattr(e, 'code'):
            logger.error("Codigo de error HTTP : %d" % e.code)
            # "El sitio web no funciona correctamente (error http %d)"
            platformtools.dialog_ok("alfa", config.get_localized_string(30051) % e.code)
    except WebErrorException as e:
        import traceback
        logger.error(traceback.format_exc())
        # Identify which channel file raised, from the traceback text.
        patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\", "\\\\") + '([^.]+)\.py"'
        canal = scrapertools.find_single_match(traceback.format_exc(), patron)
        platformtools.dialog_ok(
            config.get_localized_string(59985) + canal,
            config.get_localized_string(60013) %(e))
    except:
        import traceback
        logger.error(traceback.format_exc())
        # Identify which channel file raised, from the traceback text.
        patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\", "\\\\") + '([^.]+)\.py"'
        canal = scrapertools.find_single_match(traceback.format_exc(), patron)
        try:
            import xbmc
            # Kodi renamed the log file starting with version 14 (Helix).
            if config.get_platform(True)['num_version'] < 14:
                log_name = "xbmc.log"
            else:
                log_name = "kodi.log"
            log_message = config.get_localized_string(50004) + xbmc.translatePath("special://logpath") + log_name
        except:
            log_message = ""
        if canal:
            platformtools.dialog_ok(
                config.get_localized_string(60087) %canal,
                config.get_localized_string(60014), log_message)
        else:
            platformtools.dialog_ok(
                config.get_localized_string(60038),
                config.get_localized_string(60015), log_message)
def findvideos(item):
    """Find playable links for a videolibrary entry.

    Scans the entry's directory for per-channel .json files, optionally lets
    the user pick one channel (or the local download), then runs each
    channel's findvideos (or the generic servertools finder) and collects the
    resulting server Items, re-branded under the "videolibrary" channel.
    """
    from channels import autoplay
    logger.info()
    # logger.debug("item:\n" + item.tostring('\n'))
    itemlist = []
    list_canales = {}
    item_local = None

    # Disable autoplay for videolibrary playback.
    autoplay.set_status(False)

    if not item.contentTitle or not item.strm_path:
        logger.debug("No se pueden buscar videos por falta de parametros")
        return []

    # Sanitize the title the same way the saved filenames were sanitized
    # (Python 2: filter over a str returns a str).
    content_title = filter(lambda c: c not in ":*?<>|\/", item.contentTitle.strip().lower())

    if item.contentType == 'movie':
        item.strm_path = filetools.join(videolibrarytools.MOVIES_PATH, item.strm_path)
        path_dir = os.path.dirname(item.strm_path)
        item.nfo = filetools.join(path_dir, os.path.basename(path_dir) + ".nfo")
    else:
        item.strm_path = filetools.join(videolibrarytools.TVSHOWS_PATH, item.strm_path)
        path_dir = os.path.dirname(item.strm_path)
        item.nfo = filetools.join(path_dir, 'tvshow.nfo')

    # Collect one .json per channel; filenames look like "<title>[<channel>].json".
    for fd in filetools.listdir(path_dir):
        if fd.endswith('.json'):
            contenido, nom_canal = fd[:-6].split('[')
            if (contenido.startswith(content_title) or item.contentType == 'movie') and nom_canal not in \
                    list_canales.keys():
                list_canales[nom_canal] = filetools.join(path_dir, fd)

    num_canales = len(list_canales)

    if 'downloads' in list_canales:
        json_path = list_canales['downloads']
        item_json = Item().fromjson(filetools.read(json_path))
        item_json.contentChannel = "local"
        # Support for relative paths in downloads
        if filetools.is_relative(item_json.url):
            item_json.url = filetools.join(videolibrarytools.VIDEOLIBRARY_PATH, item_json.url)
        del list_canales['downloads']

        # Check that the downloaded video has not been deleted
        if filetools.exists(item_json.url):
            item_local = item_json.clone(action='play')
            itemlist.append(item_local)
        else:
            num_canales -= 1

    filtro_canal = ''
    if num_canales > 1 and config.get_setting("ask_channel", "videolibrary"):
        opciones = [config.get_localized_string(70089) % k.capitalize() for k in list_canales.keys()]
        opciones.insert(0, config.get_localized_string(70083))
        if item_local:
            opciones.append(item_local.title)

        from platformcode import platformtools
        index = platformtools.dialog_select(config.get_localized_string(30163), opciones)
        if index < 0:
            return []
        elif item_local and index == len(opciones) - 1:
            # Last option is the local download: play it directly.
            filtro_canal = 'downloads'
            platformtools.play_video(item_local)
        elif index > 0:
            filtro_canal = opciones[index].replace(config.get_localized_string(70078), "").strip()
            itemlist = []

    for nom_canal, json_path in list_canales.items():
        if filtro_canal and filtro_canal != nom_canal.capitalize():
            continue

        item_canal = Item()
        item_canal.channel = nom_canal
        nom_canal = item_canal.channel

        # Import the channel module for the selected entry.
        try:
            channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal])
        except ImportError:
            exec "import channels." + nom_canal + " as channel"

        item_json = Item().fromjson(filetools.read(json_path))
        list_servers = []

        try:
            # FILTERTOOLS
            # If the channel has a filter, pass it the saved show name so it
            # filters correctly.
            if "list_language" in item_json:
                # Coming from the addon's own videolibrary.
                if "library_filter_show" in item:
                    item_json.show = item.library_filter_show.get(nom_canal, "")

            # Run findvideos, either the channel's own or the generic one.
            item_json.contentChannel = 'videolibrary'
            if hasattr(channel, 'findvideos'):
                from core import servertools
                list_servers = getattr(channel, 'findvideos')(item_json)
                list_servers = servertools.filter_servers(list_servers)
            else:
                from core import servertools
                list_servers = servertools.find_video_items(item_json)
        except Exception, ex:
            logger.error("Ha fallado la funcion findvideos para el canal %s" % nom_canal)
            template = "An exception of type %s occured. Arguments:\n%r"
            message = template % (type(ex).__name__, ex.args)
            logger.error(message)
            logger.error(traceback.format_exc())

        # Retitle the servers, prefixing the channel name, and copy over the
        # item's images/paths when the server lacks them.
        for server in list_servers:
            # if not server.action:  # Ignore/ALLOW the tags
            #     continue
            server.contentChannel = server.channel
            server.channel = "videolibrary"
            server.nfo = item.nfo
            server.strm_path = item.strm_path

            # Kodi 18 compatibility: prevents the spinner from hanging on
            # direct ("Directo") links.
            if server.action == 'play':
                server.folder = False

            # Prepend the channel name if configured to do so.
            if config.get_setting("quit_channel_name", "videolibrary") == 0:
                server.title = "%s: %s" % (nom_canal.capitalize(), server.title)

            # server.infoLabels = item_json.infoLabels
            if not server.thumbnail:
                server.thumbnail = item.thumbnail

            # logger.debug("server:\n%s" % server.tostring('\n'))
            itemlist.append(server)
def run():
    """Main entry point of the streamondemand plugin (Python 2 / old Kodi).

    Deserializes the Item from sys.argv (or defaults to the channel-selector
    mainlist), then dispatches on item.action: selector views, updater, play,
    findvideos, videolibrary adds, downloads, search, or a generic channel
    method. URL errors are reported to the user via dialogs.
    """
    logger.info("streamondemand.platformcode.launcher run")

    # The start() function is not always executed on old platforms (XBMC versions under 12.0)
    if config.OLD_PLATFORM:
        config.verify_directories_created()

    # Extract item from sys.argv
    if sys.argv[2]:
        item = Item().fromurl(sys.argv[2])
    # If no item, this is mainlist
    else:
        item = Item(action="selectchannel", viewmode="movie")

    logger.info("streamondemand.platformcode.launcher " + item.tostring())

    # Set server filters
    server_white_list = []
    server_black_list = []
    if config.get_setting('filter_servers') == 'true':
        server_white_list, server_black_list = set_server_list()

    try:
        # If item has no action, stops here
        if item.action == "":
            logger.info("streamondemand.platformcode.launcher Item sin accion")
            return

        # Action for main menu in channelselector
        if (item.action == "selectchannel"):
            import channelselector
            itemlist = channelselector.getmainlist()

            # Check for updates only on first screen
            if config.get_setting("updatecheck2") == "true":
                logger.info(
                    "streamondemand.platformcode.launcher Check for plugin updates enabled"
                )
                from core import updater
                try:
                    version = updater.checkforupdates()
                    if version:
                        import xbmcgui
                        advertencia = xbmcgui.Dialog()
                        advertencia.ok(
                            "Versione " + version + " disponible",
                            "E' possibile fare il download della nuova versione\nselezionare la relativa voce nel menu principale"
                        )
                        itemlist.insert(
                            0,
                            Item(
                                title="Download versione " + version,
                                version=version,
                                channel="updater",
                                action="update",
                                thumbnail=channelselector.get_thumbnail_path() + "Crystal_Clear_action_info.png"))
                except:
                    import xbmcgui
                    advertencia = xbmcgui.Dialog()
                    advertencia.ok("Impossibile connettersi", "Non è stato possibile verificare", "aggiornamenti")
                    logger.info(
                        "cstreamondemand.platformcode.launcher Fallo al verificar la actualización"
                    )
            else:
                logger.info(
                    "streamondemand.platformcode.launcher Check for plugin updates disabled"
                )
            xbmctools.renderItems(itemlist, item)

        # Action for updating plugin
        elif (item.action == "update"):
            from core import updater
            updater.update(item)
            if config.get_system_platform() != "xbox":
                import xbmc
                xbmc.executebuiltin("Container.Refresh")

        # Action for channel types on channelselector: movies, series, etc.
        elif (item.action == "channeltypes"):
            import channelselector
            itemlist = channelselector.getchanneltypes()
            xbmctools.renderItems(itemlist, item)

        # Action for channel listing on channelselector
        elif (item.action == "listchannels"):
            import channelselector
            itemlist = channelselector.filterchannels(item.category)
            xbmctools.renderItems(itemlist, item)

        # Action in certain channel specified in "action" and "channel" parameters
        else:
            # Entry point for a channel is the "mainlist" action, so here we check parental control
            if item.action == "mainlist":
                # Parental control
                can_open_channel = False

                # If it is an adult channel, and user has configured pin, asks for it
                if channeltools.is_adult(item.channel) and config.get_setting(
                        "adult_pin") != "":
                    import xbmc
                    keyboard = xbmc.Keyboard("", "PIN para canales de adultos", True)
                    keyboard.doModal()
                    if (keyboard.isConfirmed()):
                        tecleado = keyboard.getText()
                        if tecleado == config.get_setting("adult_pin"):
                            can_open_channel = True
                # All the other cases can open the channel
                else:
                    can_open_channel = True

                if not can_open_channel:
                    return

            # Checks if channel exists
            channel_file = os.path.join(config.get_runtime_path(), 'channels', item.channel + ".py")
            logger.info(
                "streamondemand.platformcode.launcher channel_file=%s" % channel_file)

            if item.channel in ["personal", "personal2", "personal3", "personal4", "personal5"]:
                import channels.personal as channel
            elif os.path.exists(channel_file):
                try:
                    channel = __import__(
                        'channels.%s' % item.channel,
                        fromlist=["channels.%s" % item.channel])
                except:
                    # Fallback import path when __import__ fails.
                    exec "import channels." + item.channel + " as channel"

            logger.info(
                "streamondemand.platformcode.launcher running channel {0} {1}".format(channel.__name__, channel.__file__))

            # Special play action
            if item.action == "play":
                logger.info("streamondemand.platformcode.launcher play")
                # First checks if channel has a "play" function
                if hasattr(channel, 'play'):
                    logger.info(
                        "streamondemand.platformcode.launcher executing channel 'play' method"
                    )
                    itemlist = channel.play(item)

                    # Play should return a list of playable URLS
                    if len(itemlist) > 0:
                        item = itemlist[0]
                        xbmctools.play_video(item)
                    # If not, shows user an error message
                    else:
                        import xbmcgui
                        ventana_error = xbmcgui.Dialog()
                        ok = ventana_error.ok("plugin", "No hay nada para reproducir")
                # If player don't have a "play" function, not uses the standard play from xbmctools
                else:
                    logger.info(
                        "streamondemand.platformcode.launcher executing core 'play' method"
                    )
                    xbmctools.play_video(item)

            # Special action for findvideos, where the plugin looks for known urls
            elif item.action == "findvideos":
                if item.strm:
                    # Special action for playing a video from the library
                    play_from_library(item, channel, server_white_list, server_black_list)

                # First checks if channel has a "findvideos" function
                if hasattr(channel, 'findvideos'):
                    itemlist = getattr(channel, item.action)(item)
                # If not, uses the generic findvideos function
                else:
                    logger.info(
                        "streamondemand.platformcode.launcher no channel 'findvideos' method, "
                        "executing core method")
                    from core import servertools
                    itemlist = servertools.find_video_items(item)

                if config.get_setting('filter_servers') == 'true':
                    itemlist = filtered_servers(itemlist, server_white_list, server_black_list)

                from platformcode import subtitletools
                subtitletools.saveSubtitleName(item)

                # Show xbmc items as "movies", so plot is visible
                import xbmcplugin
                handle = sys.argv[1]
                xbmcplugin.setContent(int(handle), "movies")

                # Add everything to XBMC item list
                if type(itemlist) == list and itemlist:
                    xbmctools.renderItems(itemlist, item)
                # If not, it shows an empty list
                # FIXME: Aquí deberíamos mostrar alguna explicación del tipo "No hay elementos, esto pasa por bla bla bla"
                else:
                    xbmctools.renderItems([], item)

            # DrZ3r0
            # Special action for play_from_library, where the plugin looks for known urls
            elif item.action == "play_from_library":
                # Special action for playing a video from the library
                play_from_library(item, channel, server_white_list, server_black_list)

            # Special action for adding a movie to the library
            elif item.action == "add_pelicula_to_library":
                library.add_pelicula_to_library(item)

            # Special action for adding a serie to the library
            elif item.action == "add_serie_to_library":
                library.add_serie_to_library(item, channel)

            # Special action for downloading all episodes from a serie
            elif item.action == "download_all_episodes":
                downloadtools.download_all_episodes(item, channel)

            # Special action for searching, first asks for the words then call the "search" function
            elif item.action == "search":
                logger.info("streamondemand.platformcode.launcher search")
                import xbmc
                keyboard = xbmc.Keyboard("")
                keyboard.doModal()
                if (keyboard.isConfirmed()):
                    tecleado = keyboard.getText()
                    tecleado = tecleado.replace(" ", "+")
                    itemlist = channel.search(item, tecleado)
                else:
                    itemlist = []
                xbmctools.renderItems(itemlist, item)

            # For all other actions
            else:
                logger.info(
                    "streamondemand.platformcode.launcher executing channel '"
                    + item.action + "' method")
                itemlist = getattr(channel, item.action)(item)

                # Enable library mode for all generic channels so the plot is visible.
                import xbmcplugin
                handle = sys.argv[1]
                xbmcplugin.setContent(int(handle), "movies")

                # Add the items to the XBMC list
                if type(itemlist) == list and itemlist:
                    xbmctools.renderItems(itemlist, item)
                # If not, it shows an empty list
                # FIXME: Aquí deberíamos mostrar alguna explicación del tipo "No hay elementos, esto pasa por bla bla bla"
                else:
                    xbmctools.renderItems([], item)

    except urllib2.URLError, e:
        import traceback
        logger.error("streamondemand.platformcode.launcher " + traceback.format_exc())

        import xbmcgui
        ventana_error = xbmcgui.Dialog()

        # Grab inner and third party errors
        if hasattr(e, 'reason'):
            logger.info(
                "streamondemand.platformcode.launcher Razon del error, codigo: {0}, Razon: {1}"
                .format(e.reason[0], e.reason[1]))
            texto = config.get_localized_string(
                30050)  # "No se puede conectar con el sitio web"
            ok = ventana_error.ok("plugin", texto)
        # Grab server response errors
        elif hasattr(e, 'code'):
            logger.info(
                "streamondemand.platformcode.launcher codigo de error HTTP : %d"
                % e.code)
            texto = (
                config.get_localized_string(30051) % e.code
            )  # "El sitio web no funciona correctamente (error http %d)"
            ok = ventana_error.ok("plugin", texto)
def play(item):
    """Resolve a cb01anime link into playable video Items.

    Handles three url shapes: "go.php" redirector pages (follow the JS/anchor
    redirect, then chase vcrypt Location headers), "/link/" pages (unpack the
    packed JS and extract the link variable), or a plain url used as-is. The
    resolved data is handed to servertools.find_video_items.
    """
    logger.info("[cb01anime.py] play")
    # "/goto/<base64>" urls carry the real target base64-encoded (Python 2
    # str.decode('base64')).
    if '/goto/' in item.url:
        item.url = item.url.split('/goto/')[-1].decode('base64')

    item.url = item.url.replace('http://cineblog01.pw', 'http://k4pp4.pw')

    logger.debug(
        "##############################################################")
    if "go.php" in item.url:
        data = scrapertools.anti_cloudflare(item.url, headers)
        try:
            # Redirect via inline JS assignment.
            data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";')
        except IndexError:
            try:
                # data = scrapertools.get_match(data, r'<a href="([^"]+)">clicca qui</a>')
                # Alternatively, since sometimes "Clicca qui per proseguire" appears:
                data = scrapertools.get_match(
                    data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
            except IndexError:
                # Last resort: follow the HTTP Location header.
                data = scrapertools.get_header_from_response(
                    item.url, headers=headers, header_to_get="Location")
        # Chase vcrypt redirect hops until a final url remains.
        while 'vcrypt' in data:
            data = scrapertools.get_header_from_response(
                data, headers=headers, header_to_get="Location")
        logger.debug("##### play go.php data ##\n%s\n##" % data)
    elif "/link/" in item.url:
        data = scrapertools.anti_cloudflare(item.url, headers)
        from lib import jsunpack
        try:
            # The link is hidden inside packed JS; unpack it first.
            data = scrapertools.get_match(
                data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
            data = jsunpack.unpack(data)
            logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
        except IndexError:
            logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)

        data = scrapertools.find_single_match(
            data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
        # Chase vcrypt redirect hops until a final url remains.
        while 'vcrypt' in data:
            data = scrapertools.get_header_from_response(
                data, headers=headers, header_to_get="Location")
        logger.debug("##### play /link/ data ##\n%s\n##" % data)
    else:
        data = item.url
        logger.debug("##### play else data ##\n%s\n##" % data)
    logger.debug(
        "##############################################################")

    itemlist = servertools.find_video_items(data=data)

    # Re-title and re-brand the found items under this channel.
    for videoitem in itemlist:
        videoitem.title = videoitem.title + " - " + item.show + " - " + item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
def findvideos(item):
    """Collect playable video options (per language/quality) for *item*.

    Scrapes the page for per-language player tabs, resolves each tab to a
    hoster URL, and returns one Item per playable mirror.  Direct
    'jw.miradetodo' streams are expanded into one entry per quality;
    everything else is delegated to servertools.find_video_items.
    """
    logger.info()
    url_list = []
    itemlist = []
    duplicados = []  # direct URLs already emitted, to avoid duplicates

    data = get_source(item.url)
    src = data  # keep the untouched page: `data` is reused per-option below
    patron = 'id=(?:div|player)(\d+)>.*?<iframe src=.*? data-lazy-src=(.*?) marginheight'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Pass 1: map each player tab to (hoster_url, language).
    for option, videoitem in matches:
        lang = scrapertools.find_single_match(
            src, '<a href=#(?:div|player)%s.*?>.*?(Doblado|Subtitulado)<\/a>' % option)
        data = get_source(videoitem)
        if 'play' in videoitem:
            url = scrapertools.find_single_match(
                data, '<span>Ver Online<.*?<li><a href=(.*?)><span class=icon>')
        else:
            url = scrapertools.find_single_match(
                data, '<iframe src=(.*?) scrolling=')
        url_list.append([url, lang])

    # Pass 2: turn each (url, language) pair into playable items.
    for video_url in url_list:
        language = video_url[1]
        if 'jw.miradetodo' in video_url[0]:
            # Direct player: expose every advertised quality as its own item.
            data = get_source('http:' + video_url[0])
            patron = 'label:.*?(.*?),.*?file:.*?(.*?)&app.*?\}'
            matches = re.compile(patron, re.DOTALL).findall(data)
            for quality, scrapedurl in matches:
                title = item.contentTitle + ' (%s) %s' % (quality, language)
                server = 'directo'
                url = scrapedurl.replace('\/', '/')
                subtitle = scrapertools.find_single_match(
                    data, "tracks: \[\{file: '.*?linksub=(.*?)',label")
                if url not in duplicados:
                    itemlist.append(
                        item.clone(title=title,
                                   action='play',
                                   url=url,
                                   quality=quality,
                                   server=server,
                                   subtitle=subtitle,
                                   language=language))
                    duplicados.append(url)
        # BUG FIX: the original compared the [url, lang] *list* to '' which is
        # always unequal, so empty scrape results were still passed on.  Test
        # the URL element itself.
        elif video_url[0] != '':
            itemlist.extend(servertools.find_video_items(data=video_url[0]))

    import os
    # NOTE(review): `language` here is whatever the *last* loop iteration left
    # behind — every hoster item gets tagged with that language.  Preserved
    # as-is; confirm whether per-item language was intended.
    for videoitem in itemlist:
        if videoitem.server != 'directo':
            quality = item.quality
            title = item.contentTitle + ' (%s) %s' % (videoitem.server, language)
            if item.quality != '':
                title = item.contentTitle + ' (%s) %s' % (quality, language)
            videoitem.title = title
            videoitem.channel = item.channel
            videoitem.thumbnail = os.path.join(
                config.get_runtime_path(), "resources", "media", "servers",
                "server_%s.png" % videoitem.server)
            videoitem.quality = item.quality

    # Offer the "add to video library" entry only once (not when re-entered
    # from the library itself via extra == 'findvideos').
    if item.infoLabels['mediatype'] == 'movie':
        if config.get_videolibrary_support() and len(itemlist) > 0 \
                and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel,
                     title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                     url=item.url,
                     action="add_pelicula_to_library",
                     extra="findvideos",
                     contentTitle=item.contentTitle))
    return itemlist
def play(item):
    """Resolve *item*'s URL to a playable stream, dispatching per hosting site.

    Returns a list with one (or more) playable Items; unknown URLs fall back
    to generic server detection via servertools.find_video_items.
    """
    logger.info("deportesalacarta.channels.boxingclub play")
    itemlist = []

    if "pokeryou" in item.url:
        data = scrapertools.cachePage(item.url)
        docid = scrapertools.find_single_match(data, 'docid=([^&]+)&')
        # NOTE(review): the original source was redacted here ("******"),
        # leaving invalid syntax.  The code below reconstructs the evident
        # intent — query Google Docs' get_video_info for the embedded doc and
        # extract a direct stream URL into `video_url` (the name the surviving
        # Item(...) call uses).  The extraction pattern is an assumption;
        # confirm against a live response before relying on it.
        url = ("https://docs.google.com/get_video_info?docid=%s&eurl=%s&authuser="
               % (docid, item.url))
        data = scrapertools.cachePage(url)
        video_url = scrapertools.find_single_match(data, 'url%3D([^%&]+)')
        itemlist.append(
            Item(channel=__channel__, action="play", server="directo",
                 title=item.title, url=video_url, thumbnail=item.thumbnail,
                 folder=False))

    elif "mmatd.com" in item.url:
        # Direct file URL exposed in the page's player config.
        data = scrapertools.cachePage(item.url)
        video_url = scrapertools.find_single_match(data, 'file: "([^"]+)"')
        itemlist.append(
            Item(channel=__channel__, action="play", server="directo",
                 title=item.title, url=video_url, thumbnail=item.thumbnail,
                 folder=False))

    elif "mmaversus" in item.url:
        # Two hops: follow the bestinmma redirect, then detect hosters there.
        data = scrapertools.cachePage(item.url)
        url_redirect = scrapertools.find_single_match(
            data, '<a href="(http://bestinmma[^"]+)"')
        data = scrapertools.cachePage(url_redirect)
        video_itemlist = servertools.find_video_items(data=data)
        for video_item in video_itemlist:
            itemlist.append(
                Item(channel=__channel__, action="play",
                     server=video_item.server, title=video_item.title,
                     url=video_item.url, thumbnail=item.thumbnail,
                     fanart=item.fanart, folder=False))

    elif "prowrestlingreports" in item.url:
        # Site checks the Referer header before serving the embed page.
        headers.append(['Referer', item.referer])
        data = scrapertools.cachePage(item.url, headers=headers)
        logger.info(data)
        url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
        itemlist.append(
            Item(channel=__channel__, action="play", server=item.server,
                 title=item.title, url=url, thumbnail=item.thumbnail,
                 fanart=item.fanart, folder=False))

    else:
        # Unknown host: let the generic server detector handle the URL.
        video_itemlist = servertools.find_video_items(data=item.url)
        for video_item in video_itemlist:
            itemlist.append(
                Item(channel=__channel__, action="play",
                     server=video_item.server, title=video_item.title,
                     url=video_item.url, thumbnail=item.thumbnail,
                     fanart=item.fanart, folder=False))

    return itemlist