def play(item):
    # Resolve the final playable URL(s) for a boxingclub entry, dispatching
    # on the hosting domain embedded in item.url.  Returns a list of Items.
    logger.info("pelisalacarta.channels.boxingclub play")
    itemlist = []
    if "pokeryou" in item.url:
        # Google-Docs-hosted video: extract the document id first.
        data = scrapertools.cachePage(item.url)
        docid = scrapertools.find_single_match(data, 'docid=([^&]+)&')
        # NOTE(review): the statement below is corrupted — a secret-scrubber
        # replaced a span of the original code with "******".  The code that
        # formatted `url`, fetched it and derived `video_url` is missing and
        # this branch cannot run as-is; restore it from version control.
        url = "https://docs.google.com/get_video_info?docid=%s&eurl=%s&authuser="******"play" , server="directo", title=item.title, url=video_url, thumbnail=item.thumbnail, folder=False))
    elif "mmatd.com" in item.url:
        # The stream URL sits in a JWPlayer-style file: "..." attribute.
        data = scrapertools.cachePage(item.url)
        video_url = scrapertools.find_single_match(data, 'file: "([^"]+)"')
        itemlist.append( Item(channel=__channel__ , action="play" , server="directo", title=item.title, url=video_url, thumbnail=item.thumbnail, folder=False))
    elif "mmaversus" in item.url:
        # Follow the intermediate "bestinmma" link, then let servertools
        # recognise whatever embeds that page contains.
        data = scrapertools.cachePage(item.url)
        url_redirect = scrapertools.find_single_match(data, '<a href="(http://bestinmma[^"]+)"')
        data = scrapertools.cachePage(url_redirect)
        video_itemlist = servertools.find_video_items(data=data)
        for video_item in video_itemlist:
            itemlist.append( Item(channel=__channel__ , action="play" , server=video_item.server, title=video_item.title, url=video_item.url, thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
    elif "prowrestlingreports" in item.url:
        # This host checks the Referer header before serving the iframe.
        headers.append(['Referer',item.referer])
        data = scrapertools.cachePage(item.url, headers=headers)
        logger.info(data)
        url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
        itemlist.append( Item(channel=__channel__ , action="play" , server=item.server, title=item.title, url=url, thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
    else:
        # Unknown host: let servertools try to identify the URL itself.
        video_itemlist = servertools.find_video_items(data=item.url)
        for video_item in video_itemlist:
            itemlist.append( Item(channel=__channel__ , action="play" , server=video_item.server, title=video_item.title, url=video_item.url, thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
    return itemlist
def findvideos(item):
    """List video mirrors for a fullmatchesandshows.com entry.

    First call (item.extra == ""): downloads the page and, when the post is
    split into AJAX-paginated parts, emits one folder Item per part with the
    AJAX POST payload carried in `extra`.  Second call: posts that payload to
    admin-ajax.php and extracts the videos from the response.
    """
    logger.info("pelisalacarta.channels.fullmatches findvideos")
    itemlist = []
    if item.extra == "":
        data = scrapertools.cachePage(item.url)
        data = scrapertools.decodeHtmlentities(data)
        # Hidden form inputs needed to rebuild the pagination AJAX request.
        acp = "&acp_pid=" + scrapertools.find_single_match(data, '<input id="acp_post".*?value="([^"]+)"/>')
        acp_shortcode = "&acp_shortcode=" + scrapertools.find_single_match(data, '<input id="acp_shortcode".*?value="([^"]+)"/>')
        matches = scrapertools.find_multiple_matches(data, 'id="item.*?"><a href="([^"]+)"><div.*?>(.*?)</div>')
        if len(matches) > 1:
            for scrapedurl, scrapedtitle in matches:
                # Translate the English part labels to Spanish.
                scrapedtitle = scrapedtitle \
                    .replace("HL ", "Resumen ").replace("Extended", "Extendido") \
                    .replace("1st half ", "1ª parte ").replace("2nd half ", "2ª parte ") \
                    .replace("Pre-Match", "Pre-partido").replace("Post-Match", "Post-Partido")
                post = "acp_currpage=" + scrapedurl.replace("#", "") + acp + acp_shortcode + "&action=pp_with_ajax"
                itemlist.append(Item(channel=__channel__, title=scrapedtitle, url=item.url, action="findvideos",
                                     thumbnail=item.thumbnail, extra=post, folder=True))
        else:
            itemlist = servertools.find_video_items(data=data)
            # FIX: loop variable renamed — the original `for item in itemlist`
            # shadowed (and clobbered) the `item` parameter.
            for videoitem in itemlist:
                videoitem.channel = __channel__
    else:
        data = scrapertools.cachePage("http://www.fullmatchesandshows.com/wp-admin/admin-ajax.php", post=item.extra)
        itemlist = servertools.find_video_items(data=data)
        for videoitem in itemlist:
            videoitem.channel = __channel__
    return itemlist
def play(item):
    """Resolve the redirect behind item.url and return playable mirrors.

    Follows the HTTP "location" header; when servertools cannot identify a
    video in the redirect target, falls back to analysing item.url itself.
    """
    logger.info("pelisalacarta.bricocine findvideos")
    media_url = scrapertools.get_header_from_response(item.url, header_to_get="location")
    itemlist = servertools.find_video_items(data=media_url)
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(data=item.url)
    # FIX: removed dead code — the original downloaded the page again and
    # looped over servertools.findvideos(data) assigning locals that were
    # never used, wasting one HTTP request per call.
    return itemlist
def findvid(item):
    """Collect the mirrors of an animesubita episode via the site's AJAX player."""
    logger.info("streamondemand.channels.animesubita findvideos")
    headers.append(['Referer', item.url])
    # Fetch the episode page.
    data = scrapertools.cache_page(item.url, headers=headers)
    # Each player slot is loaded from admin-ajax.php by numeric id.
    player_ids = re.compile('return\s*gnarty_player\((\d+)\);', re.DOTALL).findall(data)
    ajax_url = host + 'wp-admin/admin-ajax.php'
    fragments = [scrapertools.cache_page(ajax_url, post='action=loadPlayer&id=' + vid, headers=headers)
                 for vid in player_ids]
    itemlist = servertools.find_video_items(data=''.join(fragments))
    if not itemlist:
        # Nothing recognised in the AJAX responses: scan the raw page instead.
        itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__
    return itemlist
def findvideos(item):
    """Find the mirrors of an italiafilm entry, expanding the host-selector iframe."""
    logger.info("[italiafilm.py] findvideos")
    page = scrapertools.cache_page(item.url)
    itemlist = servertools.find_video_items(data=page)
    iframe_patron = '<iframe style="border: 0;" src="([^"]+)" width="[^"]*" height="[^"]*" scrolling="[^"]*" allowfullscreen="[^"]*"></iframe>'
    iframe_url = scrapertools.find_single_match(page, iframe_patron)
    if iframe_url:
        headers.append(['Referer', item.url])
        iframe_data = scrapertools.cache_page(iframe_url, headers=headers)
        # One extra request per "mlink_<n>" host button found in the iframe.
        chunks = []
        for num in scrapertools.find_multiple_matches(iframe_data, 'id="mlink_(\d+)"'):
            chunks.append(scrapertools.cache_page(iframe_url + '?host=%s' % num, headers=headers))
        itemlist.extend(servertools.find_video_items(data=''.join(chunks)))
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__
    return itemlist
def findvideos(item):
    # Build the list of playable mirrors for a MegaSpain forum thread.
    # When the board shows a "thank you" gate, the URL is rewritten with the
    # action=thankyou parameter (suffix carried in item.plot) and re-fetched.
    #show = item.title.replace("Añadir esta serie a la biblioteca de XBMC","")
    #logger.info("[megaspain.py] findvideos show "+ show)
    itemlist=[]
    data = scrapertools.cache_page(item.url)
    if 'thank_you_button'in data:
        item.url = item.url.replace("php?topic=","php?action=thankyou;topic=")
        item.url = item.url + item.plot
        data = scrapertools.cache_page(item.url)
    if 'MegaSpain' in data:
        # Thumbnail: first image of the post that precedes a mega.co.nz link.
        patronimage = '<div class="inner" id="msg_\d{1,9}".*?<img src="([^"]+)".*?mega.co.nz/\#\![A-Za-z0-9\-\_]+\![A-Za-z0-9\-\_]+'
        matches = re.compile(patronimage,re.DOTALL).findall(data)
        if len(matches)>0:
            thumbnail = matches[0]
            thumbnail = scrapertools.htmlclean(thumbnail)
            # Forum pages are latin-1; normalise to utf-8 (Python 2 idiom).
            thumbnail = unicode( thumbnail, "iso-8859-1" , errors="replace" ).encode("utf-8")
            item.thumbnail = thumbnail
        # Plot: post body between the header image and the Facebook button.
        patronplot = '<div class="inner" id="msg_\d{1,9}".*?<img src="[^"]+"[^/]+/>(.*?)lgf_facebook_share'
        matches = re.compile(patronplot,re.DOTALL).findall(data)
        if len(matches)>0:
            plot = matches[0]
            title = item.title
            # NOTE(review): the ' ' pattern strips every space — possibly a
            # mangled '&nbsp;' literal; verify against upstream history.
            plot = re.sub(' ', '', plot)
            plot = re.sub('\s\s', '', plot)
            plot = scrapertools.htmlclean(plot)
            # NOTE(review): the cleaned plot is computed but then discarded.
            item.plot = ""
        from servers import servertools
        itemlist.extend(servertools.find_video_items(data=data))
        for videoitem in itemlist:
            videoitem.channel=__channel__
            videoitem.action="play"
            videoitem.folder=False
            videoitem.thumbnail=item.thumbnail
            videoitem.plot = item.plot
            # NOTE(review): the opening "[" is never closed — titles render
            # as "[server<title> <title>"; same quirk exists in megahd.
            videoitem.title = "["+videoitem.server+videoitem.title + " " + item.title
            #videoitem.show = show
        # if config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"):
        #     itemlist.append( Item(channel=item.channel, title=show + " Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="findvideos") )
        return itemlist
    else:
        # Not a MegaSpain post: plain extraction with empty metadata.
        item.thumbnail = ""
        item.plot = ""
        from servers import servertools
        itemlist.extend(servertools.find_video_items(data=data))
        for videoitem in itemlist:
            videoitem.channel=__channel__
            videoitem.action="play"
            videoitem.folder=False
            videoitem.thumbnail=item.thumbnail
            videoitem.plot = item.plot
            videoitem.title = "["+videoitem.server+videoitem.title + " " + item.title
        return itemlist
def play(item):
    """Follow the HTTP redirect of a seriesmu link and return its mirrors."""
    logger.info("pelisalacarta.seriesmu play")
    redirect_url = scrapertools.get_header_from_response(item.url, header_to_get="Location")
    mirrors = servertools.find_video_items(data=redirect_url)
    if not mirrors:
        # The redirect target was not recognised; try the original URL.
        mirrors = servertools.find_video_items(data=item.url)
    return mirrors
def findvideos(item):
    # Build the list of playable mirrors for a MegaHD forum thread.
    # When the board shows a "thank you" gate, the URL gains the
    # action=thankyou parameter (fragment carried in item.plot) and the page
    # is downloaded again.
    show = item.title.replace("Añadir esta serie a la biblioteca de XBMC","")
    logger.info("[megahd.py] findvideos show "+ show)
    logger.info("[megahd.py] findvideos"+item.url)
    data = scrapertools.cache_page(item.url)
    itemlist=[]
    if '?action=thankyou;'+item.plot in data:
        item.plot = item.plot.replace("msg=","?action=thankyou;msg=")
        item.url = item.url + item.plot
        data = scrapertools.cache_page(item.url)
        logger.info("data="+data)
    if 'MegaHD' in data:
        # Thumbnail: first image of the post that precedes a mega.co.nz link.
        patronimage = '<div class="inner" id="msg_\d{1,9}".*?<img src="([^"]+)".*?mega.co.nz/\#\![A-Za-z0-9\-\_]+\![A-Za-z0-9\-\_]+'
        matches = re.compile(patronimage,re.DOTALL).findall(data)
        if len(matches)>0:
            thumbnail = matches[0]
            thumbnail = scrapertools.htmlclean(thumbnail)
            # Forum pages are latin-1; normalise to utf-8 (Python 2 idiom).
            thumbnail = unicode( thumbnail, "iso-8859-1" , errors="replace" ).encode("utf-8")
            item.thumbnail = thumbnail
        from servers import servertools
        itemlist.extend(servertools.find_video_items(data=data))
        for videoitem in itemlist:
            videoitem.channel=__channel__
            videoitem.action="play"
            videoitem.folder=False
            videoitem.thumbnail=item.thumbnail
            #videoitem.plot = item.plot
            # NOTE(review): the opening "[" is never closed — titles render
            # as "[server<title> <title>"; same quirk exists in megaspain.
            videoitem.title = "["+videoitem.server+videoitem.title + " " + item.title
            videoitem.show = show
        # On XBMC/Boxee offer adding the whole series to the library.
        if config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"):
            itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="findvideos") )
        return itemlist
    else:
        # Not a MegaHD post: plain extraction with empty metadata.
        item.thumbnail = ""
        item.plot = ""
        from servers import servertools
        itemlist.extend(servertools.find_video_items(data=data))
        for videoitem in itemlist:
            videoitem.channel=__channel__
            videoitem.action="play"
            videoitem.folder=False
            videoitem.thumbnail=item.thumbnail
            #videoitem.plot = item.plot
            videoitem.title = "["+videoitem.server+videoitem.title + " " + item.title
        return itemlist
def play(item):
    """Peel the redirect/packing layers off a streamingfilmit link and return mirrors."""
    logger.info("streamondemand.streamingfilmit play")
    data = scrapertools.cache_page(item.url, headers=headers)
    data = scrapertools.decodeHtmlentities(data).replace('http://cineblog01.pw', 'http://k4pp4.pw')
    url = scrapertools.find_single_match(data, r'<a\s*href="([^"]+)"><h1')
    data = scrapertools.cache_page(url, headers=headers)
    if "go.php" in url:
        # Interstitial page: the target is inside a JS redirect.
        data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";')
    elif "/link/" in url:
        # The link page may be packed with p.a.c.k.e.r — unpack when present.
        from lib.jsbeautifier.unpackers import packer
        try:
            packed = scrapertools.get_match(data, "(eval.function.p,a,c,k,e,.*?)</script>")
            data = packer.unpack(packed)
        except IndexError:
            pass  # already plain text
        data = scrapertools.get_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
    else:
        data = url
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__
    return itemlist
def test(): bien = True # mainlist mainlist_items = mainlist(Item()) # Comprueba que todas las opciones por categorias tengan algo (excepto los buscadores) for mainlist_item in mainlist_items: if mainlist_item.action.startswith("menu"): exec "itemlist = "+mainlist_item.action+"(mainlist_item)" # Lee la primera categoría sólo exec "itemlist2 ="+itemlist[0].action+"(itemlist[0])" if len(itemlist2)==0: return false # Comprueba si alguno de los vídeos de "Novedades" devuelve mirrors for mainlist_item in mainlist_items: if mainlist_item.action=="peliculas" or mainlist_item.action=="listserie": exec "itemlist = "+mainlist_item.action+"(mainlist_item)" bien = False for episodio_item in itemlist: from servers import servertools mirrors = servertools.find_video_items(item=episodio_item) if len(mirrors)>0: bien = True break return bien
def play(item):
    """Resolve URL shorteners (bit.ly, goo.gl, j.gs/adf.ly) recursively,
    then hand the final URL to servertools for server detection."""
    logger.info("[somosmovies.py] play(item.url="+item.url+")")
    itemlist=[]
    if "bit.ly" in item.url:
        logger.info("Acortador bit.ly")
        location = scrapertools.get_header_from_response(item.url,header_to_get="location")
        logger.info("[somosmovies.py] location="+location)
        item.url = location
        return play(item)
    if "goo.gl" in item.url:
        logger.info("Acortador goo.gl")
        item.url = scrapertools.get_header_from_response(item.url,header_to_get="location")
        return play(item)
    elif "j.gs" in item.url:
        # adf.ly-family shortener: needs its own resolver.
        logger.info("Acortador j.gs (adfly)")
        from servers import adfly
        item.url = adfly.get_long_url(item.url)
        return play(item)
    else:
        from servers import servertools
        itemlist = servertools.find_video_items(data=item.url)
        for videoitem in itemlist:
            videoitem.channel = __channel__
            videoitem.folder = False
        return itemlist
def findvid_serie(item):
    """Extract per-episode mirrors from a filmstream series page.

    Each centered <p> pairs a label with one or more host links; the label
    (or the first link's text when the label is empty) prefixes every
    mirror title found inside that fragment.
    """
    logger.info("pelisalacarta.filmstream findvideos")
    itemlist = []
    ## Download the page.
    page = scrapertools.cache_page(item.url)
    page = scrapertools.decodeHtmlentities(page)
    patron1 = '<p style="text-align: center;">(.*?)(<a href="[^"]+" target="_blank">([^<]+)</a>.+?)</p>'
    for label, fragment, link_text in re.compile(patron1).findall(page):
        label = re.sub(r'<[^>]*>', '', label)  # strip inline markup from the label
        scrapedtitle = link_text if label == "" else label
        mirrors = servertools.find_video_items(data=fragment)
        for videoitem in mirrors:
            videoitem.title = scrapedtitle + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.show = item.show
            videoitem.plot = item.plot
            videoitem.channel = __channel__
        itemlist.extend(mirrors)
    return itemlist
def findvideos(item):
    """List the mirrors of a peliculasonlineflv entry.

    Mirrors come from two sources: generic detection via servertools, plus
    the site's own shortcodes (vklat/vksub/vk = VK behind a goo.gl redirect,
    plat/plsub/put = Putlocker embed codes).

    FIX: the six copy-pasted try/except blocks of the original are collapsed
    into one data-driven loop — same links, same order, same log messages.
    """
    logger.info("[peliculasonlineflv.py] findvideos")
    itemlist=[]
    # Download the page.
    data = scrapertools.cachePage(item.url)
    from servers import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel=__channel__
        videoitem.action="play"
        videoitem.folder=False
        videoitem.title = "Ver en "+videoitem.server
        videoitem.fulltitle = item.fulltitle
    # Manual shortcodes: (regex, item title, server).  Order matters for UI.
    manual_links = [
        ("vklat\=([a-zA-Z0-9]+)", "Ver en VK (Latino)", "vk"),
        ("plat\=([A-Z0-9]+)", "Ver en Putlocker (Latino)", "putlocker"),
        ("vksub\=([a-zA-Z0-9]+)", "Ver en VK (Subtitulado)", "vk"),
        ("plsub\=([A-Z0-9]+)", "Ver en Putlocker (Subtitulado)", "putlocker"),
        ("vk\=([a-zA-Z0-9]+)", "Ver en VK", "vk"),
        ("put\=([A-Z0-9]+)", "Ver en Putlocker", "putlocker"),
    ]
    for patron, title, server in manual_links:
        try:
            code = scrapertools.get_match(data, patron)
            if server == "vk":
                # VK codes are goo.gl fragments: resolve the redirect.
                url = scrapertools.get_header_from_response("http://goo.gl/"+code, header_to_get="location")
            else:
                url = "http://www.putlocker.com/embed/"+code
            itemlist.append( Item( channel=__channel__ , action="play" , title=title , server=server , url=url , folder=False ) )
        except:
            # Best effort: a missing shortcode is the normal case.
            if server == "vk":
                logger.info("No encontrado enlace VK")
            else:
                logger.info("No encontrado enlace PUTLOCKER")
    return itemlist
def play(item): logger.info("[cineblog01.py] play") data = scrapertools.cache_page(item.url) print "##############################################################" if "go.php" in item.url: data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";') print "##### play go.php data ##\n%s\n##" % data elif "/link/" in item.url: from lib.jsbeautifier.unpackers import packer try: data = scrapertools.get_match(data, "(eval.function.p,a,c,k,e,.*?)</script>") data = packer.unpack(data) print "##### play /link/ unpack ##\n%s\n##" % data except IndexError: print "##### The content is yet unpacked" data = scrapertools.get_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";') print "##### play /link/ data ##\n%s\n##" % data else: data = item.url print "##### play else data ##\n%s\n##" % data print "##############################################################" itemlist = servertools.find_video_items(data=data) for videoitem in itemlist: videoitem.title = item.show videoitem.fulltitle = item.fulltitle videoitem.thumbnail = item.thumbnail videoitem.channel = __channel__ return itemlist
def findvideos(item):
    """Scrape direct and RTMP flash streams out of a tusnovelas page, then
    add any servertools-recognised mirrors."""
    logger.info("[tusnovelas.py] findvideos")
    data = scrapertools.cache_page(item.url)
    itemlist = []
    # Flash embeds with a plain file= URL.
    patron = '<embed type="application/x-shockwave-flash" src="http://www.todoanimes.com/reproductor/player.swf".*?file=([^\&]+)&'
    for direct_url in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=__channel__, action="play", server="directo", title=item.title,
                             url=direct_url, thumbnail=item.thumbnail, plot=item.plot, folder=False))
    # Kaltura-style embeds: the playable URL is streamer + file.
    patron = '<embed width="[^"]+" height="[^"]+" flashvars="file=([^\&]+)&.*?streamer=(rtmp[^\&]+)&'
    for tail, base in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=__channel__, action="play", server="directo", title=item.title,
                             url=base + tail, thumbnail=item.thumbnail, plot=item.plot, folder=False))
    from servers import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = __channel__
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "["+videoitem.server+"]"
    return itemlist
def play(item):
    """Resolve linkbucks (and nested adf.ly) shorteners before server lookup.

    Non-linkbucks items are assumed already resolved and returned as-is.
    """
    logger.info("[filmenoi.py] play")
    itemlist=[]
    if item.server=="linkbucks":
        logger.info("Es linkbucks")
        # Resolve the linkbucks gateway.
        from servers import linkbucks
        location = linkbucks.get_long_url(item.url)
        logger.info("location="+location)
        if location.startswith("http://adf"):
            # The linkbucks target is itself an adf.ly shortener.
            from servers import adfly
            location = adfly.get_long_url(location)
            logger.info("location="+location)
        from servers import servertools
        itemlist = servertools.find_video_items(data=location)
        for videoitem in itemlist:
            videoitem.channel = __channel__
            videoitem.folder = False
    else:
        itemlist.append(item)
    return itemlist
def findvideos(item):
    """Pull the direct .mp4 stream URLs out of an animeid episode page."""
    logger.info("pelisalacarta.channels.animeid findvideos")
    data = scrapertools.cache_page(item.url)
    itemlist=[]
    # Only the parts list holds the stream links; undo the JS/URL escaping.
    data = scrapertools.find_single_match(data,'<ul id="partes">(.*?)</ul>')
    data = data.replace("\\/","/").replace("%3A",":").replace("%2F","/")
    logger.info("pelisalacarta.channels.animeid data="+data)
    patron = '(http://www.animeid.moe/stream/[^/]+/\d+.[a-z0-9]+)'
    seen = set()
    for url in re.compile(patron,re.DOTALL).findall(data):
        if url in seen:
            continue  # the same stream link appears multiple times
        seen.add(url)
        itemlist.append( Item(channel=__channel__, action="play" , title="[directo]" , server="directo", url=url, thumbnail="", plot="", show=item.show, folder=False))
    from servers import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = __channel__
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "["+videoitem.server+"]"
    return itemlist
def findvideos(item):
    """List mirrors for a mondolunatico entry, including the site's own
    captcha-protected pass/index.php links."""
    logger.info("streamondemand.mondolunatico findvideos")
    # Download the page.
    data = scrapertools.cache_page(item.url, headers=headers)
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__
    # Captcha-gated links hosted on the site itself.
    patron = r'noshade>(.*?)<br>.*?<a href="(http://mondolunatico\.org/pass/index\.php\?ID=[^"]+)"'
    for scrapedtitle, scrapedurl in re.compile(patron, re.DOTALL).findall(data):
        label = scrapedtitle.replace('*', '').replace('Streaming', '').strip()
        itemlist.append(
            Item(channel=__channel__, action="play", title='%s - [%s]' % (item.title, label),
                 url=scrapedurl, thumbnail=item.thumbnail, fulltitle=item.fulltitle,
                 show=item.show, server='captcha', folder=False))
    return itemlist
def findvideos(item):
    """Mirror discovery for this channel is delegated wholesale to servertools."""
    from servers import servertools
    return servertools.find_video_items(item=item, channel=item.channel)
def findvideos(item):
    """List mirrors for a tucinecom entry, optionally restricted to the page
    section named by item.extra, plus any raw adf.ly gateway links."""
    logger.info("[tucinecom.py] findvideos")
    itemlist=[]
    data = get_main_page(item.url)
    if item.extra!="":
        # Narrow the search to the requested <div> section.
        data = scrapertools.get_match(data,'<div id="'+item.extra+'"(.*?)</div>')
    logger.info("data="+data)
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.channel = __channel__
        videoitem.thumbnail = item.thumbnail
        videoitem.fulltitle = item.title
        videoitem.title = "Ver en ["+videoitem.server+"]"
    adfly_links = re.compile("(http\://adf.ly/[A-Z0-9a-z]+)", re.DOTALL).findall(data)
    scrapertools.printMatches(adfly_links)
    for url in adfly_links:
        itemlist.append( Item(channel=__channel__, action="play", title="Enlace adf.ly" , url=url , folder=False) )
    return itemlist
def findvideos(item):
    """Scrape the flash embeds (MSN direct, Kaltura RTMP and bare
    file/streamer pairs) from a vertelenovelas page, then add any
    servertools-recognised mirrors."""
    logger.info("pelisalacarta.channels.vertelenovelas findvideos")
    data = scrapertools.cache_page(item.url)
    itemlist=[]
    # Site player embeds with a plain file= URL.
    patron = '<embed type="application/x-shockwave-flash" src="http://vertelenovelas.net/player.swf".*?file=([^\&]+)&'
    for direct_url in re.compile(patron,re.DOTALL).findall(data):
        itemlist.append( Item(channel=__channel__, action="play", server="directo", title=item.title , url=direct_url , thumbnail=item.thumbnail , plot=item.plot , folder=False) )
    # Kaltura-style embeds: playable URL is streamer + file.
    patron = '<embed width="[^"]+" height="[^"]+" flashvars="file=([^\&]+)&.*?streamer=(rtmp[^\&]+)&'
    for tail, base in re.compile(patron,re.DOTALL).findall(data):
        itemlist.append( Item(channel=__channel__, action="play", server="directo", title=item.title , url=base+tail , thumbnail=item.thumbnail , plot=item.plot , folder=False) )
    # Bare file=...&streamer=rtmp... pairs (joined with a slash).
    patron = 'file=([^\&]+)&streamer=(rtmp[^\&]+)&'
    for tail, base in re.compile(patron,re.DOTALL).findall(data):
        itemlist.append( Item(channel=__channel__, action="play", server="directo", title=item.title , url=base+"/"+tail , thumbnail=item.thumbnail , plot=item.plot , folder=False) )
    from servers import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = __channel__
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "["+videoitem.server+"]"
    return itemlist
def play(item):
    """Return the mirrors of item.url plus any trailers found on its page."""
    logger.info("pelisalacarta.bricocine findvideos")
    itemlist = servertools.find_video_items(data=item.url)
    data = scrapertools.cache_page(item.url)
    for video in servertools.findvideos(data):
        # video = (title, url, server, ...) tuple from servertools.
        trailer_title = scrapertools.unescape(video[0])
        itemlist.append(
            Item(
                channel=__channel__,
                action="play",
                server=video[2],
                title="Trailer - " + trailer_title,
                url=item.url,
                thumbnail=item.thumbnail,
                plot=item.plot,
                fulltitle=item.title,
                fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg",
                folder=False,
            )
        )
    return itemlist
def findvideos(item):
    """List the mirrors of a mocosoftx post, titling each one with the file
    name taken from the mirror URL's path."""
    logger.info("[mocosoftx.py] findvideos")
    data = scrapertools.cache_page(item.url)
    # Post thumbnail — best effort, empty when the post has no image.
    try:
        thumbnail = scrapertools.get_match(data, '<div class="post">.*?<img src="([^"]+)"')
    except:
        thumbnail = ""
    plot = ""
    # Now locate the actual videos.
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.channel = __channel__
        videoitem.plot = plot
        videoitem.thumbnail = thumbnail
        videoitem.fulltitle = item.title
        # Last path component of the mirror URL doubles as the file name.
        path_parts = urlparse.urlparse(videoitem.url).path.split("/")
        videoitem.title = path_parts[-1] + " - [" + videoitem.server + "]"
    return itemlist
def findvideos(item):
    """Collect the iframes of a doramatv episode and translate the site's
    larata.in proxy URLs back to the real hoster embeds."""
    logger.info("pelisalacarta.channels.doramatv findvideos")
    headers = DEFAULT_HEADERS[:]
    data = scrapertools.cache_page(item.url, headers=headers)
    data = data.replace('\n', '').replace('\r', '')
    iframe_srcs = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(data)
    data = ''.join(src + '\n' for src in iframe_srcs)
    # Map each redirector endpoint onto the hoster it wraps (order preserved).
    for proxy, real in (
            ('%26', '&'),
            ('http://ozhe.larata.in/repro-d/mvk?v=', 'http://vk.com/video_ext.php?oid='),
            ('http://ozhe.larata.in/repro-d/send?v=', 'http://sendvid.com/embed/'),
            ('http://ozhe.larata.in/repro-d/msend?v=', 'http://sendvid.com/embed/'),
            ('http://ozhe.larata.in/repro-d/vidweed?v=', 'http://www.videoweed.es/file/'),
            ('http://ozhe.larata.in/repro-d/nowv?v=', 'http://www.nowvideo.sx/video/'),
            ('http://ozhe.larata.in/repro-d/nov?v=', 'http://www.novamov.com/video/')):
        data = data.replace(proxy, real)
    itemlist = []
    from servers import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = __channel__
        videoitem.folder = False
    return itemlist
def episodios(item):
    """Build the full episode list of a dl-more series by loading every
    per-season AJAX fiche and harvesting its video links."""
    logger.info("[dlmore.py] episodios")
    itemlist = []
    # Download the series page.
    page = scrapertools.cachePage(item.url)
    patron = '<a href="(\./ajax/fiche_serie.ajax.php\?id=[^"]+)" name="lien" class="[^"]+">([^<]+)</a>'
    for relative_url, season_title in re.compile(patron, re.DOTALL).findall(page):
        # e.g. http://www.dl-more.eu/ajax/fiche_serie.ajax.php?id=203&saison=1
        fiche_url = urlparse.urljoin("http://www.dl-more.eu/", relative_url)
        fiche_data = scrapertools.cachePage(fiche_url)
        # Each fiche iframe lists one season's episodes.
        videoitems = servertools.find_video_items(data=fiche_data)
        for videoitem in videoitems:
            videoitem.channel = __channel__
            videoitem.title = season_title + videoitem.title  # prepend season label
        # All episodes from all seasons end up in the same flat list.
        itemlist.extend(videoitems)
    return itemlist
def findvideos( item ):
    """Rebuild the embed URLs from filmsubitotv's setupNewPlayer() calls.

    The page only exposes (video_id, server_index) pairs; the `servers`
    table maps each known index to its embed URL template.
    """
    logger.info( "[filmsubitotv.py] findvideos" )
    data = scrapertools.cache_page( item.url )
    servers = {
        '2':'http://embed.nowvideo.li/embed.php?v=%s',
        '16':'http://youwatch.org/embed-%s-640x360.html',
        '22':'http://www.exashare.com/embed-%s-700x400.html',
        '23':'http://videomega.tv/cdn.php?ref=%s&width=700&height=430',
        '29':'http://embed.novamov.com/embed.php?v=%s'
    }
    patron = "=.setupNewPlayer.'([^']+)','(\d+)'"
    embeds = ""
    for video_id, server_index in re.compile( patron, re.DOTALL ).findall( data ):
        try:
            embeds += servers[server_index] % video_id + "\n"
        except:
            pass  # unknown server index: skip it
    itemlist = servertools.find_video_items(data=embeds)
    for videoitem in itemlist:
        videoitem.title = "".join([item.title, videoitem.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.channel = __channel__
    return itemlist
def findvideos(item):
    """List linkbucks/adf.ly gateway links plus servertools mirrors for a
    cineonlineeu entry."""
    logger.info("[cineonlineeu.py] findvideos")
    data = scrapertools.cache_page(item.url)
    itemlist=[]
    # Linkbucks gateways.
    matches = re.compile('(http://[a-z0-9]+.linkbucks.com)', re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for url in matches:
        itemlist.append( Item(channel=__channel__, action="play", server="linkbucks", title="Ver enlace [linkbucks]" , url=url , thumbnail="" , plot=item.plot , folder=False) )
    # adf.ly gateways.
    matches = re.compile('(http://adf.ly/[a-zA-Z0-9]+)', re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for url in matches:
        itemlist.append( Item(channel=__channel__, action="play", server="adfly", title="Ver enlace [adf.ly]" , url=url , thumbnail="" , plot=item.plot , folder=False) )
    from servers import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        # Leave the gateway entries created above untouched.
        if videoitem.server != "linkbucks" and videoitem.server != "adfly":
            videoitem.channel = __channel__
            videoitem.action = "play"
            videoitem.folder = False
            videoitem.title = "["+videoitem.server+"]"
    return itemlist
def play(item):
    """Solve the seriesdanko captcha gate and return the mirrors behind it.

    Downloads the captcha image referenced by the form, solves it with
    get_captcha(), posts id+solution to anonim.php and scans the response
    for playable videos.
    """
    logger.info("pelisalacarta.channels.seriesdanko play (url="+item.url+", server="+item.server+")" )
    data = scrapertools.cache_page(item.url)
    patron = '<input type="hidden" name="id" value="([^"]+)" />.*?'
    patron+= '<img src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    id = matches[0][0]
    captcha = matches[0][1]
    image = os.path.join( config.get_data_path(), 'captcha.png')
    imgurl = "http://seriesdanko.com/" + captcha
    req = urllib2.Request(imgurl)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0')
    req.add_header('Accept-Encoding','gzip, deflate')
    # FIX: close both the HTTP response and the image file deterministically —
    # the response object was leaked before, and the file handle only closed
    # on the happy path.
    f = urllib2.urlopen(req)
    try:
        with open(image, 'wb') as img:
            img.write(f.read())
    finally:
        f.close()
    spc = get_captcha(image)
    post = "id=%s&spc=%s" % (id,spc)
    data = scrapertools.cache_page( "http://seriesdanko.com/anonim.php", post=post )
    return servertools.find_video_items(data=data)
def download_url(url,titulo,server): url = url.replace("\\","") print "Analizando enlace "+url # Averigua el servidor if server=="": itemlist = servertools.find_video_items(data=url) if len(itemlist)==0: print "No se puede identificar el enlace" return item = itemlist[0] print "Es un enlace en "+item.server else: item = Item() item.server = server # Obtiene las URL de descarga video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(item.server,url) if len(video_urls)==0: print "No se ha encontrado nada para descargar" return # Descarga el de mejor calidad, como hace pelisalacarta print "Descargando..." print video_urls devuelve = downloadtools.downloadbest(video_urls,titulo,continuar=True)
def play(item):
    """Follow youanimehd's intermediate player page and return its video links."""
    logger.info("[youanimehd.py] play")
    itemlist = []
    logger.info("url="+item.url)

    # NOTE(review): these headers are built but never passed to cache_page —
    # presumably intended for the requests below; kept as-is to preserve
    # behavior. TODO confirm.
    headers = [["User-Agent", USER_AGENT], ["Referer", item.url]]
    data = scrapertools.cache_page(item.url)

    # The page embeds an intermediate player, e.g.
    # http://youanimehd.com/videoss/video.php?id=161661193_161903494
    player_url = scrapertools.get_match(data, 'src="(http\://youanimehd.com/videoss/video.php[^"]+)"')
    logger.info("url="+player_url)

    headers = [["User-Agent", USER_AGENT], ["Referer", item.url]]
    data = scrapertools.cache_page(player_url)
    logger.info("data="+data)

    # Generic detection over the player page.
    from servers import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = __channel__
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "["+videoitem.server+"]"
    return itemlist
def download_all_episodes(item, channel, first_episode="", preferred_server="vidspot", filter_language=""):
    """Download every episode of a show, preferring *preferred_server* mirrors.

    item             -- series item whose 'extra' names the channel action that
                        lists the episodes (optionally "action###payload")
    channel          -- channel module providing that action (and maybe 'play')
    first_episode    -- "1x01"-style marker; episodes before it are skipped
                        ("" = start from the first)
    preferred_server -- mirrors whose title mentions this server are tried first
    filter_language  -- "es"/"lat"/"vos"/"vo" to restrict mirrors, "" = any
    """
    logger.info(
        "pelisalacarta.platformcode.launcher download_all_episodes, show="
        + item.show)
    show_title = item.show

    # Re-run the listing action that produced this item.
    action = item.extra

    # "###" means 'extra' carries both the action name and a payload.
    if "###" in item.extra:
        action = item.extra.split("###")[0]
        item.extra = item.extra.split("###")[1]

    exec "episode_itemlist = channel." + action + "(item)"

    # Sort the episodes so the first_episode filter works.
    episode_itemlist = sorted(episode_itemlist, key=lambda Item: Item.title)

    from servers import servertools
    from core import downloadtools
    from core import scrapertools

    best_server = preferred_server
    worst_server = "moevideos"  # NOTE(review): assigned but never used below

    # For each episode
    if first_episode == "":
        empezar = True
    else:
        empezar = False

    for episode_item in episode_itemlist:
        try:
            logger.info(
                "pelisalacarta.platformcode.launcher download_all_episodes, episode="
                + episode_item.title)
            # Episode titles must carry an "NxMM" marker; others are skipped.
            episode_title = scrapertools.get_match(episode_item.title, "(\d+x\d+)")
            logger.info(
                "pelisalacarta.platformcode.launcher download_all_episodes, episode="
                + episode_title)
        except:
            import traceback
            logger.info(traceback.format_exc())
            continue

        if first_episode != "" and episode_title == first_episode:
            empezar = True

        if episodio_ya_descargado(show_title, episode_title):
            continue

        if not empezar:
            continue

        # Collect the mirrors for this episode.
        try:
            mirrors_itemlist = channel.findvideos(episode_item)
        except:
            mirrors_itemlist = servertools.find_video_items(episode_item)

        print mirrors_itemlist

        descargado = False

        # Buckets used to re-order mirrors: Spanish first, then Latino/VOS,
        # then the rest; inside each language the preferred server leads.
        new_mirror_itemlist_1 = []
        new_mirror_itemlist_2 = []
        new_mirror_itemlist_3 = []
        new_mirror_itemlist_4 = []
        new_mirror_itemlist_5 = []
        new_mirror_itemlist_6 = []

        for mirror_item in mirrors_itemlist:
            # Spanish mirrors go to the front, everything else further back.
            if "(Español)" in mirror_item.title:
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_1.append(mirror_item)
                else:
                    new_mirror_itemlist_2.append(mirror_item)
            elif "(Latino)" in mirror_item.title:
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_3.append(mirror_item)
                else:
                    new_mirror_itemlist_4.append(mirror_item)
            elif "(VOS)" in mirror_item.title:
                # NOTE(review): VOS shares the Latino buckets (3/4).
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_3.append(mirror_item)
                else:
                    new_mirror_itemlist_4.append(mirror_item)
            else:
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_5.append(mirror_item)
                else:
                    new_mirror_itemlist_6.append(mirror_item)

        mirrors_itemlist = new_mirror_itemlist_1 + new_mirror_itemlist_2 + new_mirror_itemlist_3 + new_mirror_itemlist_4 + new_mirror_itemlist_5 + new_mirror_itemlist_6

        for mirror_item in mirrors_itemlist:
            logger.info(
                "pelisalacarta.platformcode.launcher download_all_episodes, mirror="
                + mirror_item.title)

            # Derive the language tag for the file name and the filter code.
            if "(Español)" in mirror_item.title:
                idioma = "(Español)"
                codigo_idioma = "es"
            elif "(Latino)" in mirror_item.title:
                idioma = "(Latino)"
                codigo_idioma = "lat"
            elif "(VOS)" in mirror_item.title:
                idioma = "(VOS)"
                codigo_idioma = "vos"
            elif "(VO)" in mirror_item.title:
                idioma = "(VO)"
                codigo_idioma = "vo"
            else:
                idioma = "(Desconocido)"
                codigo_idioma = "desconocido"

            logger.info(
                "pelisalacarta.platformcode.launcher filter_language=#"
                + filter_language + "#, codigo_idioma=#" + codigo_idioma + "#")
            if filter_language == "" or (filter_language != "" and filter_language == codigo_idioma):
                logger.info(
                    "pelisalacarta.platformcode.launcher download_all_episodes, downloading mirror"
                )
            else:
                logger.info("pelisalacarta.platformcode.launcher language " +
                            codigo_idioma + " filtered, skipping")
                continue

            # A channel-defined 'play' takes priority when resolving links.
            if hasattr(channel, 'play'):
                video_items = channel.play(mirror_item)
            else:
                video_items = [mirror_item]

            if len(video_items) > 0:
                video_item = video_items[0]

                # Check the mirror is actually downloadable.
                video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(
                    video_item.server, video_item.url, video_password="",
                    muestra_dialogo=False)

                # Queue it for download.
                if puedes:
                    logger.info(
                        "pelisalacarta.platformcode.launcher download_all_episodes, downloading mirror started..."
                    )

                    # The best-quality video is the last one.
                    mediaurl = video_urls[len(video_urls) - 1][1]

                    devuelve = downloadtools.downloadbest(
                        video_urls,
                        show_title + " " + episode_title + " " + idioma + " [" + video_item.server + "]",
                        continuar=False)

                    if devuelve == 0:
                        logger.info(
                            "pelisalacarta.platformcode.launcher download_all_episodes, download ok"
                        )
                        descargado = True
                        break
                    elif devuelve == -1:
                        # User aborted: notify (best effort) and stop everything.
                        try:
                            import xbmcgui
                            advertencia = xbmcgui.Dialog()
                            resultado = advertencia.ok("plugin", "Descarga abortada")
                        except:
                            pass
                        return
                    else:
                        logger.info(
                            "pelisalacarta.platformcode.launcher download_all_episodes, download error, try another mirror"
                        )
                        continue
                else:
                    logger.info(
                        "pelisalacarta.platformcode.launcher download_all_episodes, downloading mirror not available... trying next"
                    )

        if not descargado:
            logger.info(
                "pelisalacarta.platformcode.launcher download_all_episodes, EPISODIO NO DESCARGADO "
                + episode_title)
def findvideos(item):
    """Build the list of "Ver/Descargar" links of a seriesdanko episode page.

    Each table row is parsed for the link URL, a server badge (megavideo /
    megaupload) and a language flag icon that becomes part of the title.
    Non-seriesdanko URLs are delegated to the generic server detector.
    """
    logger.info("[seriesdanko.py] findvideos")

    # On XBMC Eden, record the subtitle name before anything else.
    if config.get_platform() == "xbmceden":
        from core.subtitletools import saveSubtitleName
        saveSubtitleName(item)

    if "seriesdanko.com" in item.url:
        # Download the page; newlines stripped so the row regex can match.
        data = scrapertools.downloadpageGzip(item.url).replace("\n", "")
        patronvideos = "<tr><td class=('tam12'>.*?)</td></tr>"
        matches = re.compile(patronvideos, re.DOTALL).findall(data)
        #for match in matches:
        #    print match

        itemlist = []
        for match in matches:
            # Link URL: first href in the row; skip the row if absent.
            try:
                scrapedurl = urlparse.urljoin(
                    item.url,
                    re.compile(r"href='(.+?)'").findall(match)[0])
            except:
                continue
            # The second image in the row is the server badge / thumbnail.
            try:
                scrapedthumbnail = re.compile(r"src='(.+?)'").findall(match)[1]
                if "megavideo" in scrapedthumbnail:
                    mega = " [Megavideo]"
                elif "megaupload" in scrapedthumbnail:
                    mega = " [Megaupload]"
                else:
                    mega = ""
                if not scrapedthumbnail.startswith("http"):
                    scrapedthumbnail = urlparse.urljoin(
                        item.url, scrapedthumbnail)
            except:
                continue
            # The first image is the language flag; map it to a title suffix.
            try:
                subtitle = re.compile(r"src='(.+?)'").findall(match)[0]
                if "es.png" in subtitle:
                    subtitle = " (Español)"
                elif "la.png" in subtitle:
                    subtitle = " (Latino)"
                elif "vo.png" in subtitle:
                    subtitle = " (Version Original)"
                elif "vos.png" in subtitle:
                    subtitle = " (Subtitulado)"
                # NOTE(review): the next four branches test match[2] — a single
                # character of the row text — so "ca.png" etc. can never be
                # contained in it; they look unreachable. Kept as-is; probably
                # meant to test `match` or `subtitle`. TODO confirm.
                elif "ca.png" in match[2]:
                    subtitle = " (Catalan)"
                elif "ga.jpg" in match[2]:
                    subtitle = " (Gallego)"
                elif "eu.jpg" in match[2]:
                    subtitle = " (Euskera)"
                elif "ba.png" in match[2]:
                    subtitle = " (Bable)"
                else:
                    subtitle = "(desconocido)"
                try:
                    opcion = re.compile(r"(Ver|Descargar)").findall(match)[0]
                except:
                    opcion = "Ver"
                scrapedtitle = opcion + " video" + subtitle + mega
            except:
                scrapedtitle = item.title
            scrapedplot = ""
            #scrapedthumbnail = item.thumbnail
            #if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

            # Append the playable entry for the XBMC listing.
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     plot=scrapedplot,
                     fulltitle=item.fulltitle,
                     extra=item.thumbnail,
                     fanart=item.thumbnail,
                     folder=False))
    else:
        # Not a seriesdanko URL: fall back to generic detection.
        from core import servertools
        itemlist = servertools.find_video_items(item)

    return itemlist
def _append_vk(itemlist, data, patron, title):
    """Append a VK mirror resolved through the goo.gl redirect, if present."""
    try:
        vk_code = scrapertools.get_match(data, patron)
        # goo.gl short link: the real VK URL is in the Location header.
        vk_url = scrapertools.get_header_from_response(
            "http://goo.gl/" + vk_code, header_to_get="location")
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 server="vk",
                 url=vk_url,
                 folder=False))
    except:
        logger.info("No encontrado enlace VK")


def _append_putlocker(itemlist, data, patron, title):
    """Append a Putlocker embed mirror, if the marker pattern is present."""
    try:
        putlocker_code = scrapertools.get_match(data, patron)
        putlocker_url = "http://www.putlocker.com/embed/" + putlocker_code
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 server="putlocker",
                 url=putlocker_url,
                 folder=False))
    except:
        logger.info("No encontrado enlace PUTLOCKER")


def findvideos(item):
    """List the mirrors of a peliculasonlineflv film page.

    Runs the generic server detector first, then probes the page's manual
    VK / Putlocker markers (Latino, Subtitulado and plain variants, in that
    order). The six copy-pasted try-blocks were folded into two helpers.
    """
    logger.info("[peliculasonlineflv.py] findvideos")
    itemlist = []

    # Download the page.
    data = scrapertools.cachePage(item.url)

    # Generic detection over the raw page.
    from servers import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = __channel__
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "Ver en " + videoitem.server
        videoitem.fulltitle = item.fulltitle

    # Manual marker patterns, in the original priority order.
    _append_vk(itemlist, data, "vklat\=([a-zA-Z0-9]+)", "Ver en VK (Latino)")
    _append_putlocker(itemlist, data, "plat\=([A-Z0-9]+)", "Ver en Putlocker (Latino)")
    _append_vk(itemlist, data, "vksub\=([a-zA-Z0-9]+)", "Ver en VK (Subtitulado)")
    _append_putlocker(itemlist, data, "plsub\=([A-Z0-9]+)", "Ver en Putlocker (Subtitulado)")
    _append_vk(itemlist, data, "vk\=([a-zA-Z0-9]+)", "Ver en VK")
    _append_putlocker(itemlist, data, "put\=([A-Z0-9]+)", "Ver en Putlocker")

    return itemlist
def findvideos(item):
    """List the playable mirrors of a cinetux page.

    Scrapes the two mirror table layouts (films and episodes); if neither
    matches, falls back to the generic server detector.

    BUGFIX: the fallback numbered every option "Opción 1" because the counter
    was never incremented; it now counts 1, 2, 3... The assignment also
    created a useless local 'channel', which was removed.
    """
    logger.info("[cinetux.py] findvideos")
    itemlist = []

    # Download the page past the Cloudflare check.
    data = anti_cloudflare(item.url)
    logger.info("data=" + data)

    # The synopsis cell becomes the plot of the listing.
    item.plot = scrapertools.find_single_match(
        data, '<td><span class="info">Sinops[^<]+</span>([^<]+)</td>')
    item.plot = scrapertools.htmlclean(item.plot)
    item.contentPlot = item.plot

    # First table layout: option / server / language / quality / link.
    patron = '<tr class="tabletr">[^<]+'
    patron += '<td class="opcion-td"><img[^>]+>([^>]+)</td>[^<]+'
    patron += '<td class="server-td[^>]+>([^<]+)</td>[^<]+'
    patron += '<td class="idioma-td[^>]+>([^<]+)</td>[^<]+'
    patron += '<td class="fuente-td[^>]+>([^<]+)</td>[^<]+'
    patron += '<td class="link-td">(.*?)</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedtitle, scrapedserver, scrapedlanguage, scrapedquality, scrapedlink in matches:
        title = "Ver " + scrapedtitle + " en " + scrapedserver + " (" + scrapedlanguage + ") (" + scrapedquality + ")"
        url = scrapedlink
        thumbnail = servertools.guess_server_thumbnail(scrapedserver)
        plot = ""
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 fulltitle=item.fulltitle,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 fanart="http://pelisalacarta.mimediacenter.info/fanart/cinetux.jpg",
                 parentContent=item,
                 folder=False))

    # Second table layout (episode rows); language/quality go in fulltitle.
    patron = '<tr class="tabletr">[^<]+'
    patron += '<td class="episode-server[^>]+><img[^>]+>([^>]+)</td>[^<]+'
    patron += '<td class="episode-server-img[^>]+>([^<]+)</td>[^<]+'
    patron += '<td class="episode-lang[^>]+>([^>]+)</td>[^<]+'
    patron += '<td align="center">([^<]+)</td>[^<]+'
    patron += '<td(.*?)</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedtitle, scrapedserver, scrapedlanguage, scrapedquality, scrapedlink in matches:
        title = "Ver " + scrapedtitle + " en " + scrapedserver + " (" + scrapedlanguage + ") (" + scrapedquality + ")"
        url = scrapedlink
        thumbnail = servertools.guess_server_thumbnail(scrapedserver)
        plot = ""
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 fulltitle=item.fulltitle + " [" + scrapedlanguage + "][" + scrapedquality + "]",
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 parentContent=item,
                 folder=False))

    # Fallback: generic detection, numbering each option.
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(item=item, data=data)
        i = 1
        for videoitem in itemlist:
            videoitem.title = "Ver Opción %d en %s" % (i, videoitem.server)
            videoitem.fulltitle = item.fulltitle
            videoitem.channel = __channel__
            i += 1  # was missing: every entry used to read "Opción 1"

    return itemlist
def findvideos(item):
    """Delegate link extraction entirely to the generic server scanner."""
    from servers import servertools
    video_items = servertools.find_video_items(item=item, channel=item.channel)
    return video_items
def play(item):
    """Return the playable items the server scanner detects in the URL itself."""
    logger.info("[repelis] play url="+item.url)
    return servertools.find_video_items(data=item.url)
def download_all_episodes(item,channel,first_episode="",preferred_server="vidspot",filter_language=""): plugintools.log("streamondemand-pureita.navigation.py download_all_episodes, show="+item.show) show_title = item.show # Obtiene el listado desde el que se llamó action = item.extra # Esta marca es porque el item tiene algo más aparte en el atributo "extra" if "###" in item.extra: action = item.extra.split("###")[0] item.extra = item.extra.split("###")[1] exec "episode_itemlist = channel."+action+"(item)" # Ordena los episodios para que funcione el filtro de first_episode episode_itemlist = sorted(episode_itemlist, key=lambda Item: Item.title) from servers import servertools from core import downloadtools # Para cada episodio if first_episode=="": empezar = True else: empezar = False for episode_item in episode_itemlist: if episode_item.action == "add_serie_to_library" or episode_item.action == "download_all_episodes": continue try: plugintools.log("streamondemand-pureita.navigation.py download_all_episodes, episode="+episode_item.title) #episode_title = scrapertools.get_match(episode_item.title,"(\d+x\d+)") episode_title = episode_item.title episode_title = re.sub(r"\[COLOR [^]]*\]", "", episode_title) episode_title = re.sub(r"\[/COLOR\]", "", episode_title) plugintools.log("streamondemand-pureita.navigation.py download_all_episodes, episode="+episode_title) except: import traceback plugintools.log(traceback.format_exc()) continue if first_episode!="" and episode_title==first_episode: empezar = True if episodio_ya_descargado(show_title,episode_title): continue if not empezar: continue # Extrae los mirrors try: #mirrors_itemlist = channel.findvideos(episode_item) exec "mirrors_itemlist = channel."+episode_item.action+"(episode_item)" except: mirrors_itemlist = servertools.find_video_items(episode_item) print mirrors_itemlist descargado = False for mirror_item in mirrors_itemlist: plugintools.log("streamondemand-pureita.navigation.py download_all_episodes, 
mirror="+mirror_item.title) if hasattr(channel, 'play'): video_items = channel.play(mirror_item) else: video_items = [mirror_item] if len(video_items)>0: video_item = video_items[0] # Comprueba que esté disponible video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing( video_item.server , video_item.url , video_password="" , muestra_dialogo=False) # Lo añade a la lista de descargas if puedes: plugintools.log("streamondemand-pureita.navigation.py download_all_episodes, downloading mirror started...") # El vídeo de más calidad es el último #mediaurl = video_urls[len(video_urls)-1][1] devuelve = downloadtools.downloadbest(video_urls,show_title+" "+episode_title+" ["+video_item.server+"]",continuar=False) if devuelve==0: plugintools.log("streamondemand-pureita.navigation.py download_all_episodes, download ok") descargado = True break elif devuelve==-1: try: import xbmcgui advertencia = xbmcgui.Dialog() resultado = advertencia.ok("plugin" , "Download interrotto") except: pass return else: plugintools.log("streamondemand-pureita.navigation.py download_all_episodes, download error, try another mirror") continue else: plugintools.log("streamondemand-pureita.navigation.py download_all_episodes, downloading mirror not available... trying next") if not descargado: plugintools.log("streamondemand-pureita.navigation.py download_all_episodes, EPISODIO NO DESCARGADO "+episode_title) return itemlist
def get_next_items( item ):
    """Dispatch *item* to the right handler and return the next item list.

    Routes between the built-in navigation menus, the channel selector, the
    settings dialog and dynamically-imported channel modules. 'play'-style
    actions start playback and return []; any exception is caught at the top
    level and turned into an error entry.
    """
    plugintools.log("navigation.get_next_items item="+item.tostring())

    try:
        # ----------------------------------------------------------------
        # Main menu
        # ----------------------------------------------------------------
        if item.channel=="navigation":

            # --- Update channels list ---------------------------------------
            from core import config
            if item.action=="mainlist":
                plugintools.log("navigation.get_next_items Main menu")

                if config.get_setting("updatechannels")=="true":
                    try:
                        from core import updater
                        actualizado = updater.updatechannel("channelselector")

                        if actualizado:
                            import xbmcgui
                            advertencia = xbmcgui.Dialog()
                            advertencia.ok("PureITA",config.get_localized_string(30064))
                    except:
                        pass
            # ----------------------------------------------------------------

            if item.action=="mainlist":
                plugintools.log("navigation.get_next_items Main menu")
                itemlist = channelselector.getmainlist("bannermenu")

        elif item.channel=="channelselector":
            if item.action=="channeltypes":
                plugintools.log("navigation.get_next_items Channel types menu")
                itemlist = channelselector.getchanneltypes("bannermenu")
            elif item.action=="listchannels":
                plugintools.log("navigation.get_next_items Channel list menu")
                itemlist = channelselector.filterchannels(item.category,"bannermenu")

        elif item.channel=="configuracion":
            plugintools.open_settings_dialog()
            return []

        else:
            # A real channel: default action is its main list.
            if item.action=="":
                item.action="mainlist"

            plugintools.log("navigation.get_next_items Channel code ("+item.channel+"."+item.action+")")

            # --- Update channels files --------------------------------------
            if item.action=="mainlist":
                from core import config
                if config.get_setting("updatechannels")=="true":
                    try:
                        from core import updater
                        actualizado = updater.updatechannel(item.channel)

                        if actualizado:
                            import xbmcgui
                            advertencia = xbmcgui.Dialog()
                            advertencia.ok("plugin",item.channel,config.get_localized_string(30063))
                    except:
                        pass
            # ----------------------------------------------------------------

            # Import the channel module: first from channels/, then core/.
            try:
                exec "import channels."+item.channel+" as channel"
            except:
                exec "import core."+item.channel+" as channel"

            from platformcode import xbmctools

            if item.action=="play":
                plugintools.log("navigation.get_next_items play")

                # A channel-defined "play" takes priority over the core one.
                if hasattr(channel, 'play'):
                    plugintools.log("streamondemand-pureita.navigation.py Channel has its own 'play' method")
                    itemlist = channel.play(item)
                    if len(itemlist)>0:
                        item = itemlist[0]
                        # FIXME: this error should be handled differently —
                        # going back without watching the video fails here
                        try:
                            xbmctools.play_video(channel=item.channel, server=item.server, url=item.url, category=item.category, title=item.title, thumbnail=item.thumbnail, plot=item.plot, extra=item.extra, subtitle=item.subtitle, video_password = item.password, fulltitle=item.fulltitle, Serie=item.show)
                        except:
                            pass
                    else:
                        import xbmcgui
                        ventana_error = xbmcgui.Dialog()
                        ok = ventana_error.ok ("plugin", "Nessun File Da Riprodurre")
                else:
                    plugintools.log("streamondemand-pureita.navigation.py No channel 'play' method, executing core method")
                    # FIXME: same as above — going back without watching fails;
                    # better to trigger the play from the window itself
                    try:
                        xbmctools.play_video(channel=item.channel, server=item.server, url=item.url, category=item.category, title=item.title, thumbnail=item.thumbnail, plot=item.plot, extra=item.extra, subtitle=item.subtitle, video_password = item.password, fulltitle=item.fulltitle, Serie=item.show)
                    except:
                        pass

                return []

            elif item.action=="findvideos":
                plugintools.log("navigation.get_next_items findvideos")

                # A channel-defined "findvideos" takes priority
                if hasattr(channel, 'findvideos'):
                    plugintools.log("streamondemand-pureita.navigation.py Channel has its own 'findvideos' method")
                    itemlist = channel.findvideos(item)
                else:
                    itemlist = []

                # Fall back to the generic detector, then to an error entry.
                if len(itemlist)==0:
                    from servers import servertools
                    itemlist = servertools.find_video_items(item)

                if len(itemlist)==0:
                    itemlist = [ Item(title="Nessun video trovato",
                                      thumbnail=os.path.join( plugintools.get_runtime_path() , "resources" , "images" , "thumb_error.png" )) ]

            # ---------------add_serie_to_library-----------
            elif item.action=="add_serie_to_library":
                plugintools.log("navigation.get_next_items add_serie_to_library")
                from platformcode import library
                import xbmcgui

                # Re-run the listing action that produced this item.
                action = item.extra

                # "###" means 'extra' carries both the action name and a payload.
                if "###" in item.extra:
                    action = item.extra.split("###")[0]
                    item.extra = item.extra.split("###")[1]

                exec "itemlist = channel."+action+"(item)"

                # Progress dialog while episodes are written to the library.
                pDialog = xbmcgui.DialogProgress()
                ret = pDialog.create('streamondemand-pureita', 'Añadiendo episodios...')
                pDialog.update(0, 'Añadiendo episodio...')

                totalepisodes = len(itemlist)
                plugintools.log ("navigation.get_next_items Total Episodios:"+str(totalepisodes))

                i = 0
                errores = 0
                nuevos = 0
                # NOTE(review): this loop rebinds 'item' — after it, 'item' is
                # the LAST episode, which the series.xml write below relies on.
                for item in itemlist:
                    i = i + 1
                    pDialog.update(i*100/totalepisodes, 'Añadiendo episodio...',item.title)
                    plugintools.log("streamondemand-pureita.navigation.py add_serie_to_library, title="+item.title)

                    if (pDialog.iscanceled()):
                        return

                    try:
                        #(titulo="",url="",thumbnail="",server="",plot="",canal="",category="Cine",Serie="",verbose=True,accion="strm",pedirnombre=True):
                        # Save everything except the synthetic "add this
                        # series..." / "download this series..." entries.
                        if item.action!="add_serie_to_library" and item.action!="download_all_episodes":
                            nuevos = nuevos + library.savelibrary( titulo=item.title , url=item.url , thumbnail=item.thumbnail , server=item.server , plot=item.plot , canal=item.channel , category="Series" , Serie=item.show.strip() , verbose=False, accion="play_from_library", pedirnombre=False, subtitle=item.subtitle, extra=item.extra )
                    except IOError:
                        import sys
                        for line in sys.exc_info():
                            logger.error( "%s" % line )
                        plugintools.log("streamondemand-pureita.navigation.py Error al grabar el archivo "+item.title)
                        errores = errores + 1

                pDialog.close()

                # Library refresh result shown to the user.
                itemlist=[]
                if errores > 0:
                    itemlist.append(Item(title="ERRORE, la serie NON si è aggiunta alla biblioteca o la fatto in modo incompleto"))
                    plugintools.log ("navigation.get_next_items No se pudo añadir "+str(errores)+" episodios")
                else:
                    itemlist.append(Item(title="La serie è stata aggiunta alla biblioteca"))
                    plugintools.log ("navigation.get_next_items Ningún error al añadir "+str(errores)+" episodios")

                # FIXME:jesus commented out because it does not work well on
                # every XBMC version
                #library.update(totalepisodes,errores,nuevos)
                #xbmctools.renderItems(itemlist, params, url, category)

                # Register the series in the update list (series.xml).
                from core import config
                nombre_fichero_config_canal = os.path.join( config.get_library_path() , "series.xml" )
                if not os.path.exists(nombre_fichero_config_canal):
                    nombre_fichero_config_canal = os.path.join( config.get_data_path() , "series.xml" )
                plugintools.log("nombre_fichero_config_canal="+nombre_fichero_config_canal)

                if not os.path.exists(nombre_fichero_config_canal):
                    f = open( nombre_fichero_config_canal , "w" )
                else:
                    # Rewrite the existing content, then append the new series.
                    f = open( nombre_fichero_config_canal , "r" )
                    contenido = f.read()
                    f.close()
                    f = open( nombre_fichero_config_canal , "w" )
                    f.write(contenido)

                from platformcode import library
                f.write( library.title_to_folder_name(item.show)+","+item.url+","+item.channel+"\n")
                f.close();

                return itemlist

            # --------------------------------------------------------------------
            elif item.action=="download_all_episodes":
                plugintools.log("navigation.get_next_items download_all_episodes")
                download_all_episodes(item,channel)
            #---------------------------------------------------------------------

            else:
                # Generic channel action: search, banner main lists, or the
                # dynamically-named action stored in item.action.
                if item.action=="search":
                    tecleado = plugintools.keyboard_input()
                    if tecleado!="":
                        tecleado = tecleado.replace(" ", "+")
                        itemlist = channel.search(item,tecleado)
                elif item.channel=="novedades" and item.action=="mainlist":
                    itemlist = channel.mainlist(item,"bannermenu")
                elif item.channel=="buscador" and item.action=="mainlist":
                    itemlist = channel.mainlist(item,"bannermenu")
                else:
                    exec "itemlist = channel."+item.action+"(item)"

                # Give every entry without a thumbnail a default one.
                for loaded_item in itemlist:
                    if loaded_item.thumbnail=="":
                        if loaded_item.folder:
                            loaded_item.thumbnail = os.path.join( plugintools.get_runtime_path() , "resources" , "images" , "thumb_folder.png" )
                        else:
                            loaded_item.thumbnail = os.path.join( plugintools.get_runtime_path() , "resources" , "images" , "thumb_nofolder.png" )

                if len(itemlist)==0:
                    itemlist = [ Item(title="Nessun Elemento Da Visualizzare",
                                      thumbnail=os.path.join( plugintools.get_runtime_path() , "resources" , "images" , "thumb_error.png" )) ]
    except:
        # Top-level guard: log the traceback and show a single error entry.
        import traceback
        plugintools.log("navigation.get_next_items "+traceback.format_exc())
        itemlist = [ Item(title="Rimozione Effettuata - Riavviare",
                          thumbnail=os.path.join( plugintools.get_runtime_path() , "resources" , "images" , "thumb_error.png" )) ]

    return itemlist
def findvid(item): logger.info("[cineblog01.py] findvideos") itemlist = [] ## Descarga la página data = scrapertools.cache_page(item.url) data = scrapertools.decodeHtmlentities(data).replace('http://cineblog01.pw', 'http://k4pp4.pw') ## Extrae las entradas streaming = scrapertools.find_single_match(data, '<strong>Streaming:</strong>(.*?)<table height="30">') patron = '<td><a href="([^"]+)" target="_blank">([^<]+)</a></td>' matches = re.compile(patron, re.DOTALL).findall(streaming) for scrapedurl, scrapedtitle in matches: print "##### findvideos Streaming ## %s ## %s ##" % (scrapedurl, scrapedtitle) title = "[COLOR orange]Streaming:[/COLOR] " + item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]" itemlist.append( Item(channel=__channel__, action="play", title=title, url=scrapedurl, fulltitle=item.title, show=item.title, folder=False)) streaming_hd = scrapertools.find_single_match(data, '<strong>Streaming HD[^<]+</strong>(.*?)<table height="30">') patron = '<td><a href="([^"]+)" target="_blank">([^<]+)</a></td>' matches = re.compile(patron, re.DOTALL).findall(streaming_hd) for scrapedurl, scrapedtitle in matches: print "##### findvideos Streaming HD ## %s ## %s ##" % (scrapedurl, scrapedtitle) title = "[COLOR yellow]Streaming HD:[/COLOR] " + item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]" itemlist.append( Item(channel=__channel__, action="play", title=title, url=scrapedurl, fulltitle=item.title, show=item.title, folder=False)) download = scrapertools.find_single_match(data, '<strong>Download:</strong>(.*?)<table height="30">') patron = '<td><a href="([^"]+)" target="_blank">([^<]+)</a></td>' matches = re.compile(patron, re.DOTALL).findall(download) for scrapedurl, scrapedtitle in matches: print "##### findvideos Download ## %s ## %s ##" % (scrapedurl, scrapedtitle) title = "[COLOR aqua]Download:[/COLOR] " + item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]" itemlist.append( Item(channel=__channel__, action="play", title=title, url=scrapedurl, 
fulltitle=item.title, show=item.title, folder=False)) download_hd = scrapertools.find_single_match(data, '<strong>Download HD[^<]+</strong>(.*?)<table width="100%" height="20">') patron = '<td><a href="([^"]+)" target="_blank">([^<]+)</a></td>' matches = re.compile(patron, re.DOTALL).findall(download_hd) for scrapedurl, scrapedtitle in matches: print "##### findvideos Download HD ## %s ## %s ##" % (scrapedurl, scrapedtitle) title = "[COLOR azure]Download HD:[/COLOR] " + item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]" itemlist.append( Item(channel=__channel__, action="play", title=title, url=scrapedurl, fulltitle=item.title, show=item.title, folder=False)) if len(itemlist) == 0: itemlist = servertools.find_video_items(item=item) return itemlist
def run():
    """Plugin entry point: parse invocation parameters from sys.argv and
    dispatch to the requested channel/action.

    Top-level navigation actions (selectchannel, update, channeltypes,
    categories, listchannels) are served by the channelselector module; every
    other action is routed to the channel named in the parameters, which may
    live in channels/ or in core/.  Network failures (urllib2.URLError) are
    caught at the bottom and reported through an XBMC dialog.
    """
    logger.info("streamondemand.platformcode.launcher run")

    # Test if all the required directories are created
    config.verify_directories_created()

    # Extract parameters from sys.argv
    params, fanart, channel_name, title, fulltitle, url, thumbnail, plot, action, server, extra, subtitle, viewmode, category, show, password = extract_parameters()
    logger.info("streamondemand.platformcode.launcher fanart=%s, channel_name=%s, title=%s, fulltitle=%s, url=%s, thumbnail=%s, plot=%s, action=%s, server=%s, extra=%s, subtitle=%s, category=%s, show=%s, password=%s" % (fanart, channel_name, title, fulltitle, url, thumbnail, plot, action, server, extra, subtitle, category, show, password))

    try:
        # Default action - choose a channel
        if ( action=="selectchannel" ):
            # Deletes the cookies file to avoid problems with MV
            #ficherocookies = os.path.join( config.get_data_path(), 'cookies.lwp' )
            #if os.path.exists(ficherocookies):
            #    os.remove(ficherocookies)

            # Best-effort self-update of the channel selector; any failure is ignored.
            if config.get_setting("updatechannels")=="true":
                try:
                    from core import updater
                    actualizado = updater.updatechannel("channelselector")

                    if actualizado:
                        import xbmcgui
                        advertencia = xbmcgui.Dialog()
                        advertencia.ok("tvalacarta",config.get_localized_string(30064))
                except:
                    pass

            import channelselector as plugin
            plugin.mainlist(params, url, category)

        # Update the version
        elif ( action=="update" ):
            try:
                from core import updater
                updater.update(params)
            except ImportError:
                logger.info("streamondemand.platformcode.launcher Actualizacion automática desactivada")
            #import channelselector as plugin
            #plugin.listchannels(params, url, category)
            if config.get_system_platform()!="xbox":
                import xbmc
                xbmc.executebuiltin( "Container.Refresh" )

        elif (action=="channeltypes"):
            import channelselector as plugin
            plugin.channeltypes(params,url,category)

        elif (action=="categories"):
            import channelselector as plugin
            plugin.categories(params,url,category)

        elif (action=="listchannels"):
            import channelselector as plugin
            plugin.listchannels(params,url,category)

        # The remaining actions come in the "action" parameter, and the channel in the "channel" parameter
        else:
            # Best-effort auto-update of the requested channel before running it.
            if action=="mainlist" and config.get_setting("updatechannels")=="true":
                try:
                    from core import updater
                    actualizado = updater.updatechannel(channel_name)

                    if actualizado:
                        import xbmcgui
                        advertencia = xbmcgui.Dialog()
                        advertencia.ok("plugin",channel_name,config.get_localized_string(30063))
                except:
                    pass

            # The action may live in the core, or be a regular channel. The search engine is a special channel that lives in pelisalacarta
            regular_channel_path = os.path.join( config.get_runtime_path() , 'channels' , channel_name+".py" )
            core_channel_path = os.path.join( config.get_runtime_path(), 'core' , channel_name+".py" )
            logger.info("streamondemand.platformcode.launcher regular_channel_path=%s" % regular_channel_path)
            logger.info("streamondemand.platformcode.launcher core_channel_path=%s" % core_channel_path)

            # NOTE(review): channel_name comes from the invocation parameters and is
            # interpolated into an exec statement below — trusted input is assumed.
            if channel_name=="personal" or channel_name=="personal2" or channel_name=="personal3" or channel_name=="personal4" or channel_name=="personal5":
                import channels.personal as channel
            elif os.path.exists( regular_channel_path ):
                exec "import channels."+channel_name+" as channel"
            elif os.path.exists( core_channel_path ):
                exec "from core import "+channel_name+" as channel"

            logger.info("streamondemand.platformcode.launcher running channel %s %s" % (channel.__name__ , channel.__file__))

            generico = False
            # I did it this way because the search engine can be generic or normal; it will stay like this until every channel is generic
            if category == "Buscador_Generico":
                generico = True
            else:
                try:
                    generico = channel.isGeneric()
                except:
                    generico = False

            if not generico:
                logger.info("streamondemand.platformcode.launcher xbmc native channel")
                if (action=="strm"):
                    from platformcode import xbmctools
                    xbmctools.playstrm(params, url, category)
                else:
                    exec "channel."+action+"(params, url, category)"
            else:
                logger.info("streamondemand.platformcode.launcher multiplatform channel")
                from core.item import Item
                item = Item(channel=channel_name, title=title , fulltitle=fulltitle, url=url, thumbnail=thumbnail , plot=plot , server=server, category=category, extra=extra, subtitle=subtitle, viewmode=viewmode, show=show, password=password, fanart=fanart)

                '''
                if item.subtitle!="":
                    logger.info("streamondemand.platformcode.launcher Downloading subtitle file "+item.subtitle)
                    from core import downloadtools
                    ficherosubtitulo = os.path.join( config.get_data_path() , "subtitulo.srt" )
                    if os.path.exists(ficherosubtitulo):
                        os.remove(ficherosubtitulo)

                    downloadtools.downloadfile(item.subtitle, ficherosubtitulo )
                    config.set_setting("subtitulo","true")
                else:
                    logger.info("streamondemand.platformcode.launcher No subtitle")
                '''

                from platformcode import xbmctools

                if action=="play":
                    logger.info("streamondemand.platformcode.launcher play")

                    # If the channel has its own "play" action it takes priority
                    if hasattr(channel, 'play'):
                        logger.info("streamondemand.platformcode.launcher executing channel 'play' method")
                        itemlist = channel.play(item)
                        if len(itemlist)>0:
                            item = itemlist[0]
                            xbmctools.play_video(channel=channel_name, server=item.server, url=item.url, category=item.category, title=item.title, thumbnail=item.thumbnail, plot=item.plot, extra=item.extra, subtitle=item.subtitle, video_password = item.password, fulltitle=item.fulltitle, Serie=item.show)
                        else:
                            import xbmcgui
                            ventana_error = xbmcgui.Dialog()
                            ok = ventana_error.ok ("plugin", "Niente da riprodurre")
                    else:
                        logger.info("streamondemand.platformcode.launcher no channel 'play' method, executing core method")
                        xbmctools.play_video(channel=channel_name, server=item.server, url=item.url, category=item.category, title=item.title, thumbnail=item.thumbnail, plot=item.plot, extra=item.extra, subtitle=item.subtitle, video_password = item.password, fulltitle=item.fulltitle, Serie=item.show)

                elif action=="strm_detail" or action=="play_from_library":
                    logger.info("streamondemand.platformcode.launcher play_from_library")

                    fulltitle = item.show + " " + item.title
                    elegido = Item(url="")

                    logger.info("item.server=#"+item.server+"#")
                    # Runs find_videos, from the channel or the common detector
                    try:
                        itemlist = channel.findvideos(item)
                    except:
                        from servers import servertools
                        itemlist = servertools.find_video_items(item)

                    if len(itemlist)>0:
                        #for item2 in itemlist:
                        #    logger.info(item2.title+" "+item2.subtitle)

                        # The user picks the mirror
                        opciones = []
                        for item in itemlist:
                            opciones.append(item.title)
                        import xbmcgui
                        dia = xbmcgui.Dialog()
                        seleccion = dia.select(config.get_localized_string(30163), opciones)
                        elegido = itemlist[seleccion]

                        if seleccion==-1:
                            return
                    else:
                        elegido = item

                    # Runs the channel's play method, if it has one
                    try:
                        itemlist = channel.play(elegido)
                        item = itemlist[0]
                    except:
                        item = elegido

                    logger.info("Elegido %s (sub %s)" % (item.title,item.subtitle))

                    from platformcode import xbmctools
                    logger.info("subtitle="+item.subtitle)
                    xbmctools.play_video(strmfile=True, channel=item.channel, server=item.server, url=item.url, category=item.category, title=item.title, thumbnail=item.thumbnail, plot=item.plot, extra=item.extra, subtitle=item.subtitle, video_password = item.password, fulltitle=fulltitle)

                elif action=="add_pelicula_to_library":
                    logger.info("streamondemand.platformcode.launcher add_pelicula_to_library")
                    from platformcode import library
                    # Gets the listing it was called from
                    library.savelibrary( titulo=item.fulltitle , url=item.url , thumbnail=item.thumbnail , server=item.server , plot=item.plot , canal=item.channel , category="Cine" , Serie=item.show.strip() , verbose=False, accion="play_from_library", pedirnombre=False, subtitle=item.subtitle )

                elif action=="add_serie_to_library":
                    logger.info("streamondemand.platformcode.launcher add_serie_to_library, show=#"+item.show+"#")
                    from platformcode import library
                    import xbmcgui

                    # Gets the listing it was called from
                    action = item.extra

                    # This marker exists because the item carries something else in the "extra" attribute
                    if "###" in item.extra:
                        action = item.extra.split("###")[0]
                        item.extra = item.extra.split("###")[1]

                    exec "itemlist = channel."+action+"(item)"

                    # Progress dialog
                    pDialog = xbmcgui.DialogProgress()
                    ret = pDialog.create('pelisalacarta', 'Añadiendo episodios...')
                    pDialog.update(0, 'Añadiendo episodio...')

                    totalepisodes = len(itemlist)
                    logger.info ("[launcher.py] Total Episodios:"+str(totalepisodes))
                    i = 0
                    errores = 0
                    nuevos = 0
                    for item in itemlist:
                        i = i + 1
                        pDialog.update(i*100/totalepisodes, 'Añadiendo episodio...',item.title)
                        logger.info("streamondemand.platformcode.launcher add_serie_to_library, title="+item.title)
                        if (pDialog.iscanceled()):
                            return

                        try:
                            #(titulo="",url="",thumbnail="",server="",plot="",canal="",category="Cine",Serie="",verbose=True,accion="strm",pedirnombre=True):
                            # Adds every episode except the "Añadir esta serie..." / "Descargar esta serie..." pseudo-entries
                            if item.action!="add_serie_to_library" and item.action!="download_all_episodes":
                                nuevos = nuevos + library.savelibrary( titulo=item.title , url=item.url , thumbnail=item.thumbnail , server=item.server , plot=item.plot , canal=item.channel , category="Series" , Serie=item.show.strip() , verbose=False, accion="play_from_library", pedirnombre=False, subtitle=item.subtitle, extra=item.extra )
                        except IOError:
                            import sys
                            for line in sys.exc_info():
                                logger.error( "%s" % line )
                            logger.info("streamondemand.platformcode.launcher Error al grabar el archivo "+item.title)
                            errores = errores + 1

                    pDialog.close()

                    # Library refresh feedback
                    itemlist=[]
                    if errores > 0:
                        itemlist.append(Item(title="ERRORE, la serie NON si è aggiunta alla biblioteca o la fatto in modo incompleto"))
                        logger.info ("[launcher.py] No se pudo añadir "+str(errores)+" episodios")
                    else:
                        itemlist.append(Item(title="La serie è stata aggiunta alla biblioteca"))
                        logger.info ("[launcher.py] Ningún error al añadir "+str(errores)+" episodios")

                    # FIXME:jesus Commented out because it does not work well on every XBMC version
                    #library.update(totalepisodes,errores,nuevos)
                    xbmctools.renderItems(itemlist, params, url, category)

                    # List of series to update
                    nombre_fichero_config_canal = os.path.join( config.get_library_path() , "series.xml" )
                    if not os.path.exists(nombre_fichero_config_canal):
                        nombre_fichero_config_canal = os.path.join( config.get_data_path() , "series.xml" )

                    logger.info("nombre_fichero_config_canal="+nombre_fichero_config_canal)

                    # If the file exists, read it and rewrite its current content
                    # before appending the new series line below.
                    if not os.path.exists(nombre_fichero_config_canal):
                        f = open( nombre_fichero_config_canal , "w" )
                    else:
                        f = open( nombre_fichero_config_canal , "r" )
                        contenido = f.read()
                        f.close()
                        f = open( nombre_fichero_config_canal , "w" )
                        f.write(contenido)

                    from platformcode import library
                    f.write( library.title_to_folder_name(item.show)+","+item.url+","+item.channel+"\n")
                    f.close();

                elif action=="download_all_episodes":
                    download_all_episodes(item,channel)

                elif action=="search":
                    logger.info("streamondemand.platformcode.launcher search")
                    import xbmc
                    keyboard = xbmc.Keyboard("")
                    keyboard.doModal()
                    if (keyboard.isConfirmed()):
                        tecleado = keyboard.getText()
                        tecleado = tecleado.replace(" ", "+")
                        itemlist = channel.search(item,tecleado)
                    else:
                        itemlist = []
                    xbmctools.renderItems(itemlist, params, url, category)

                else:
                    logger.info("streamondemand.platformcode.launcher executing channel '"+action+"' method")
                    if action!="findvideos":
                        exec "itemlist = channel."+action+"(item)"
                        #for item in itemlist:
                        #    logger.info("viemode="+item.viewmode)
                    else:
                        # Tries to run a possible "findvideos" function of the channel
                        if hasattr(channel, 'findvideos'):
                            exec "itemlist = channel."+action+"(item)"
                        # If that fails, launches the generic method to detect videos
                        else:
                            logger.info("streamondemand.platformcode.launcher no channel 'findvideos' method, executing core method")
                            from servers import servertools
                            itemlist = servertools.find_video_items(item)

                    from platformcode import subtitletools
                    subtitletools.saveSubtitleName(item)

                    # Enables library mode for all generic channels, so the plot is shown
                    import xbmcplugin
                    import sys
                    handle = sys.argv[1]
                    xbmcplugin.setContent(int( handle ),"movies")

                    # Adds the items to the XBMC list
                    xbmctools.renderItems(itemlist, params, url, category)

    except urllib2.URLError,e:
        import traceback,sys
        from pprint import pprint
        exc_type, exc_value, exc_tb = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_tb)
        for line in lines:
            line_splits = line.split("\n")
            for line_split in line_splits:
                logger.error(line_split)

        import xbmcgui
        ventana_error = xbmcgui.Dialog()
        # Catches errors raised locally by the internal libraries
        if hasattr(e, 'reason'):
            logger.info("Razon del error, codigo: %s , Razon: %s" % (e.reason[0], e.reason[1]))
            texto = config.get_localized_string(30050) # "No se puede conectar con el sitio web"
            ok = ventana_error.ok ("plugin", texto)
        # Catches errors with a response code from the requested external server
        elif hasattr(e,'code'):
            logger.info("codigo de error HTTP : %d" %e.code)
            texto = (config.get_localized_string(30051) % e.code) # "El sitio web no funciona correctamente (error http %d)"
            ok = ventana_error.ok ("plugin", texto)
def findvid(item): logger.info("[cineblog01.py] findvideos") itemlist = [] ## Descarga la página data = anti_cloudflare(item.url) data = scrapertools.decodeHtmlentities(data).replace( 'http://cineblog01.pw', 'http://k4pp4.pw') ## Extract the quality format patronvideos = '>([^<]+)</strong></div>' matches = re.compile(patronvideos, re.DOTALL).finditer(data) QualityStr = "" for match in matches: QualityStr = scrapertools.unescape(match.group(1))[6:] logger.info("QualityStr:" + QualityStr) ## Extrae las entradas streaming = scrapertools.find_single_match( data, '<strong>Streaming:</strong>(.*?)<table height="30">') patron = '<td><a href="([^"]+)" target="_blank">([^<]+)</a></td>' matches = re.compile(patron, re.DOTALL).findall(streaming) for scrapedurl, scrapedtitle in matches: print "##### findvideos Streaming ## %s ## %s ##" % (scrapedurl, scrapedtitle) title = "[COLOR orange]Streaming:[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]" itemlist.append( Item(channel=__channel__, action="play", title=title, url=scrapedurl, fulltitle=item.fulltitle, thumbnail=item.thumbnail, show=item.show, folder=False)) streaming_hd = scrapertools.find_single_match( data, '<strong>Streaming HD[^<]+</strong>(.*?)<table height="30">') patron = '<td><a href="([^"]+)" target="_blank">([^<]+)</a></td>' matches = re.compile(patron, re.DOTALL).findall(streaming_hd) for scrapedurl, scrapedtitle in matches: print "##### findvideos Streaming HD ## %s ## %s ##" % (scrapedurl, scrapedtitle) title = "[COLOR yellow]Streaming HD:[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]" itemlist.append( Item(channel=__channel__, action="play", title=title, url=scrapedurl, fulltitle=item.fulltitle, thumbnail=item.thumbnail, show=item.show, folder=False)) streaming_3D = scrapertools.find_single_match( data, '<strong>Streaming 3D[^<]+</strong>(.*?)<table height="30">') patron = '<td><a 
href="([^"]+)" target="_blank">([^<]+)</a></td>' matches = re.compile(patron, re.DOTALL).findall(streaming_3D) for scrapedurl, scrapedtitle in matches: print "##### findvideos Streaming 3D ## %s ## %s ##" % (scrapedurl, scrapedtitle) title = "[COLOR pink]Streaming 3D:[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]" itemlist.append( Item(channel=__channel__, action="play", title=title, url=scrapedurl, fulltitle=item.fulltitle, thumbnail=item.thumbnail, show=item.show, folder=False)) download = scrapertools.find_single_match( data, '<strong>Download:</strong>(.*?)<table height="30">') patron = '<td><a href="([^"]+)" target="_blank">([^<]+)</a></td>' matches = re.compile(patron, re.DOTALL).findall(download) for scrapedurl, scrapedtitle in matches: print "##### findvideos Download ## %s ## %s ##" % (scrapedurl, scrapedtitle) title = "[COLOR aqua]Download:[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]" itemlist.append( Item(channel=__channel__, action="play", title=title, url=scrapedurl, fulltitle=item.fulltitle, thumbnail=item.thumbnail, show=item.show, folder=False)) download_hd = scrapertools.find_single_match( data, '<strong>Download HD[^<]+</strong>(.*?)<table width="100%" height="20">' ) patron = '<td><a href="([^"]+)" target="_blank">([^<]+)</a></td>' matches = re.compile(patron, re.DOTALL).findall(download_hd) for scrapedurl, scrapedtitle in matches: print "##### findvideos Download HD ## %s ## %s ##" % (scrapedurl, scrapedtitle) title = "[COLOR azure]Download HD:[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]" itemlist.append( Item(channel=__channel__, action="play", title=title, url=scrapedurl, fulltitle=item.fulltitle, thumbnail=item.thumbnail, show=item.show, folder=False)) if len(itemlist) == 0: itemlist = servertools.find_video_items(item=item) return itemlist
def play(item):
    """Resolve playable video links for *item* via the generic server detector."""
    logger.info("tvalacarta.channels.laredcl play")
    from servers import servertools
    itemlist = servertools.find_video_items(item)
    return itemlist
def play(item):
    """Scan the item's URL text for known video-host links and return them."""
    logger.info("pelisalacarta.inkapelis play")
    return servertools.find_video_items(data=item.url)
def forumdetail(item):
    """Scrape an mcanime forum post: emit a "next page" entry if present,
    then every video link found in the post body.

    Links found by the generic detector get their title replaced with the
    cleaned post title and action "play".
    """
    logger.info("[mcanime.py] forumdetail")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    #logger.info(data)

    # ------------------------------------------------------------------------------------
    # Looks for the links to the mirrors, pages, or episodes of the series...
    # ------------------------------------------------------------------------------------
    patronvideos = '([^"]+)" class="next">Siguiente'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match in matches:
        logger.info("Encontrada pagina siguiente")
        itemlist.append(
            Item(channel=__channel__,
                 action='list',
                 title=">> Página siguiente",
                 url=urlparse.urljoin(item.url, match).replace("&", "&"),
                 folder=True))

    # ------------------------------------------------------------------------------------
    # Looks for the links to the videos
    # ------------------------------------------------------------------------------------
    # Extracts the body of the post
    #logFile.info("data="+data)
    #patronvideos = '<div class="content">.*?<div class="poster">.*?</div>(.*?)</div>'
    patronvideos = '<div class="content">(.*?)<div class="content">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    datapost = ""
    if len(matches) > 0:
        datapost = matches[0]
    else:
        datapost = ""
    #logFile.info("dataPost="+dataPost)

    # Extracts the thumbnail
    # NOTE(review): thumbnailurl is computed but never attached to any Item —
    # looks like leftover/disabled code; confirm before removing.
    patronvideos = '<img src="([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(datapost)
    thumbnailurl = ""
    logger.info("thumbnails")
    for match in matches:
        logger.info(match)
    if len(matches) > 0:
        thumbnailurl = matches[0]

    # Extracts and cleans the description text between the image and the first link.
    # NOTE(review): descripcion is only logged, never attached to any Item — confirm intent.
    patronvideos = '<img.*?>(.*?)<a'
    matches = re.compile(patronvideos, re.DOTALL).findall(datapost)
    descripcion = ""
    if len(matches) > 0:
        descripcion = matches[0]
        descripcion = descripcion.replace("<br />", "")
        descripcion = descripcion.replace("<br/>", "")
        descripcion = descripcion.replace("\r", "")
        descripcion = descripcion.replace("\n", " ")
        descripcion = re.sub("<[^>]+>", " ", descripcion)
    logger.info("descripcion=" + descripcion)

    # Generic detection of video links inside the post body
    itemlist.extend(servertools.find_video_items(data=datapost))
    for video in itemlist:
        # Only retitle leaf entries (folder=True ones are navigation, e.g. next page)
        if video.folder == False:
            video.channel = __channel__
            video.title = re.sub("<[^>]+>", "", item.title)
            video.action = "play"

    return itemlist
def findvideos(item):
    """Resolve the player iframe of an altadefinizioneclick page into
    playable Items.

    If the iframe points at the hdpass.link mirror hub, every mirror form is
    submitted and the obfuscated response decoded (or, failing that, direct
    <source> mp4 links are extracted); otherwise the iframe URL itself is fed
    to the generic server detector.
    """
    logger.info("[altadefinizioneclick.py] findvideos")
    itemlist = []

    # Download the page (behind Cloudflare)
    data = anti_cloudflare(item.url)

    patron = r'<iframe id="iframeVid" width="100%" height="500px" src="([^"]+)" allowfullscreen>'
    url = scrapertools.find_single_match(data, patron)

    if 'hdpass.link' in url:
        # Fetch the mirror hub and keep only the <ul id="mirrors"> section.
        data = scrapertools.cache_page(url, headers=headers)

        start = data.find('<ul id="mirrors">')
        end = data.find('</ul>', start)
        data = data[start:end]

        # Each mirror is a GET form with 3-4 hidden inputs plus a submit button.
        patron = '<form method="get" action="">\s*'
        patron += '<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*'
        patron += '<input type="hidden" name="([^"]+)" value="([^"]+)"/><input type="hidden" name="([^"]+)" value="([^"]+)"/> <input type="submit" class="[^"]*" name="([^"]+)" value="([^"]+)"/>\s*'
        patron += '</form>'

        html = []
        for name1, val1, name2, val2, name3, val3, name4, val4 in re.compile(
                patron).findall(data):
            # Rebuild the form's query string (skip the empty third field).
            if name3 == '' and val3 == '':
                get_data = '%s=%s&%s=%s&%s=%s' % (name1, val1, name2, val2,
                                                  name4, val4)
            else:
                get_data = '%s=%s&%s=%s&%s=%s&%s=%s' % (
                    name1, val1, name2, val2, name3, val3, name4, val4)

            tmp_data = scrapertools.cache_page(
                'http://hdpass.link/film.php?randid=0&' + get_data,
                headers=headers)

            # The mirror page hides the real player HTML inside an
            # eval(unescape(...)) blob; capture its three arguments.
            patron = r'; eval\(unescape\("(.*?)",(\[".*?;"\]),(\[".*?\])\)\);'
            try:
                [(par1, par2, par3)] = re.compile(patron,
                                                  re.DOTALL).findall(tmp_data)
            except:
                # No obfuscated blob: fall back to direct <source> mp4 links.
                patron = r'<source src="([^"]+)"\s*type="video/mp4"(?:\s*label="([^"]+)")?'
                for media_url, media_label in re.compile(patron).findall(
                        tmp_data):
                    itemlist.append(
                        Item(server='directo',
                             action="play",
                             title=' - [Player]' if media_label == '' else
                             ' - [Player @%s]' % media_label,
                             url=media_url,
                             folder=False))
                continue

            # SECURITY NOTE(review): eval on strings scraped from a remote
            # page — mitigated by the empty __builtins__/locals sandbox, but
            # still worth replacing with ast.literal_eval if the format allows.
            par2 = eval(par2, {'__builtins__': None}, {})
            par3 = eval(par3, {'__builtins__': None}, {})

            tmp_data = unescape(par1, par2, par3)
            html.append(tmp_data.replace(r'\/', '/'))

        html = ''.join(html)
    else:
        html = url

    # Let the generic detector find known hosts in the decoded HTML
    itemlist.extend(servertools.find_video_items(data=html))

    # Propagate the page's metadata onto every detected video entry
    for videoitem in itemlist:
        videoitem.title = "".join([item.title, videoitem.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.channel = __channel__

    return itemlist
def findvideos(item):
    """Scrape a mondolunatico page for captcha-protected pass links, then
    (workaround) resolve keeplinks.eu link-protector pages before running
    the generic video-host detector on the result.
    """
    logger.info("streamondemand.mondolunatico findvideos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)

    # Extract the entries pointing at the site's captcha-protected pass page
    patron = r'noshade>(.*?)<br>.*?<a href="(http://mondolunatico\.org/pass/index\.php\?ID=[^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl in matches:
        scrapedtitle = scrapedtitle.replace('*', '').replace('Streaming',
                                                             '').strip()
        title = '%s - [%s]' % (item.title, scrapedtitle)
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show,
                 server='captcha',
                 folder=False))

    ### robalo fix obfuscator - start ####
    # keeplinks.eu hides the real links behind a protector page; fetching it
    # with the flag[<id>]=1 cookie set reveals the target anchors.
    if 'keeplinks.eu' in data:
        import time
        keeplinks = "http://www.keeplinks.eu/p92/"
        id = scrapertools.get_match(data, 'href="' + keeplinks + '([^"]+)"')
        _headers = [
            ['Host', 'www.keeplinks.eu'],
            [
                'User-Agent',
                'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'
            ],
            [
                'Accept',
                'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
            ],
            ['Accept-Language', 'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3'],
            [
                'Cookie', 'flag[' + id + ']=1; noadvtday=0; nopopatall=' +
                str(time.time())
            ],
            ['Accept-Encoding', 'gzip, deflate'],
            ['Connection', 'keep-alive']
        ]
        data = scrapertools.cache_page(keeplinks + id, headers=_headers)
        # str() of the match list so the generic detector can scan it as text
        data = str(
            scrapertools.find_multiple_matches(
                data, '</lable><a href="([^"]+)" target="_blank"'))
    ### robalo fix obfuscator - end ####

    # Generic detection over the (possibly rewritten) page data,
    # propagating the page's metadata onto every detected entry.
    for videoitem in servertools.find_video_items(data=data):
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__
        itemlist.append(videoitem)

    return itemlist
def findvideos(item):
    """Scrape a cinetux detail page for mirror rows (three historical table
    layouts are tried in turn) and return one playable Item per mirror.

    Falls back to the generic server detector when no table layout matches.

    Fixes vs. the previous revision:
    - the fallback options were all labelled "Opción 1" because the counter
      was never incremented; it now counts up per option.
    - ``videoitem.channel = channel = __channel__`` created a stray local
      ``channel`` binding; the chained assignment is removed.
    """
    logger.info("[cinetux.py] findvideos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    logger.info("data=" + data)

    '''
    <tr class="tabletr">
    <td class="episode-server" align="left"><img src="http://www.cinetux.org/imagenes/veronline.png" alt="" width="22" height="22" />Opción 01</td>
    <td class="episode-server-img" align="center">PutLocker</td>
    <td class="episode-lang" align="center">Español</td>
    <td align="center">DVD-SCR</td>
    <td class="center" align="center"><a rel="nofollow" target="_blank" class="myButtonLink" href="http://www.putlocker.com/file/BADCD9ACA395E318"></a></td>
    <td align="center">Anónimo</td>
    </tr>
    '''
    # Layout 1: opcion/server/idioma/calidad/fuente/link cells
    patron = '<tr class="tabletr">[^<]+'
    patron += '<td class="opcion-td"><img[^>]+>([^>]+)</td>[^<]+'
    patron += '<td class="server-td[^>]+>([^<]+)</td>[^<]+'
    patron += '<td class="idioma-td[^>]+>([^>]+)</td>[^<]+'
    patron += '<td class="calidad-td[^<]+</td>[^<]+'
    patron += '<td class="fuente-td[^>]+>([^<]+)</td>[^<]+'
    patron += '<td class="link-td">(.*?)</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedtitle, scrapedserver, scrapedlanguage, scrapedquality, scrapedlink in matches:
        title = "Ver " + scrapedtitle + " en " + scrapedserver + " (" + scrapedlanguage + ") (" + scrapedquality + ")"
        url = scrapedlink
        thumbnail = item.thumbnail
        plot = ""
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 fulltitle=item.fulltitle + " [" + scrapedlanguage + "][" + scrapedquality + "]",
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 fanart="http://pelisalacarta.mimediacenter.info/fanart/cinetux.jpg",
                 folder=False))

    # Layout 2: same cells without the calidad column
    patron = '<tr class="tabletr">[^<]+'
    patron += '<td class="opcion-td"><img[^>]+>([^>]+)</td>[^<]+'
    patron += '<td class="server-td[^>]+>([^<]+)</td>[^<]+'
    patron += '<td class="idioma-td[^>]+>([^<]+)</td>[^<]+'
    patron += '<td class="fuente-td[^>]+>([^<]+)</td>[^<]+'
    patron += '<td class="link-td">(.*?)</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedtitle, scrapedserver, scrapedlanguage, scrapedquality, scrapedlink in matches:
        title = "Ver " + scrapedtitle + " en " + scrapedserver + " (" + scrapedlanguage + ") (" + scrapedquality + ")"
        url = scrapedlink
        thumbnail = item.thumbnail
        plot = ""
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 fulltitle=item.fulltitle,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 fanart="http://pelisalacarta.mimediacenter.info/fanart/cinetux.jpg",
                 folder=False))

    # Layout 3: older episode-server table (as in the sample HTML above)
    patron = '<tr class="tabletr">[^<]+'
    patron += '<td class="episode-server[^>]+><img[^>]+>([^>]+)</td>[^<]+'
    patron += '<td class="episode-server-img[^>]+>([^<]+)</td>[^<]+'
    patron += '<td class="episode-lang[^>]+>([^>]+)</td>[^<]+'
    patron += '<td align="center">([^<]+)</td>[^<]+'
    patron += '<td(.*?)</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedtitle, scrapedserver, scrapedlanguage, scrapedquality, scrapedlink in matches:
        title = "Ver " + scrapedtitle + " en " + scrapedserver + " (" + scrapedlanguage + ") (" + scrapedquality + ")"
        url = scrapedlink
        thumbnail = item.thumbnail
        plot = ""
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 fulltitle=item.fulltitle + " [" + scrapedlanguage + "][" + scrapedquality + "]",
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 folder=False))

    # No table layout matched: let the generic detector number the options.
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(data=data)
        i = 1
        for videoitem in itemlist:
            videoitem.title = "Ver Opción %d en %s" % (i, videoitem.server)
            videoitem.fulltitle = item.fulltitle
            videoitem.channel = __channel__
            i = i + 1  # fix: counter previously never advanced

    return itemlist
def findvideos(item):
    """Resolve the player iframe of an itastreaming page into playable Items.

    If the iframe points at the hdpass.xyz mirror hub, every mirror form is
    submitted and the obfuscated response decoded (or, failing that, the
    embedded urlEmbed inputs are turned into per-server Items); otherwise the
    iframe URL itself is fed to the generic server detector.
    """
    logger.info("[itastreaming.py] findvideos")
    itemlist = []

    # Download the page (behind Cloudflare)
    data = anti_cloudflare(item.url)

    patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
    url = scrapertools.find_single_match(data, patron)

    if 'hdpass.xyz' in url:
        # Fetch the mirror hub and keep only the <ul id="mirrors"> section.
        data = scrapertools.cache_page(url, headers=headers)

        start = data.find('<ul id="mirrors">')
        end = data.find('</ul>', start)
        data = data[start:end]

        # Each mirror is a GET form with 4-5 hidden inputs plus a submit button.
        patron = '<form method="get" action="">\s*<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*<input type="hidden" name="([^"]+)" value="(.*?)"/><input type="hidden" name="([^"]+)" value="([^"]+)"/> <input type="submit" class="[^"]*" name="([^"]+)" value="([^"]+)"/>\s*</form>'
        #patron = '<form method="get" action="">\s*'
        #patron += '<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*'
        #patron += '<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*'
        #patron += '(?:<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*)?'
        #patron += '<input type="submit" class="[^"]*" name="([^"]+)" value="([^"]+)"/>\s*'
        #patron += '</form>'

        html = []
        for name1, val1, name2, val2, name3, val3, name4, val4, name5, val5 in re.compile(
                patron).findall(data):
            # Rebuild the form's query string (skip the empty third field).
            if name3 == '' and val3 == '':
                get_data = '%s=%s&%s=%s&%s=%s&%s=%s' % (
                    name1, val1, name2, val2, name4, val4, name5, val5)
            else:
                get_data = '%s=%s&%s=%s&%s=%s&%s=%s&%s=%s' % (
                    name1, val1, name2, val2, name3, val3, name4, val4, name5,
                    val5)

            tmp_data = scrapertools.cache_page('http://hdpass.xyz/film.php?'
                                               + get_data, headers=headers)

            # The mirror page hides the real player HTML inside an
            # eval(unescape(...)) blob; capture its three arguments.
            patron = r'; eval\(unescape\("(.*?)",(\[".*?;"\]),(\[".*?\])\)\);'
            try:
                [(par1, par2, par3)] = re.compile(patron,
                                                  re.DOTALL).findall(tmp_data)
            except:
                # No obfuscated blob: use the urlEmbed inputs directly,
                # mapping the "hosting" mirror label to the hdload server.
                patron = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed" value="([^"]+)"/>'
                for media_label, media_url in re.compile(patron).findall(
                        tmp_data):
                    media_label = scrapertools.decodeHtmlentities(
                        media_label.replace("hosting", "hdload"))
                    itemlist.append(
                        Item(server=media_label,
                             action="play",
                             title=' - [Player]' if media_label == '' else
                             ' - [Player @%s]' % media_label,
                             url=media_url,
                             folder=False))
                continue

            # SECURITY NOTE(review): eval on strings scraped from a remote
            # page — mitigated by the empty __builtins__/locals sandbox, but
            # still worth replacing with ast.literal_eval if the format allows.
            par2 = eval(par2, {'__builtins__': None}, {})
            par3 = eval(par3, {'__builtins__': None}, {})

            tmp_data = unescape(par1, par2, par3)
            html.append(tmp_data.replace(r'\/', '/'))

        html = ''.join(html)
    else:
        html = url

    # Let the generic detector find known hosts in the decoded HTML
    itemlist.extend(servertools.find_video_items(data=html))

    # Propagate the page's metadata onto every detected video entry
    for videoitem in itemlist:
        videoitem.title = "".join([item.title, videoitem.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.channel = __channel__

    return itemlist
def findvideos(item):
    """Scrape a megaforo thread for video links.

    On mega-foro pages it also tries to extract a thumbnail (and, seemingly,
    a plot) from the first post before running the generic detector; on any
    other page it just runs the detector with blank metadata.
    """
    show = item.title.replace("Añadir esta serie a la biblioteca de XBMC", "")
    logger.info("[megaforo.py] findvideos show " + show)
    itemlist = []
    data = scrapertools.cache_page(item.url)

    if 'mega-foro' in data:
        # Thumbnail: first post image preceding a mega.co.nz link
        patronimage = '<div class="inner" id="msg_\d{1,9}".*?<img src="([^"]+)".*?mega.co.nz/\#\![A-Za-z0-9\-\_]+\![A-Za-z0-9\-\_]+'
        matches = re.compile(patronimage, re.DOTALL).findall(data)
        if len(matches) > 0:
            thumbnail = matches[0]
            thumbnail = scrapertools.htmlclean(thumbnail)
            # Forum pages are latin-1; re-encode so XBMC gets UTF-8
            thumbnail = unicode(thumbnail, "iso-8859-1",
                                errors="replace").encode("utf-8")
            item.thumbnail = thumbnail

        # Plot: post text between the image and the facebook share widget.
        # NOTE(review): the cleaned plot is computed and then discarded
        # (item.plot is set to "") — possibly deliberate, confirm intent.
        patronplot = '<div class="inner" id="msg_\d{1,9}".*?<img src="[^"]+"[^/]+/>(.*?)lgf_facebook_share'
        matches = re.compile(patronplot, re.DOTALL).findall(data)
        if len(matches) > 0:
            plot = matches[0]
            title = item.title
            plot = re.sub(' ', '', plot)
            plot = re.sub('\s\s', '', plot)
            plot = scrapertools.htmlclean(plot)
            item.plot = ""

        # Generic detection of video links in the thread
        from servers import servertools
        itemlist.extend(servertools.find_video_items(data=data))
        for videoitem in itemlist:
            videoitem.channel = __channel__
            videoitem.action = "play"
            videoitem.folder = False
            videoitem.thumbnail = item.thumbnail
            videoitem.plot = item.plot
            videoitem.title = "[" + videoitem.server + videoitem.title + " " + item.title
            videoitem.show = show

        # On XBMC-like platforms offer adding the whole series to the library
        if config.get_platform().startswith(
                "xbmc") or config.get_platform().startswith("boxee"):
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir esta serie a la biblioteca de XBMC",
                     url=item.url,
                     action="add_serie_to_library",
                     extra="findvideos"))
        return itemlist
    else:
        # Non mega-foro page: no metadata to scrape, just detect links
        item.thumbnail = ""
        item.plot = ""
        from servers import servertools
        itemlist.extend(servertools.find_video_items(data=data))
        for videoitem in itemlist:
            videoitem.channel = __channel__
            videoitem.action = "play"
            videoitem.folder = False
            videoitem.thumbnail = item.thumbnail
            videoitem.plot = item.plot
            videoitem.title = "[" + videoitem.server + videoitem.title + " " + item.title
        return itemlist
# -*- coding: utf-8 -*-
def get_next_items( item ):
    """Central navigation dispatcher: resolve *item* into the next item list.

    Handles the built-in menus (main menu, channel selector, settings),
    dynamically imports the requested channel module, and routes the
    special actions "play", "findvideos" and "search"; any other action
    is invoked on the channel module by name.  Returns a list of Item
    objects (possibly a single error placeholder), or [] when playback
    or the settings dialog was launched instead.
    """
    plugintools.log("navigation.get_next_items item="+item.tostring())
    try:
        # ----------------------------------------------------------------
        # Main menu
        # ----------------------------------------------------------------
        if item.channel=="navigation":
            if item.action=="mainlist":
                plugintools.log("navigation.get_next_items Main menu")
                itemlist = channelselector.getmainlist("bannermenu")
        elif item.channel=="channelselector":
            if item.action=="channeltypes":
                plugintools.log("navigation.get_next_items Channel types menu")
                itemlist = channelselector.getchanneltypes("bannermenu")
            elif item.action=="listchannels":
                plugintools.log("navigation.get_next_items Channel list menu")
                itemlist = channelselector.filterchannels(item.category,"bannermenu")
        elif item.channel=="configuracion":
            # Settings are a modal dialog, not a listing: nothing to return.
            plugintools.open_settings_dialog()
            return []
        else:
            # A real channel: default to its "mainlist" action when none given.
            if item.action=="":
                item.action="mainlist"
            plugintools.log("navigation.get_next_items Channel code ("+item.channel+"."+item.action+")")
            # Channel modules may live under channels/ or core/ — try both.
            # (Python 2 exec statement; channel name comes from the plugin's
            # own navigation items, not from arbitrary external input.)
            try:
                exec "import channels."+item.channel+" as channel"
            except:
                exec "import core."+item.channel+" as channel"
            from platformcode import xbmctools
            if item.action=="play":
                plugintools.log("navigation.get_next_items play")
                # If the channel defines its own "play" action it takes priority
                if hasattr(channel, 'play'):
                    plugintools.log("navigation.get_next_items play Channel has its own 'play' method")
                    itemlist = channel.play(item)
                    if len(itemlist)>0:
                        # Play the first item the channel resolved.
                        item = itemlist[0]
                        # FIXME: This error should be handled differently; pressing
                        # back without watching the video makes play_video fail.
                        try:
                            xbmctools.play_video(channel=item.channel, server=item.server, url=item.url,
                                                 category=item.category, title=item.title, thumbnail=item.thumbnail,
                                                 plot=item.plot, extra=item.extra, subtitle=item.subtitle,
                                                 video_password = item.password, fulltitle=item.fulltitle,
                                                 Serie=item.show, hasContentDetails=item.hasContentDetails,
                                                 contentTitle=item.contentTitle, contentThumbnail=item.contentThumbnail,
                                                 contentPlot=item.contentPlot)
                        except:
                            pass
                    else:
                        # The channel found nothing playable: tell the user.
                        import xbmcgui
                        ventana_error = xbmcgui.Dialog()
                        ok = ventana_error.ok ("plugin", "No hay nada para reproducir")
                else:
                    plugintools.log("navigation.get_next_items play No channel 'play' method, executing core method")
                    # FIXME: This error should be handled differently; pressing
                    # back without watching the video makes play_video fail.
                    # Better to do the play from the window itself.
                    try:
                        xbmctools.play_video(channel=item.channel, server=item.server, url=item.url,
                                             category=item.category, title=item.title, thumbnail=item.thumbnail,
                                             plot=item.plot, extra=item.extra, subtitle=item.subtitle,
                                             video_password = item.password, fulltitle=item.fulltitle,
                                             Serie=item.show, hasContentDetails=item.hasContentDetails,
                                             contentTitle=item.contentTitle, contentThumbnail=item.contentThumbnail,
                                             contentPlot=item.contentPlot)
                    except:
                        pass
                # Playback path never returns a listing.
                return []
            elif item.action=="findvideos":
                plugintools.log("navigation.get_next_items findvideos")
                # If the channel defines its own "findvideos" action it takes priority
                if hasattr(channel, 'findvideos'):
                    plugintools.log("navigation.get_next_items play Channel has its own 'findvideos' method")
                    itemlist = channel.findvideos(item)
                else:
                    itemlist = []
                # Fall back to the generic server detector when the channel
                # produced nothing.
                if len(itemlist)==0:
                    from servers import servertools
                    itemlist = servertools.find_video_items(item)
                if len(itemlist)==0:
                    itemlist = [ Item(title="No se han encontrado vídeos", thumbnail=os.path.join( plugintools.get_runtime_path() , "resources" , "images" , "thumb_error.png" )) ]
            else:
                # Generic actions: "search" prompts for a query, a couple of
                # special channels take the banner layout, anything else is
                # dispatched to the channel function named after the action.
                if item.action=="search":
                    tecleado = plugintools.keyboard_input()
                    if tecleado!="":
                        tecleado = tecleado.replace(" ", "+")
                        itemlist = channel.search(item,tecleado)
                elif item.channel=="novedades" and item.action=="mainlist":
                    itemlist = channel.mainlist(item,"bannermenu")
                elif item.channel=="buscador" and item.action=="mainlist":
                    itemlist = channel.mainlist(item,"bannermenu")
                else:
                    exec "itemlist = channel."+item.action+"(item)"
                # Give every returned item a default folder/no-folder thumbnail.
                for loaded_item in itemlist:
                    if loaded_item.thumbnail=="":
                        if loaded_item.folder:
                            loaded_item.thumbnail = os.path.join( plugintools.get_runtime_path() , "resources" , "images" , "thumb_folder.png" )
                        else:
                            loaded_item.thumbnail = os.path.join( plugintools.get_runtime_path() , "resources" , "images" , "thumb_nofolder.png" )
                if len(itemlist)==0:
                    itemlist = [ Item(title="No hay elementos para mostrar", thumbnail=os.path.join( plugintools.get_runtime_path() , "resources" , "images" , "thumb_error.png" )) ]
    except:
        # Catch-all boundary: log the traceback and show a single error item
        # instead of crashing the navigation window.
        import traceback
        plugintools.log("navigation.get_next_items "+traceback.format_exc())
        itemlist = [ Item(title="Se ha producido un error", thumbnail=os.path.join( plugintools.get_runtime_path() , "resources" , "images" , "thumb_error.png" )) ]
    return itemlist
def findvideos(item):
    """Build the list of mirror items for a divxatope detail page.

    Rewrites the URL to the "ver-online" variant, scrapes a torrent link
    plus every per-server mirror row, lists streaming ("Ver en ...")
    mirrors before download mirrors, and falls back to the generic
    server detector when no mirror row was recognised.
    """
    logger.info("pelisalacarta.channels.divxatope findvideos")
    itemlist = []
    # Always scrape the "ver-online" variant of the page.
    item.url = item.url.replace("divxatope.com/descargar/", "divxatope.com/ver-online/")
    # Expected markup, one row per mirror:
    #   <div class="box1"><img src='...nowvideo.jpg' width='33' height='33'></div>
    #   <div class="box2">nowvideo</div>           <- server name
    #   <div class="box3">Español Castel</div>     <- language
    #   <div class="box4">DVD-Screene</div>        <- quality
    #   <div class="box5"><a href="..." rel="nofollow" target="_blank">Ver Online</a></div>
    data = scrapertools.cachePage(item.url)
    # A "tumejorserie" link carries the torrent URL in its "url" parameter.
    link = scrapertools.find_single_match(
        data, 'href="http://tumejorserie.*?url=([^"]+)"')
    if link != "":
        #link = "http://www.divxatope.com/"+link
        logger.info("pelisalacarta.channels.divxatope torrent=" + link)
        itemlist.append(
            Item(channel=__channel__, action="play", server="torrent",
                 title="Vídeo en torrent", fulltitle=item.title, url=link,
                 thumbnail=item.thumbnail, plot=item.plot, folder=False))

    mirror_pattern = ("<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
                      '<div class="box2">([^<]+)</div[^<]+'
                      '<div class="box3">([^<]+)</div[^<]+'
                      '<div class="box4">([^<]+)</div[^<]+'
                      '<div class="box5">(.*?)</div[^<]+'
                      '<div class="box6">([^<]+)<')
    mirror_rows = re.compile(mirror_pattern, re.DOTALL).findall(data)
    scrapertools.printMatches(mirror_rows)

    watch_items = []
    download_items = []
    for server_name, language, quality, raw_url, remarks in mirror_rows:
        label = "Mirror en " + server_name + " (" + quality + ")" + " (" + language + ")"
        remark = remarks.strip()
        if remark != "":
            label = label + " (" + remark + ")"
        absolute_url = urlparse.urljoin(item.url, raw_url)
        thumbnail = ""
        plot = ""
        if (DEBUG):
            logger.info("title=[" + label + "], url=[" + absolute_url + "], thumbnail=[" + thumbnail + "]")
        mirror = Item(channel=__channel__, action="extract_url", title=label,
                      fulltitle=label, url=absolute_url, thumbnail=thumbnail,
                      plot=plot, folder=True)
        # "Ver en ..." remarks are streaming mirrors; the rest are downloads.
        (watch_items if remarks.startswith("Ver en") else download_items).append(mirror)

    # Streaming mirrors are listed first, downloads after.
    itemlist.extend(watch_items)
    itemlist.extend(download_items)

    # Fallback: nothing recognised -> let the generic detector scan the page.
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(data=data)
        for videoitem in itemlist:
            videoitem.title = ("Enlace encontrado en " + videoitem.server + " (" +
                               scrapertools.get_filename_from_url(videoitem.url) + ")")
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__
    return itemlist