def episodios(item): logger.info("url: {0}".format(item.url)) data = httptools.downloadpage(item.url).data episodes = re.findall('visco.*?href="(?P<url>[^"]+).+?nbsp; (?P<title>.*?)</a>.+?ucapaudio.?>(?P<langs>.*?)</div>', data, re.MULTILINE | re.DOTALL) itemlist = [] for url, title, langs in episodes: languages = " ".join(["[{0}]".format(IDIOMAS.get(lang, lang)) for lang in re.findall('images/s-([^\.]+)', langs)]) itemlist.append(item.clone(action = "findvideos", title = "{0} {1} {2}".format(item.title, title, languages), url = urlparse.urljoin(HOST, url), language = languages, list_idiomas=list_idiomas, list_calidad=CALIDADES, context=filtertools.context )) if len(itemlist) > 0 and filtertools.context: itemlist = filtertools.get_links(itemlist, item.channel) # Opción "Añadir esta serie a la biblioteca de XBMC" if config.get_library_support() and len(itemlist) > 0: itemlist.append(item.clone(title="Añadir esta serie a la biblioteca", action="add_serie_to_library", extra="episodios")) return itemlist
def temporadas(item): logger.debug("pelisalacarta.channels.metaserie temporadas") itemlist = [] templist = [] data = scrapertools.cache_page(item.url) patron = '<li class=".*?="([^"]+)".*?>([^<]+)</a>' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedtitle in matches: url = urlparse.urljoin(item.url,scrapedurl) title = scrapedtitle title = title.replace("&","x"); thumbnail = item.thumbnail plot = item.plot fanart = scrapertools.find_single_match(data,'<img src="([^"]+)"/>.*?</a>') if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"])") itemlist.append( Item(channel=item.channel, action="episodios" , title=title ,fulltitle = item.title, url=url, thumbnail=thumbnail, plot=plot, fanart = fanart, contentSerieName=item.contentSerieName)) if item.extra == 'temporadas': for tempitem in itemlist: templist += episodios(tempitem) if config.get_library_support() and len(itemlist) > 0: itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]', url=item.url, action="add_serie_to_library", extra="temporadas", contentSerieName=item.contentSerieName)) if item.extra == 'temporadas': return templist else: return itemlist
def temporadas(item): logger.debug("pelisalacarta.channels.mundoflv temporadas") itemlist = [] templist = [] data = scrapertools.cache_page(item.url) realplot = '' patron = "<button class='classnamer' onclick='javascript: mostrarcapitulos.*?blank'>([^<]+)</button>" matches = re.compile(patron,re.DOTALL).findall(data) serieid = scrapertools.find_single_match(data,"<link rel='shortlink' href='http:\/\/mundoflv.com\/\?p=([^']+)' \/>") item.thumbnail = item.thumbvid for scrapedtitle in matches: url = 'http://mundoflv.com/wp-content/themes/wpRafael/includes/capitulos.php?serie='+serieid+'&sr=&temporada=' + scrapedtitle title = 'Temporada '+ scrapertools.decodeHtmlentities(scrapedtitle) thumbnail = item.thumbnail realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*') plot = scrapertools.remove_htmltags(realplot) fanart = ''#scrapertools.find_single_match(data,'<img src="([^"]+)"/>.*?</a>') if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"])") itemlist.append( Item(channel=item.channel, action="episodios" , title=title , fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot, fanart = fanart, extra1=item.extra1, contentSerieName=item.contentSerieName)) if item.extra=='temporadas': for tempitem in itemlist: templist += episodios(tempitem) if config.get_library_support() and len(itemlist) > 0: itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]', url=item.url, action="add_serie_to_library", extra="temporadas", contentSerieName=item.contentSerieName, extra1 = item.extra1)) if item.extra=='temporadas': return templist else: return itemlist
def episodi(item): logger.info("[Majintoon.py]==> episodi") itemlist = [] data = httptools.downloadpage(item.url).data patron = r'<a href="([^"]+)" target="_blank"(?:\s*rel="[^"]+"|)>([^<]+)</a>' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: if 'wikipedia' not in scrapedurl: scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).replace("×", "x") itemlist.append( Item(channel=__channel__, action="findvideos", contentType="tv", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl, extra="tv", show=item.show, thumbnail=item.thumbnail, folder=True)) if config.get_library_support() and len(itemlist) != 0: itemlist.append( Item(channel=__channel__, title="Aggiungi alla libreria (%s)" % color("Solo Serie TV", "red"), url=item.url, action="add_serie_to_library", extra="episodi", show=item.show)) return itemlist
def findvideos(item):
    # Decoded url prefix -> server name
    servidor = {"http://uptobox.com/": "uptobox",
                "http://userscloud.com/": "userscloud",
                "https://my.pcloud.com/publink/show?code=": "pcloud",
                "http://thevideos.tv/": "thevideos",
                "http://ul.to/": "uploadedto",
                "http://turbobit.net/": "turbobit",
                "http://www.cinecalidad.com/protect/v.html?i=": "cinecalidad",
                "http://www.mediafire.com/download/": "mediafire",
                "https://www.youtube.com/watch?v=": "youtube",
                "http://thevideos.tv/embed-": "thevideos",
                "//www.youtube.com/embed/": "youtube",
                "http://ok.ru/video/": "okru",
                "http://ok.ru/videoembed/": "okru",
                "http://www.cinemaqualidade.com/protect/v.html?i=": "cinemaqualidade.com",
                "http://usersfiles.com/": "usersfiles",
                "https://depositfiles.com/files/": "depositfiles",
                "http://www.nowvideo.sx/video/": "nowvideo",
                "http://vidbull.com/": "vidbull"}
    logger.info("pelisalacarta.channels.cinecalidad links")
    itemlist = []

    data = scrapertools.cache_page(item.url)
    # Links come obfuscated, e.g.:
    # {h=dec("111 123 123 119 65 54 54 124 119 123 118 105 118 127 53 106 118 116 54")+dec("114 114 110 115 110 55 121 117 64 120 120 115");}
    patron = 'dec\("([^"]+)"\)\+dec\("([^"]+)"\)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    recomendados = ["uptobox", "thevideos", "nowvideo", "pcloud"]

    for scrapedurl, scrapedtitle in matches:
        if dec(scrapedurl) in servidor:
            url = dec(scrapedurl) + dec(scrapedtitle)
            title = "Ver " + item.contentTitle + " en " + servidor[dec(scrapedurl)].upper()
            if servidor[dec(scrapedurl)] in recomendados:
                title = title + "[COLOR limegreen] [I] (Recomendado) [/I] [/COLOR]"
            # if servidor[dec(scrapedurl)] == 'pcloud':
            #     thumbnail = 'https://pbs.twimg.com/profile_images/687592526694473728/bCQCZC7b.png'
            # else:
            thumbnail = servertools.guess_server_thumbnail(servidor[dec(scrapedurl)])
            plot = ""
            if DEBUG:
                logger.info("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
            itemlist.append(Item(channel=item.channel, action="play", title=title,
                                 fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot,
                                 extra=item.thumbnail, server=servidor[dec(scrapedurl)]))

    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))

    return itemlist
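# The dec() helper used above is not part of this snippet. Judging from the
# sample in the comment (111 123 123 119 65 ... corresponds to "http:..."),
# each number is a character code shifted up by 7. A minimal sketch under that
# assumption (the name matches the calls above, but the offset is inferred,
# not confirmed against the channel's own helper):
def dec(encoded):
    # "111 123 123 119 65" -> chr(111 - 7), chr(123 - 7), ... -> "http:"
    return "".join(chr(int(n) - 7) for n in encoded.split())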
def episodios(item): logger.info("[solostreaming.py] episodios") itemlist = [] # Descarga la página hosturi = "%s/sod/api.php?get=%s&type=episodi&uri=%s" % (host, item.url.split("||")[1], item.url.split("||")[0]) data = cache_jsonpage(hosturi) for singledata in data: type = normalize_unicode(singledata["type"]) if item.extra == "serietv": titolo = scrapertools.decodeHtmlentities(normalize_unicode(singledata["ep_title"])).strip() ep_num = normalize_unicode(singledata["ep_num"]) frm_title = "[COLOR white](%s) [B][COLOR deepskyblue]- %s %s[/COLOR][/B]" % (type.upper(), ep_num, titolo) else: e_num = normalize_unicode(singledata["e_num"]) s_num = normalize_unicode(singledata["s_num"]) frm_title = "[COLOR white](%s) [B][COLOR deepskyblue]- %sx%s[/COLOR][/B]" % (type.upper(), s_num, e_num) links = " ".join(singledata["links"]) itemlist.append( Item( channel=__channel__, action="findvid_serie", title=frm_title, url=item.url, thumbnail=item.thumbnail, extra=links, fulltitle=item.fulltitle, show=item.show, ) ) if config.get_library_support() and len(itemlist) != 0: itemlist.append( Item( channel=__channel__, title=item.title, url=item.url, action="add_serie_to_library", extra="episodios", show=item.show, ) ) itemlist.append( Item( channel=item.channel, title="Scarica tutti gli episodi della serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show, ) ) return itemlist
def temporadas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    url_base = item.url

    patron = '<a href="javascript:.*?;" class="lccn"><b>([^<]+)<\/b><\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    temp = 1

    if matches:
        for scrapedtitle in matches:
            url = url_base
            title = scrapedtitle
            thumbnail = item.thumbnail
            plot = item.plot
            contentSeasonNumber = str(temp)
            # Copy per season: mutating one shared dict would leave every item
            # with the last season number
            infoLabels = dict(item.infoLabels)
            infoLabels['season'] = contentSeasonNumber
            fanart = scrapertools.find_single_match(data, '<img src="([^"]+)"/>.*?</a>')
            itemlist.append(Item(channel=item.channel, action="episodiosxtemp", title=title,
                                 fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot,
                                 fanart=fanart, contentSeasonNumber=contentSeasonNumber,
                                 contentSerieName=item.contentSerieName, infoLabels=infoLabels))
            temp = temp + 1

        if config.get_library_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=item.channel,
                                 title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]',
                                 url=item.url, action="add_serie_to_library", extra="episodios",
                                 contentSerieName=item.contentSerieName))

        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
        return itemlist
    else:
        item.title = ''
        item.modo = 'unico'
        return episodiosxtemp(item)
def episodios(item): logger.info( "[seriehd.py] episodios" ) itemlist = [] data = scrapertools.cache_page( item.url ) seasons_data = scrapertools.get_match( data, '<select name="stagione" id="selSt">(.*?)</select>' ) seasons = re.compile( 'data-stagione="(\d+)"', re.DOTALL ).findall( seasons_data ) for scrapedseason in seasons: episodes_data = scrapertools.get_match( data, '<div class="list[^"]+" data-stagione="' + scrapedseason + '">(.*?)</div>' ) episodes = re.compile( 'data-id="(\d+)"', re.DOTALL ).findall( episodes_data ) for scrapedepisode in episodes: season = str ( int( scrapedseason ) + 1 ) episode = str ( int( scrapedepisode ) + 1 ) if len( episode ) == 1: episode = "0" + episode title = season + "x" + episode ## Le pasamos a 'findvideos' la url con dos partes divididas por el caracter "?" ## [host+path]?[argumentos]?[Referer] url = item.url + "?st_num=" + scrapedseason + "&pt_num=" + scrapedepisode + "?" + item.url itemlist.append( Item( channel=__channel__, action="findvideos", title=title, url=url, fulltitle=item.fulltitle, show=item.show, thumbnail=item.thumbnail) ) if config.get_library_support(): itemlist.append( Item(channel=__channel__, title=item.title, url=item.url, action="add_serie_to_library", extra="episodios", show=item.show) ) itemlist.append( Item(channel=item.channel, title="Scarica tutti gli episodi della serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show) ) return itemlist
def episodios(item):
    logger.info()
    itemlist = []

    html_serie = get_url_contents(item.url)

    info_serie = __extract_info_from_serie(html_serie)
    plot = info_serie[3] if info_serie else ''

    episodes = re.findall(REGEX_EPISODE, html_serie, re.DOTALL)
    es_pelicula = False
    for url, title, date in episodes:
        episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
        # The link belongs to an episode
        if episode:
            season = 1
            episode = int(episode)
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, season, episode)
            title = "{0}x{1:02d} {2} ({3})".format(
                season, episode, "Episodio " + str(episode), date)
        # The link belongs to a movie
        else:
            title = "{0} ({1})".format(title, date)
            item.url = url
            es_pelicula = True
        logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
            title, url, item.thumbnail))
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title,
                             url=url, thumbnail=item.thumbnail, plot=plot, show=item.show,
                             fulltitle="{0} {1}".format(item.show, title),
                             viewmode="movies_with_plot", folder=True))

    # The library is supported and at least one episode or movie was found
    if config.get_library_support() and len(itemlist) > 0:
        if es_pelicula:
            item_title = "Añadir película a la biblioteca"
            item_action = "add_pelicula_to_library"
            item_extra = ""
        else:
            item_title = "Añadir serie a la biblioteca"
            item_action = "add_serie_to_library"
            item_extra = "episodios"
        itemlist.append(Item(channel=item.channel, title=item_title, url=item.url,
                             action=item_action, extra=item_extra, show=item.show))
        if not es_pelicula:
            itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios",
                                 url=item.url, action="download_all_episodes",
                                 extra="episodios", show=item.show))
    return itemlist
def episodios(item): logger.info("streamondemand.channels.filmstreampw episodios") itemlist = [] data = scrapertools.cache_page(item.url, headers=headers) post_url = site + 'engine/ajax/a.sseries.php' serie_id = scrapertools.get_match( data, '\?id=(\d+)" rel="nofollow"' ) start = data.find('<select id="sseriesSeason">') end = data.find('</select>', start) for season_id, season in re.compile('<option value="([^"]+)">([^<]+)</option>').findall(data[start:end]): post_data = 'news_id=%s&season=%s' % (serie_id, season_id) json = scrapertools.cache_page(post_url, post=post_data, headers=headers) for episode_id, episode in re.compile(r'<option value=\\"(\d+)\\">([^<]+)<\\/option>').findall(json): title = season + ' | ' + episode.replace('Serie', 'Episodio') url = '%s?news_id=%s&series=%s?%s' % (post_url, serie_id, episode_id, item.url) itemlist.append( Item( channel=__channel__, action="findvid_serie", title=title.strip(), url=url, fulltitle=item.fulltitle, show=item.show, thumbnail=item.thumbnail ) ) if config.get_library_support() and len(itemlist) != 0: itemlist.append( Item(channel=__channel__, title=item.title, url=item.url, action="add_serie_to_library", extra="episodios", show=item.show) ) itemlist.append( Item(channel=__channel__, title="Scarica tutti gli episodi della serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show) ) return itemlist
def findvideos(item):
    # Decoded url prefix -> server name
    servidor = {"http://uptobox.com/": "uptobox",
                "http://userscloud.com/": "userscloud",
                "https://my.pcloud.com/publink/show?code=": "pcloud",
                "http://thevideos.tv/": "thevideos",
                "http://ul.to/": "uploadedto",
                "http://turbobit.net/": "turbobit",
                "http://www.cinecalidad.com/protect/v.html?i=": "cinecalidad",
                "http://www.mediafire.com/download/": "mediafire",
                "https://www.youtube.com/watch?v=": "youtube",
                "http://thevideos.tv/embed-": "thevideos",
                "//www.youtube.com/embed/": "youtube",
                "http://ok.ru/video/": "okru",
                "http://ok.ru/videoembed/": "okru",
                "http://www.cinemaqualidade.com/protect/v.html?i=": "cinemaqualidade.com",
                "http://usersfiles.com/": "usersfiles",
                "https://depositfiles.com/files/": "depositfiles",
                "http://www.nowvideo.sx/video/": "nowvideo",
                "http://vidbull.com/": "vidbull",
                "http://filescdn.com/": "filescdn",
                "https://www.yourupload.com/watch/": "yourupload"}
    logger.info()
    itemlist = []
    duplicados = []
    data = httptools.downloadpage(item.url).data

    patron = 'dec\("([^"]+)"\)\+dec\("([^"]+)"\)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    recomendados = ["uptobox", "thevideos", "nowvideo", "pcloud"]

    for scrapedurl, scrapedtitle in matches:
        if dec(scrapedurl) in servidor:
            title = "Ver " + item.contentTitle + " en " + servidor[dec(scrapedurl)].upper()
            if 'yourupload' in dec(scrapedurl):
                url = dec(scrapedurl).replace('watch', 'embed') + dec(scrapedtitle)
            else:
                if 'youtube' in dec(scrapedurl):
                    title = '[COLOR orange]Trailer en Youtube[/COLOR]'
                url = dec(scrapedurl) + dec(scrapedtitle)
            if servidor[dec(scrapedurl)] in recomendados:
                title = title + "[COLOR limegreen] [I] (Recomendado) [/I] [/COLOR]"
            thumbnail = servertools.guess_server_thumbnail(servidor[dec(scrapedurl)])
            plot = ""
            if title not in duplicados:
                itemlist.append(Item(channel=item.channel, action="play", title=title,
                                     fulltitle=item.title, url=url, thumbnail=thumbnail,
                                     plot=plot, extra=item.thumbnail,
                                     server=servidor[dec(scrapedurl)]))
                duplicados.append(title)

    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))

    return itemlist
def episodios(item): logger.info("{0} - {1}".format(item.title, item.url)) itemlist = [] # Descarga la página data = scrapertools.cache_page(item.url) fanart = scrapertools.find_single_match(data, "background-image[^'\"]+['\"]([^'\"]+)") plot = scrapertools.find_single_match(data, "id=['\"]profile2['\"]>\s*(.*?)\s*</div>") logger.debug("fanart: {0}".format(fanart)) logger.debug("plot: {0}".format(plot)) episodes = re.findall("<tr.*?href=['\"](?P<url>[^'\"]+).+?>(?P<title>.+?)</a>.*?<td>(?P<flags>.*?)</td>", data, re.MULTILINE | re.DOTALL) for url, title, flags in episodes: idiomas = " ".join(["[{0}]".format(IDIOMAS.get(language, "OVOS")) for language in re.findall("banderas/([^\.]+)", flags, re.MULTILINE)]) displayTitle = "{show} - {title} {languages}".format(show = item.show, title = title, languages = idiomas) logger.debug("Episode found {0}: {1}".format(displayTitle, urlparse.urljoin(HOST, url))) itemlist.append(item.clone(title=displayTitle, url=urlparse.urljoin(HOST, url), action="findvideos", plot=plot, fanart=fanart, language=idiomas, list_idiomas=list_idiomas, list_calidad=CALIDADES, context=filtertools.context)) if len(itemlist) > 0 and filtertools.context: itemlist = filtertools.get_links(itemlist, item.channel) if config.get_library_support() and len(itemlist) > 0: itemlist.append(item.clone(title="Añadir esta serie a la biblioteca", action="add_serie_to_library", extra="episodios")) return itemlist
def episodios(item): logger.info("pelisalacarta.channels.descargasmix episodios") itemlist = [] fanart = item.fanart thumbnail = item.thumbnail if item.category == "": try: from core.tmdb import Tmdb otmdb= Tmdb(texto_buscado=item.fulltitle, tipo="tv") except: pass data = scrapertools.cachePage(item.url) patron = '(<ul class="menu" id="seasons-list">.*?<div class="section-box related-posts">)' bloque = scrapertools.find_single_match(data, patron) patron = '<strong>(.*?)</strong>' matches = scrapertools.find_multiple_matches(bloque, patron) for scrapedtitle in matches: if item.category == "": try: item.plot, fanart, thumbnail = infoepi(otmdb, scrapedtitle) except: pass scrapedtitle = item.fulltitle+" "+scrapedtitle.strip() itemlist.append( Item(channel=__channel__, action="epienlaces", title=scrapedtitle, fulltitle = item.fulltitle, url=item.url , thumbnail=thumbnail , fanart=fanart, plot=str(item.plot), context = "2", contentTitle=item.fulltitle, show=item.fulltitle, folder=True )) itemlist.sort(key=lambda item: item.title, reverse=True) if config.get_library_support(): itemlist.append( Item(channel=__channel__, title="[COLOR green]Añadir esta serie a la biblioteca[/COLOR]", url=item.url, action="add_serie_to_library", extra="episodios", fulltitle=item.fulltitle, show=item.fulltitle)) return itemlist
def episodios(item): logger.info("pelisalacarta.channels.reyanime episodios") itemlist = [] # Descarga la pagina data = scrapertools.cache_page(item.url) data = scrapertools.find_single_match(data,'<div id="box-cap"(.*?)</div>') # <a title="active-raid-kidou-kyoushuushitsu-dai-hakkei-12" href="/active-raid-kidou-kyoushuushitsu-dai-hakkei-12/"><b>12</b> patron = 'href="([^"]+).*?<b>([^<]+)' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedtitle in matches: title = scrapedtitle.strip() try: if len(title)==1: title = "1x0"+title else: title = "1x"+title except: pass url = urlparse.urljoin(item.url,scrapedurl) thumbnail = item.thumbnail plot = item.plot if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]") itemlist.append( Item(channel=item.channel, action="findvideos", title=title , url=url , thumbnail=thumbnail , plot=plot , show=item.show, fulltitle=item.show+" "+title, fanart=thumbnail, folder=True) ) if config.get_library_support(): itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show) ) itemlist.append( Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show) ) return itemlist
def episodiosearch(item): logger.info("streamondemand.channels.serietvsubita episodios") itemlist = [] data = scrapertools.cache_page(item.url) patron = '<div class="post-meta">.*?<a href="([^"]+)" title="([^"]+)".*?<img.*?src="([^"]+)"' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle, scrapedthumbnail in matches: scrapedplot = "" scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle) if (DEBUG): logger.info( "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") itemlist.append( Item(channel=__channel__, action="findvideos", fulltitle=scrapedtitle, show=item.show, title="[COLOR azure]" + scrapedtitle + "[/COLOR]", url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) if config.get_library_support() and len(itemlist) != 0: itemlist.append( Item(channel=__channel__, title="Aggiungi alla libreria", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show)) return itemlist
def temporadas(item): logger.info() itemlist = [] templist = [] data = httptools.downloadpage(item.url).data patron = 'class="listatemporadas" ><a href="([^"]+)" title=".*?" ><img src="([^"]+)" width="80" height="100" title=".*?alt=".*?<h3>([^<]+)<' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl, scrapedthumbnail, scrapedtitle in matches: url = host+scrapedurl title = scrapedtitle thumbnail = scrapedthumbnail plot = '' fanart = '' contentSeasonNumber = scrapedtitle.replace('Temporada ','') itemlist.append( Item(channel=item.channel, action="episodiosxtemp" , title=title , fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot, fanart = fanart, contentSerieName = item.contentSerieName, contentSeasonNumber = contentSeasonNumber)) if item.extra == 'temporadas': for tempitem in itemlist: templist += episodiosxtemp(tempitem) if config.get_library_support() and len(itemlist) > 0: itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]', url=item.url, action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName)) return itemlist
def episodios(item): logger.info("streamondemand.channels.guardaserie episodios") itemlist = [] data = scrapertools.cache_page( item.url ) serie_id = scrapertools.get_match( data, '/?id=(\d+)" rel="nofollow"' ) data = scrapertools.get_match( data, '<div id="episode">(.*?)</div>' ) seasons_episodes = re.compile( '<select name="episode" id="(\d+)">(.*?)</select>', re.DOTALL ).findall( data ) for scrapedseason, scrapedepisodes in seasons_episodes: episodes = re.compile( '<option value="(\d+)"', re.DOTALL ).findall( scrapedepisodes ) for scrapedepisode in episodes: season = str ( int( scrapedseason ) + 1 ) episode = str ( int( scrapedepisode ) + 1 ) if len( episode ) == 1: episode = "0" + episode title = season + "x" + episode + " - " + item.title ## Le pasamos a 'findvideos' la url con tres partes divididas por el caracter "?" ## [host+path]?[argumentos]?[Referer] url = host + "/wp-admin/admin-ajax.php?action=get_episode&id=" + serie_id + "&season=" + scrapedseason + "&episode=" + scrapedepisode + "?" + item.url itemlist.append( Item( channel=__channel__, action="findvideos", title= title, url=url, fulltitle=item.title, show=item.title, thumbnail=item.thumbnail ) ) if config.get_library_support(): itemlist.append( Item(channel=__channel__, title="[COLOR azure]Aggiungi [/COLOR]" + item.title + "[COLOR azure] alla libreria di Kodi[/COLOR]", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show) ) itemlist.append( Item(channel=__channel__, title="[COLOR azure]Scarica tutti gli episodi della serie[/COLOR]", url=item.url, action="download_all_episodes", extra="episodios", show=item.show) ) return itemlist
def temporadas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    realplot = ''

    patron = "<button class='classnamer' onclick='javascript: mostrarcapitulos.*?blank'>([^<]+)</button>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    serieid = scrapertools.find_single_match(
        data, "<link rel='shortlink' href='http:\/\/mundoflv.com\/\?p=([^']+)' \/>")
    item.thumbnail = item.thumbvid

    for scrapedtitle in matches:
        url = ('http://mundoflv.com/wp-content/themes/wpRafael/includes/capitulos.php'
               '?serie=' + serieid + '&temporada=' + scrapedtitle)
        title = 'Temporada ' + scrapertools.decodeHtmlentities(scrapedtitle)
        contentSeasonNumber = scrapedtitle
        thumbnail = item.thumbnail
        realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*')
        plot = ''
        fanart = ''
        itemlist.append(Item(channel=item.channel, action="episodiosxtemp", title=title,
                             fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot,
                             fanart=fanart, extra1=item.extra1,
                             contentSerieName=item.contentSerieName,
                             contentSeasonNumber=contentSeasonNumber,
                             infoLabels={'season': contentSeasonNumber}))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]',
                             url=item.url, action="add_serie_to_library", extra="episodios",
                             contentSerieName=item.contentSerieName, extra1=item.extra1))

    return itemlist
def temporadas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    patron = '<span class=se-t.*?>(.*?)<\/span>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle in matches:
        contentSeasonNumber = scrapedtitle.strip()
        title = item.contentSerieName + ' Temporada ' + scrapedtitle
        thumbnail = item.thumbnail
        plot = item.plot
        fanart = item.fanart
        # Copy per season: mutating one shared dict would leave every item with
        # the last season number
        infoLabels = dict(item.infoLabels)
        infoLabels['season'] = contentSeasonNumber
        itemlist.append(Item(channel=item.channel, action='episodiosxtemp', url=item.url,
                             title=title, contentSerieName=item.contentSerieName,
                             thumbnail=thumbnail, plot=plot, fanart=fanart,
                             contentSeasonNumber=contentSeasonNumber, infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]',
                             url=item.url, action="add_serie_to_library", extra="episodiosxtemp",
                             contentSerieName=item.contentSerieName, contentYear=item.contentYear,
                             extra1='library'))

    return itemlist
def temporadas(item): logger.info("pelisalacarta.channels.pelisplus temporadas") itemlist = [] templist =[] data = scrapertools.cache_page(item.url) patron = '<span class="ico accordion_down"><\/span>Temporada([^<]+)' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedtitle in matches: url = item.url title = 'Temporada '+scrapedtitle thumbnail = scrapertools.find_single_match(data,'<img src="([^"]+)" alt="" class="picture-movie">') plot = scrapertools.find_single_match(data,'<span>Sinopsis:<\/span>.([^<]+).<span class="text-detail-hide"><\/span>') fanart = scrapertools.find_single_match(data,'<img src="([^"]+)"/>.*?</a>') if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"])") itemlist.append( Item(channel=item.channel, action="episodios" , title=title , fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot, fanart = fanart, extra=scrapedtitle.rstrip('\n'), contentSerieName =item.contentSerieName)) if item.extra == 'temporadas': for tempitem in itemlist: templist += episodios(tempitem) if config.get_library_support() and len(itemlist) > 0: itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]', url=item.url, action="add_serie_to_library", extra="temporadas", contentSerieName=item.contentSerieName)) if item.extra == 'temporadas': return templist else: return itemlist
def findvideos(item): logger.info("pelisalacarta.channels.tvvip findvideos") itemlist = [] # En caso de llamarse a la función desde una serie de la biblioteca if item.extra.startswith("http"): item.url = item.extra data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers) data = jsontools.load_json(data) id = urllib.quote(data['id']) for child in data["profiles"].keys(): videopath = urllib.quote(data["profiles"][child]['videoUri']) for i in range(0, len(data["profiles"][child]['servers'])): url = data["profiles"][child]['servers'][i]['url'] + videopath size = " " + data["profiles"][child]["sizeHuman"] resolution = " [" + (data["profiles"][child]['videoResolution']) + "]" title = "Ver vídeo en " + resolution.replace('1920x1080', 'HD-1080p') if i == 0: title += size + " [COLOR purple]Mirror " + str(i + 1) + "[/COLOR]" else: title += size + " [COLOR green]Mirror " + str(i + 1) + "[/COLOR]" # Para poner enlaces de mayor calidad al comienzo de la lista if data["profiles"][child]["profileId"] == "default": itemlist.insert(i, item.clone(action="play", server="directo", title=bbcode_kodi2html(title), url=url, contentTitle=item.fulltitle, viewmode="list", extra=id, folder=False)) else: itemlist.append(item.clone(action="play", server="directo", title=bbcode_kodi2html(title), url=url, contentTitle=item.fulltitle, viewmode="list", extra=id, folder=False)) if len(itemlist) > 0 and item.category != "Cine" and item.category != "" and item.category != "Series": if config.get_library_support(): itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la biblioteca", text_color="green", contentTitle=item.fulltitle, url=item.url, action="add_pelicula_to_library", infoLabels={'title':item.fulltitle}, fulltitle=item.fulltitle)) return itemlist
def findvideos(item): logger.info("pelisalacarta.channels.tvvip findvideos") itemlist = [] # En caso de llamarse a la función desde una serie de la biblioteca if item.title.startswith("http"): item.url = item.title.split('%')[0] data = anti_cloudflare(item.url) data = jsontools.load_json(data) for child in data["profiles"].keys(): videopath = data["profiles"][child]['videoPath'] extension = videopath[-4:] head = header_string + get_cookie_value(extension) for i in range(0, len(data["profiles"][child]['servers'])): url = data["profiles"][child]['servers'][i]['url'] + videopath + head size = " "+data["profiles"][child]["sizeHuman"] resolution = " ["+(data["profiles"][child]['videoResolution'])+"]" if i == 0: title = "Ver vídeo en " + resolution.replace('1920x1080','HD-1080p') + size + " [COLOR purple]Mirror "+str(i+1)+"[/COLOR]" else: title = "Ver vídeo en " + resolution.replace('1920x1080','HD-1080p') + size + " [COLOR green]Mirror "+str(i+1)+"[/COLOR]" # Para poner enlaces de mayor calidad al comienzo de la lista if data["profiles"][child]["profileId"] == "default": itemlist.insert(i, Item(channel=__channel__, action='play', server='directo', title=title , url=url , thumbnail=item.thumbnail, fanart=item.fanart, fulltitle=item.fulltitle, plot=item.plot, folder=False) ) else: itemlist.append( Item(channel=__channel__, action='play', server='directo', title=title , url=url , thumbnail=item.thumbnail, fanart=item.fanart, fulltitle=item.fulltitle, plot=item.plot, folder=False) ) if len(itemlist) > 0 and item.category == "tvvip": if config.get_library_support(): itemlist.append( Item(channel=__channel__, title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]", url=item.url, action="add_pelicula_to_library", fulltitle=item.fulltitle)) return itemlist
def episodios(item): logger.info("pelisalacarta.channels.pordede episodios") itemlist = [] headers = DEFAULT_HEADERS[:] # Descarga la pagina idserie = '' data = scrapertools.cache_page(item.url, headers=headers) if (DEBUG): logger.info("data="+data) patrontemporada = '<div class="checkSeason"[^>]+>([^<]+)<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>' matchestemporadas = re.compile(patrontemporada,re.DOTALL).findall(data) idserie = scrapertools.find_single_match(data,'<div id="layout4" class="itemProfile modelContainer" data-model="serie" data-id="(\d+)"') for nombre_temporada,bloque_episodios in matchestemporadas: if (DEBUG): logger.info("nombre_temporada="+nombre_temporada) if (DEBUG): logger.info("bloque_episodios="+bloque_episodios) # Extrae los episodios patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">([^<]+)</span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?' matches = re.compile(patron,re.DOTALL).findall(bloque_episodios) for scrapedurl,numero,scrapedtitle,info,visto in matches: #visto_string = "[visto] " if visto.strip()=="active" else "" if visto.strip()=="active": visto_string = "[visto] " else: visto_string = "" title = visto_string+nombre_temporada.replace("Temporada ", "").replace("Extras", "Extras 0")+"x"+numero+" "+scrapertools.htmlclean(scrapedtitle) thumbnail = item.thumbnail fanart= item.fanart plot = "" #http://www.pordede.com/peli/the-lego-movie #http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1 #http://www.pordede.com/links/viewepisode/id/475011?popup=1 epid = scrapertools.find_single_match(scrapedurl,"id/(\d+)") url = "http://www.pordede.com/links/viewepisode/id/"+epid itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, fanart= fanart, show=item.show)) if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]") if config.get_library_support(): # con año y valoracion la serie no se puede actualizar correctamente, si ademas cambia la valoracion, creara otra carpeta # Sin año y sin valoración: show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show) # Sin año: #show = re.sub(r"\s\(\d+\)", "", item.show) # Sin valoración: #show = re.sub(r"\s\(\d+\.\d+\)", "", item.show) itemlist.append( Item(channel='pordede', title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios###", show=show) ) itemlist.append( Item(channel='pordede', title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=show)) itemlist.append( Item(channel='pordede', title="Marcar como Pendiente", tipo="serie", idtemp=idserie, valor="1", action="pordede_check", show=show)) itemlist.append( Item(channel='pordede', title="Marcar como Siguiendo", tipo="serie", idtemp=idserie, valor="2", action="pordede_check", show=show)) itemlist.append( Item(channel='pordede', title="Marcar como Finalizada", tipo="serie", idtemp=idserie, valor="3", action="pordede_check", show=show)) itemlist.append( Item(channel='pordede', title="Marcar como Favorita", tipo="serie", idtemp=idserie, valor="4", action="pordede_check", show=show)) itemlist.append( Item(channel='pordede', title="Quitar marca", tipo="serie", idtemp=idserie, valor="0", action="pordede_check", 
show=show)) return itemlist
def temporadas(item): logger.info() itemlist = [] templist =[] data = httptools.downloadpage(item.url).data patron = '<span class="ico accordion_down"><\/span>Temporada([^<]+)' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedtitle in matches: infoLabels=item.infoLabels url = item.url title = 'Temporada '+scrapedtitle.strip(' \r\n') thumbnail = scrapertools.find_single_match(data,'<img src="([^"]+)" alt="" class="picture-movie">') plot = scrapertools.find_single_match(data,'<span>Sinopsis:<\/span>.([^<]+).<span class="text-detail-hide"><\/span>') fanart = scrapertools.find_single_match(data,'<img src="([^"]+)"/>.*?</a>') contentSeasonNumber = scrapedtitle.strip(' \r\n') itemlist.append( Item(channel=item.channel, action="episodios" , title=title, fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot, fanart = fanart, extra=scrapedtitle.rstrip('\n'), contentSerieName =item.contentSerieName, contentSeasonNumber = contentSeasonNumber, infoLabels={'season':contentSeasonNumber})) if item.extra == 'temporadas': for tempitem in itemlist: templist += episodios(tempitem) else: tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) if config.get_library_support() and len(itemlist) > 0: itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]', url=item.url, action="add_serie_to_library", extra="temporadas", contentSerieName=item.contentSerieName, contentSeasonNumber=contentSeasonNumber)) if item.extra == 'temporadas': return templist else: return itemlist
def getTemporadas(item):
    # Walk each season of a series and return all of its episodes
    logger.info("[peliserie.py] getTemporadas")
    itemlist = []
    list_fanart = ""
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", scrapertools.cache_page(item.url))
    patron = '<div class="tabs">(.*?)</div>'
    data = scrapertools.get_match(data, patron)
    patron = '<a href="\?season=(\d+)"'
    seasons = re.compile(patron, re.DOTALL).findall(data)

    if item.extra != "add_serie":
        # Fetch fanart
        oTvdb = TvDb()
        serieID = oTvdb.get_serieId_by_title(item.show)
        if serieID != "0":
            list_fanart = oTvdb.get_graphics_by_serieId(serieID)
        logger.info("[peliserie.py] getTemporadas item.fanart =" + str(item.fanart))

    for s in seasons:
        if "?season=" in item.url:
            item.url = re.compile("\?season=\d+", re.DOTALL).sub("?season=" + s, item.url)
        else:
            item.url = item.url + "?season=1"
        if len(list_fanart) > 0:
            item.fanart = random.choice(list_fanart)
        else:
            item.fanart = ""
        if item.extra == "series" and len(seasons) > 1:
            itemlist.append(Item(channel=__channel__,
                                 title=item.show + ". Temporada " + s,
                                 url=item.url,
                                 action="getEpisodios",
                                 show=item.show,
                                 thumbnail=item.thumbnail,
                                 fanart=item.fanart))
        else:
            itemlist.extend(getEpisodios(item))

    if config.get_library_support() and len(itemlist) > 0 and (item.extra == "series" or item.extra == "add_serie"):
        itemlist.append(Item(channel=__channel__,
                             title="Añadir esta serie a la biblioteca de XBMC",
                             url=item.url,
                             action="add_serie_to_library",
                             extra="episodios###series",
                             show=item.show))

    return itemlist
def findvideos(item): logger.info("pelisalacarta.altorrent findvideos") itemlist = [] th = Thread(target=get_art(item)) th.setDaemon(True) th.start() data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ","",data) enlaces = scrapertools.find_multiple_matches(data,'id="modal-quality-\w+"><span>(.*?)</span>.*?class="quality-size">(.*?)</p>.*?href="([^"]+)"') for calidad,size,url in enlaces: title ="[COLOR palegreen][B]Torrent[/B][/COLOR]"+" "+"[COLOR chartreuse]"+calidad+"[/COLOR]"+"[COLOR teal] ( [/COLOR]"+"[COLOR forestgreen]"+size+"[/COLOR]"+"[COLOR teal] )[/COLOR]" itemlist.append( Item(channel=item.channel, title = title, url=url, action="play",server="torrent", fanart=item.fanart,thumbnail= item.thumbnail,extra=item.extra,InfoLabels=item.infoLabels, folder=False) ) dd=scrapertools.find_single_match(data,'button-green-download-big".*?href="([^"]+)"><span class="icon-play">') if dd: if item.library: itemlist.append( Item(channel=item.channel, title = "[COLOR floralwhite][B]Online[/B][/COLOR]" , url=dd, action="dd_y_o", thumbnail="http://imgur.com/mRmBIV4.png", fanart=item.extra.split("|")[0],contentType=item.contentType, extra=item.extra, folder=True) ) else: videolist = servertools.find_video_items(data=str(dd)) for video in videolist: icon_server = os.path.join( config.get_runtime_path() , "resources" , "images" , "servers" , "server_"+video.server+".png" ) if not os.path.exists(icon_server): icon_server = "" itemlist.append(Item(channel=item.channel ,url=video.url, server=video.server,title="[COLOR floralwhite][B]"+video.server+"[/B][/COLOR]",thumbnail=icon_server,fanart=item.extra.split("|")[1], action="play", folder=False) ) if item.library and config.get_library_support() and itemlist : infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'title': item.infoLabels['title']} itemlist.append(Item(channel=item.channel, title="Añadir esta película a la biblioteca", action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, text_color="0xFFe5ffcc", thumbnail='http://imgur.com/DNCBjUB.png',extra="library")) return itemlist
def episodios(item): logger.info("streamondemand.animesubita episodios") itemlist = [] # Descarga la pagina headers.append(['Referer', item.url]) data = scrapertools.cache_page(item.url, headers=headers) # Extrae las entradas (carpetas) patron = '<a href="([^"]+)"> <img src="([^"]+)" alt="([^"]+)".*?' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedthumbnail, scrapedtitle in matches: itemlist.append( Item(channel=__channel__, action="findvid", title=scrapedtitle, fulltitle=item.fulltitle, show=item.show, url=scrapedurl, thumbnail=scrapedthumbnail)) if config.get_library_support() and len(itemlist) != 0: itemlist.append( Item(channel=__channel__, title=item.title, url=item.url, action="add_serie_to_library", extra="episodios", show=item.show)) return itemlist
def findvideos(item): logger.info("pelisalacarta.channels.pelisdanko findvideos") itemlist = [] if item.url[-2:] == "ss": prefix = "strms" else: prefix = "lnks" # Descarga la pagina data = scrapertools.downloadpage(item.url) # Parametros para redireccion donde muestra los enlaces data_slug = scrapertools.find_single_match(data, '<div id="ad" data-id="[^"]+" data-slug="([^"]+)"') data_id = scrapertools.find_single_match(data, '<tr class="rip hover" data-id="([^"]+)"') url = "http://pelisdanko.com/%s/%s/%s/%s" % (prefix, data_id, item.id_enlaces, data_slug) data = scrapertools.downloadpage(url, post="") from core import servertools video_item_list = servertools.find_video_items(data=data) for video_item in video_item_list: title = "[COLOR green]%s[/COLOR] | [COLOR darkorange][%s][/COLOR]" % (video_item.server, item.calidad) itemlist.append(item.clone(title=bbcode_kodi2html(title), url=video_item.url, action="play", server=video_item.server, text_color="")) # Opción "Añadir esta película a la biblioteca de XBMC" if config.get_library_support() and len(itemlist) > 0 and item.category != "Cine": itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url, infoLabels={'title': item.fulltitle}, action="add_pelicula_to_library", fulltitle=item.fulltitle, text_color="green", id_enlaces=item.id_enlaces)) return itemlist
def episodios(item): logger.info("pelisalacarta.channels.seriesflv episodios") itemlist = [] # Descarga la pagina headers = DEFAULT_HEADERS[:] data = scrapertools.cache_page(item.url,headers=headers) #logger.info("data="+data) # Extrae los episodios ''' <tr> <td class="sape"><i class="glyphicon glyphicon-film"></i> <a href="http://www.seriesflv.net/ver/game-of-thrones-1x9.html" class="color4">Game of Thrones (Juego de tronos) 1x09</a></td> <td> <a href="javascript:void(0);" class="loginSF" title="Marcar Visto"><span class="no visto"></span></a> </td> <td><div class="star_rating"> <ul class="star"> <li class="curr" style="width: 99.6%;"></li> </ul> </div></td> <td> <img src="http://www.seriesflv.net/images/lang/es.png" width="20" /> <img src="http://www.seriesflv.net/images/lang/la.png" width="20" /> <img src="http://www.seriesflv.net/images/lang/sub.png" width="20" /> </td> <td>40,583</td> </tr> ''' patron = '<tr[^<]+<td class="sape"><i class="glyphicon glyphicon-film"></i[^<]+' patron += '<a href="([^"]+)"[^>]+>([^<]+)</a>.*?<img(.*?)</td' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedtitle,bloqueidiomas in matches: title = scrapedtitle+" (" patronidiomas="lang/([a-z]+).png" matchesidiomas = re.compile(patronidiomas,re.DOTALL).findall(bloqueidiomas) for idioma in matchesidiomas: title=title+get_nombre_idioma(idioma)+", " title=title[:-2]+")" thumbnail = "" plot = "" url = scrapedurl ## Sólo nos interesa el título de la serie show = re.sub(" \([^\)]+\)$","",item.show) ## Se a añadido el parámetro show itemlist.append( Item(channel=__channel__, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=show)) if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]") ## Opción "Añadir esta serie a la biblioteca de XBMC" if config.get_library_support() and len(itemlist)>0: itemlist.append( Item(channel=__channel__, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=show) ) return itemlist
def findvideos(item): logger.info("pelisalacarta.channels.verseriesynovelas findvideos") itemlist = [] if item.title.startswith("http"): item.url = item.title.split('%')[0] data = anti_cloudflare(item.url) data = data.replace("\n","").replace("\t","") patron = '<tr><td data-th="Idioma">(.*?)</div>' bloque = scrapertools.find_multiple_matches(data, patron) for match in bloque: patron = '.*?data-th="Calidad">(.*?)<.*?' patron += '"Servidor".*?src="http://www.google.com/s2/favicons\?domain=(.*?)\.' patron += '.*?<td data-th="Enlace"><a href="(http://www.verseriesynovelas.tv/enlaces.php.*?)"' matches = scrapertools.find_multiple_matches(match, patron) for quality, server, url in matches: if server == "streamin": server = "streaminto" if server== "waaw": server = "netutv" if server == "ul": server = "uploadedto" try: servers_module = __import__("servers."+server) title = "Enlace encontrado en "+server+" ["+quality+"]" if "Español.png" in match: title += " [COLOR sandybrown][CAST][/COLOR]" if "VOS.png" in match: title += " [COLOR green][VOSE][/COLOR]" if "Latino.png" in match: title += " [COLOR red][LAT][/COLOR]" if "VO.png" in match: title += " [COLOR blue][V.O][/COLOR]" itemlist.append( Item(channel=__channel__, action="play", title=title , url=url , fulltitle = item.fulltitle, thumbnail=item.thumbnail , fanart=item.fanart, plot=item.plot, folder=True) ) except: pass if len(itemlist) == 0: itemlist.append( Item(channel=__channel__, action="", title="No se ha encontrado ningún enlace" , url="" , thumbnail="", fanart=item.fanart, folder=False) ) else: if config.get_library_support() and item.category == "": itemlist.append( Item(channel=__channel__, title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]", url=item.url, action="add_pelicula_to_library", fulltitle=item.title.split(" [")[0], show=item.title)) return itemlist
def episodios(item):
    def load_episodios(html, item, itemlist, lang_title):
        for data in scrapertools.decodeHtmlentities(html).splitlines():
            # Extract the entries
            end = data.find('<a ')
            if end > 0:
                scrapedtitle = re.sub(r'<[^>]*>', '', data[:end]).strip()
            else:
                scrapedtitle = ''
            if scrapedtitle == '':
                patron = '<a\s*href="[^"]+"\s*target="_blank">([^<]+)</a>'
                scrapedtitle = scrapertools.find_single_match(data, patron).strip()
            title = scrapertools.find_single_match(scrapedtitle, '\d+[^\d]+\d+')
            if title == '':
                title = scrapedtitle
            if title != '':
                itemlist.append(Item(channel=__channel__,
                                     action="findvid_serie",
                                     title=title + " (" + lang_title + ")",
                                     url=item.url,
                                     thumbnail=item.thumbnail,
                                     extra=data,
                                     fulltitle=item.fulltitle,
                                     show=item.show))

    logger.info("[italiafilm.py] episodios")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    start = data.find('id="pd_rating_holder')
    end = data.find('id="linkcorrotto-show"', start)
    data = data[start:end]

    lang_titles = []
    starts = []
    patron = r"STAGION[I|E].*?ITA"
    matches = re.compile(patron, re.IGNORECASE).finditer(data)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
            starts.append(match.end())

    i = 1
    len_lang_titles = len(lang_titles)
    while i <= len_lang_titles:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_lang_titles else -1
        html = data[inizio:fine]
        lang_title = lang_titles[i - 1]
        load_episodios(html, item, itemlist, lang_title)
        i += 1

    if len(itemlist) == 0:
        load_episodios(data, item, itemlist, 'ITA')

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(Item(channel=__channel__,
                             title=item.title,
                             url=item.url,
                             action="add_serie_to_library",
                             extra="episodios",
                             show=item.show))
        itemlist.append(Item(channel=item.channel,
                             title="Scarica tutti gli episodi della serie",
                             url=item.url,
                             action="download_all_episodes",
                             extra="episodios",
                             show=item.show))

    return itemlist
def findvideos(item):
    logger.info()
    itemlist = []
    if item.temp:
        url_epis = item.url
    data = httptools.downloadpage(item.url).data

    if not item.infoLabels['episode'] or item.temp:
        # Run the artwork lookup in the background (pass the callable, not its result)
        th = Thread(target=get_art, args=[item])
        th.setDaemon(True)
        th.start()

    if item.contentType != "movie":
        if not item.infoLabels['episode']:
            capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)')
            url_capitulo = scrapertools.find_single_match(
                data, '<a href="(http://www.divxtotal.com/wp-content/uploads/.*?' + capitulo + '.*?.torrent)')
            if len(item.extra.split("|")) >= 2:
                extra = item.extra
            else:
                extra = item.fanart
        else:
            capitulo = item.title
            url_capitulo = item.url
        try:
            fanart = item.fanart_extra
        except:
            fanart = item.extra.split("|")[0]

        url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"')
        for option, url in url_data:
            server, idioma = scrapertools.find_single_match(
                data, 'href="#option-' + option + '">.*?</b>(.*?)<span class="dt_flag">.*?flags/(.*?).png')
            if not item.temp:
                item.infoLabels['year'] = None
            if item.temp:
                capitulo = re.sub(r".*--.*", "", capitulo)
                title = ("[COLOR darkcyan][B]Ver capítulo [/B][/COLOR][COLOR red][B]"
                         + capitulo + "[/B][/COLOR]")
                new_item = item.clone(title=title, url=url, action="play", fanart=fanart,
                                      thumbnail=item.thumbnail, server_v=server, idioma=idioma,
                                      extra=item.extra, fulltitle=item.fulltitle, folder=False)
                if not item.check_temp:
                    new_item.infoLabels['episode'] = item.epi
                    new_item.infoLabels['season'] = item.temp
                itemlist.append(new_item)
            else:
                title = ("[COLOR darkcyan][B]Ver capítulo [/B][/COLOR][COLOR red][B]"
                         + capitulo + "[/B][/COLOR] [COLOR darkred]" + server
                         + " ( " + idioma + " )[/COLOR]")
                itemlist.append(Item(channel=item.channel, title=title, url=url, action="play",
                                     fanart=fanart, thumbnail=item.thumbnail, extra=item.extra,
                                     fulltitle=item.fulltitle, folder=False))

        if item.temp:
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
            for item in itemlist:
                if item.infoLabels['title']:
                    logger.info("yes")
                    title_inf = "[COLOR royalblue]" + item.infoLabels['title'] + "[/COLOR]"
                else:
                    logger.info("no")
                    title_inf = ("[COLOR royalblue]Capítulo "
                                 + str(item.infoLabels['episode']) + "[/COLOR]")
                item.title = (item.title + " -- \"" + title_inf + "\" [COLOR darkred]"
                              + item.server_v + " ( " + item.idioma + " )[/COLOR]")

        if item.infoLabels['episode'] and item.library or item.temp and item.library:
            thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg')
            if thumbnail == "":
                thumbnail = item.thumbnail
            if not "assets.fanart" in item.fanart_info:
                fanart = item.fanart_info
            else:
                fanart = item.fanart
            if item.temp:
                item.infoLabels['tvdb_id'] = item.tvdb
            itemlist.append(Item(channel=item.channel,
                                 title="[COLOR steelblue][B] info[/B][/COLOR]",
                                 action="info_capitulos", fanart=fanart,
                                 thumbnail=item.thumb_art, thumb_info=item.thumb_info,
                                 extra=item.extra, show=item.show,
                                 InfoLabels=item.infoLabels, folder=False))

        if item.temp and not item.check_temp:
            url_epis = re.sub(r"-\dx.*", "", url_epis)
            url_epis = url_epis.replace("episodios", "series")
            itemlist.append(Item(channel=item.channel,
                                 title="[COLOR salmon][B]Todos los episodios[/B][/COLOR]",
                                 url=url_epis, action="findtemporadas", server="torrent",
                                 fanart=item.extra.split("|")[1],
                                 thumbnail=item.infoLabels['thumbnail'],
                                 extra=item.extra + "|" + item.thumbnail,
                                 contentType=item.contentType, contentTitle=item.contentTitle,
                                 InfoLabels=item.infoLabels, thumb_art=item.thumb_art,
                                 thumb_info=item.thumbnail, fulltitle=item.fulltitle,
                                 library=item.library, temp=item.temp, folder=True))
    else:
        url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"')
        for option, url in url_data:
            server, idioma = scrapertools.find_single_match(
                data, 'href="#option-' + option + '">.*?</b>(.*?)<span class="dt_flag">.*?flags/(.*?).png')
            title = server + " ( " + idioma + " )"
            item.infoLabels['year'] = None
            itemlist.append(Item(channel=item.channel,
                                 title="[COLOR dodgerblue][B]" + title + " [/B][/COLOR]",
                                 url=url, action="play", fanart=item.fanart,
                                 thumbnail=item.thumbnail, extra=item.extra,
                                 InfoLabels=item.infoLabels, folder=True))

    if item.library and config.get_library_support() and len(itemlist) > 0:
        infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
                      'title': item.infoLabels['title']}
        itemlist.append(Item(channel=item.channel, title="Añadir esta película a la biblioteca",
                             action="add_pelicula_to_library", url=item.url,
                             fanart=item.extra.split("|")[0], infoLabels=infoLabels,
                             text_color="0xFFe5ffcc", thumbnail='http://imgur.com/3ik73p8.png'))

    return itemlist
def findtemporadas(item):
    logger.info()
    itemlist = []

    if not item.temp:
        # Run the artwork lookup in the background (pass the callable, not its result)
        th = Thread(target=get_art, args=[item])
        th.setDaemon(True)
        th.start()
        check_temp = None
    else:
        check_temp = "yes"

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    # item.extra packs up to six "|"-separated fields; pick the artwork out of it
    if len(item.extra.split("|")):
        if len(item.extra.split("|")) >= 4:
            fanart = item.extra.split("|")[2]
            extra = item.extra.split("|")[3]
            try:
                fanart_extra = item.extra.split("|")[4]
            except:
                fanart_extra = item.extra.split("|")[3]
            try:
                fanart_info = item.extra.split("|")[5]
            except:
                fanart_info = item.extra.split("|")[3]
        elif len(item.extra.split("|")) == 3:
            fanart = item.extra.split("|")[2]
            extra = item.extra.split("|")[0]
            fanart_extra = item.extra.split("|")[0]
            fanart_info = item.extra.split("|")[1]
        elif len(item.extra.split("|")) == 2:
            fanart = item.extra.split("|")[1]
            extra = item.extra.split("|")[0]
            fanart_extra = item.extra.split("|")[0]
            fanart_info = item.extra.split("|")[1]
        else:
            fanart = item.fanart  # fallback so fanart is always bound
            extra = item.extra
            fanart_extra = item.extra
            fanart_info = item.extra
    try:
        logger.info(fanart_extra)
        logger.info(fanart_info)
    except:
        fanart_extra = item.fanart
        fanart_info = item.fanart

    bloque_episodios = scrapertools.find_multiple_matches(
        data, 'Temporada (\d+) <i>(.*?)</div></li></ul></div></div>')
    for temporada, bloque_epis in bloque_episodios:
        item.infoLabels = item.InfoLabels
        item.infoLabels['season'] = temporada
        itemlist.append(item.clone(action="epis",
                                   title="[COLOR cornflowerblue][B]Temporada [/B][/COLOR][COLOR darkturquoise][B]" + temporada + "[/B][/COLOR]",
                                   url=bloque_epis, contentType=item.contentType,
                                   contentTitle=item.contentTitle, show=item.show,
                                   extra=item.extra, fanart_extra=fanart_extra,
                                   fanart_info=fanart_info, datalibrary=data,
                                   check_temp=check_temp, folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    for item in itemlist:
        item.fanart = fanart
        item.extra = extra
        if item.temp:
            if item.infoLabels['temporada_poster']:
                item.thumbnail = item.infoLabels['temporada_poster']
            else:
                item.thumbnail = item.infoLabels['thumbnail']

    if config.get_library_support() and itemlist:
        if len(bloque_episodios) == 1:
            extra = "epis"
        else:
            extra = "epis###serie_add"
        infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
                      'tvdb_id': item.infoLabels['tvdb_id'],
                      'imdb_id': item.infoLabels['imdb_id']}
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la biblioteca",
                             text_color="0xFFe5ffcc", action="add_serie_to_library", extra=extra,
                             url=item.url, contentSerieName=item.fulltitle,
                             infoLabels=infoLabels, thumbnail='http://imgur.com/3ik73p8.png',
                             datalibrary=data))

    return itemlist
def episodios(item):
    def load_episodios(html, item, itemlist):
        for data in html.splitlines():
            # Extract the entries
            end = data.find('<a ')
            if end > 0:
                scrapedtitle = re.sub(r'<[^>]*>', '', data[:end]).strip()
            else:
                scrapedtitle = ''
            if scrapedtitle == '':
                patron = '<a\s*href="[^"]+"(?:\s*target="_blank")?>([^<]+)</a>'
                scrapedtitle = scrapertools.find_single_match(data, patron).strip()
            title = scrapertools.find_single_match(scrapedtitle, '\d+[^\d]+\d+')
            if title == '':
                title = scrapedtitle
            if title != '':
                itemlist.append(Item(channel=__channel__,
                                     action="findvideos",
                                     title=title,
                                     url=item.url,
                                     thumbnail=item.thumbnail,
                                     extra=data,
                                     fulltitle=item.fulltitle,
                                     show=item.show))

    logger.info("streamondemand.tantifilm episodios")
    itemlist = []

    data = scrapertools.cache_page(item.url, headers=headers)
    data = scrapertools.decodeHtmlentities(data)

    start = data.find('<div class="sp-wrap sp-wrap-blue">')
    end = data.find('<div id="disqus_thread">', start)
    data_sub = data[start:end]

    starts = []
    patron = r".*?STAGIONE|MINISERIE|WEBSERIE|SERIE"
    matches = re.compile(patron, re.IGNORECASE).finditer(data_sub)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            starts.append(match.end())

    i = 1
    len_starts = len(starts)
    while i <= len_starts:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_starts else -1
        html = data_sub[inizio:fine]
        load_episodios(html, item, itemlist)
        i += 1

    if len(itemlist) == 0:
        patron = '<a href="(#wpwm-tabs-\d+)">([^<]+)</a></li>'
        seasons_episodes = re.compile(patron, re.DOTALL).findall(data)

        end = None
        for scrapedtag, scrapedtitle in seasons_episodes:
            start = data.find(scrapedtag, end)
            end = data.find('<div class="clearfix"></div>', start)
            html = data[start:end]
            itemlist.append(Item(channel=__channel__,
                                 action="findvideos",
                                 contentType="episode",
                                 title=scrapedtitle,
                                 url=item.url,
                                 thumbnail=item.thumbnail,
                                 extra=html,
                                 fulltitle=item.fulltitle,
                                 show=item.show))

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(Item(channel=__channel__,
                             title="Aggiungi alla libreria",
                             url=item.url,
                             action="add_serie_to_library",
                             extra="episodios",
                             show=item.show))
        itemlist.append(Item(channel=__channel__,
                             title="Scarica tutti gli episodi della serie",
                             url=item.url,
                             action="download_all_episodes",
                             extra="episodios",
                             show=item.show))

    return itemlist
def findvideos(item):
    logger.info()
    itemlist = []
    item.text_color = color2
    # Download the page
    data = httptools.downloadpage(item.url).data
    sinopsis = scrapertools.find_single_match(data, '<h2>Sinopsis</h2>.*?>(.*?)</p>')
    item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    # Look the title up on tmdb, unless it is erotic or already comes from the library
    if item.extra != "eroticas" and item.extra != "library":
        year = scrapertools.find_single_match(data, 'Año de lanzamiento.*?"ab">(\d+)')
        if year:
            try:
                item.infoLabels['year'] = year
                # Basic data for all the movies, fetched with several threads
                tmdb.set_infoLabels(item, __modo_grafico__)
            except:
                pass

    trailer_url = scrapertools.find_single_match(data, 'id="trailerpro">.*?src="([^"]+)"')
    if not trailer_url:
        trailer_url = "www.youtube.com/watch?v=TqqF3-qgJw4"
    item.infoLabels["trailer"] = trailer_url

    patron = '<td><a href="([^"]+)".*?title="([^"]+)".*?<td>([^"]+)<\/td><td>([^"]+)<\/td>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, server, idioma, calidad in matches:
        if server == "Embed":
            server = "Nowvideo"
        if server == "Ul":
            server = "Uploaded"
        title = "%s [%s][%s]" % (server, idioma, calidad)
        itemlist.append(item.clone(action="play", title=title, url=url))

    patron = 'id="(embed[0-9]*)".*?<div class="calishow">(.*?)<.*?src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for id_embed, calidad, url in matches:
        title = scrapertools.find_single_match(url, "(?:http://|https://|//)(.*?)(?:embed.|videoembed|)/")
        if re.search(r"(?i)inkapelis|goo.gl", title):
            title = "Directo"
        idioma = scrapertools.find_single_match(data, 'href="#%s".*?>([^<]+)<' % id_embed)
        title = "%s [%s][%s]" % (title.capitalize(), idioma, calidad)
        itemlist.append(item.clone(action="play", title=title, url=url))

    if itemlist:
        if not config.get_setting('menu_trailer', item.channel):
            itemlist.append(item.clone(channel="trailertools", action="buscartrailer",
                                       title="Buscar Tráiler", text_color="magenta", context=""))
        if item.extra != "library":
            if config.get_library_support():
                itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
                                     action="add_pelicula_to_library", url=item.url,
                                     fulltitle=item.fulltitle,
                                     infoLabels={'title': item.fulltitle},
                                     text_color="green", extra="library"))
    return itemlist
def episodios(item):
    def load_episodios(html, item, itemlist, lang_title):
        patron = '((?:.*?<a[^h]+href="[^"]+"[^>]+>[^<][^<]+<(?:b|\/)[^>]+>)+)'
        matches = re.compile(patron).findall(html)
        for data in matches:
            # Extract the entry
            scrapedtitle = data.split('<a ')[0]
            scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
            if scrapedtitle != 'Categorie':
                scrapedtitle = scrapedtitle.replace('×', 'x')
                itemlist.append(Item(channel=__channel__, action="findvideos", contentType="episode",
                                     title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
                                     url=data, thumbnail=item.thumbnail, extra=item.extra,
                                     fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
                                     show=item.show))

    logger.info("[italiaserie.py] episodios")
    itemlist = []
    # Download the page
    data = scrapertools.cache_page(item.url)
    data = scrapertools.decodeHtmlentities(data)
    data = scrapertools.get_match(data, '<div class="su-spoiler-title">(.*?)<span style="color: #e0e0e0;">')

    lang_titles = []
    starts = []
    patron = r"Stagione.*?ITA"
    matches = re.compile(patron, re.IGNORECASE).finditer(data)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
            starts.append(match.end())

    i = 1
    len_lang_titles = len(lang_titles)
    while i <= len_lang_titles:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_lang_titles else -1
        html = data[inizio:fine]
        lang_title = lang_titles[i - 1]
        load_episodios(html, item, itemlist, lang_title)
        i += 1

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(Item(channel=__channel__, title="Aggiungi alla libreria", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show))
    return itemlist
def menu_info(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    item.infoLabels["tmdb_id"] = scrapertools.find_single_match(
        data, '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
    item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})')
    item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>')
    item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(
        data, '<a itemprop="genre"[^>]+>([^<]+)</a>'))
    if __modo_grafico__:
        tmdb.set_infoLabels_item(item, __modo_grafico__)

    action = "findvideos"
    title = "Ver enlaces"
    if item.contentType == "tvshow":
        action = "episodios"
        title = "Ver capítulos"
    itemlist.append(item.clone(action=action, title=title))

    carpeta = "CINE"
    tipo = "película"
    action = "add_pelicula_to_library"
    extra = ""
    if item.contentType == "tvshow":
        carpeta = "SERIES"
        tipo = "serie"
        action = "add_serie_to_library"
        extra = "episodios###library"

    library_path = config.get_library_path()
    if config.get_library_support():
        title = "Añadir %s a la biblioteca" % tipo
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
            try:
                from core import filetools
                path = filetools.join(library_path, carpeta)
                files = filetools.walk(path)
                for dirpath, dirname, filename in files:
                    if item.infoLabels["imdb_id"] in dirpath:
                        namedir = dirpath.replace(path, '')[1:]
                        for f in filename:
                            if f != namedir + ".nfo" and f != "tvshow.nfo":
                                continue
                            from core import library
                            head_nfo, it = library.read_nfo(filetools.join(dirpath, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "playmax" in canales:
                                canales.pop(canales.index("playmax"))
                                canales.insert(0, "[COLOR red]playmax[/COLOR]")
                            title = "%s ya en tu biblioteca. [%s] ¿Añadir?" % (tipo.capitalize(), ",".join(canales))
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())
        itemlist.append(item.clone(action=action, title=title, text_color=color5, extra=extra))

    token_auth = config.get_setting("token_trakt", "tvmoviedb")
    if token_auth and item.infoLabels["tmdb_id"]:
        extra = "movie"
        if item.contentType != "movie":
            extra = "tv"
        itemlist.append(item.clone(channel="tvmoviedb", title="[Trakt] Gestionar con tu cuenta",
                                   action="menu_trakt", extra=extra))
    itemlist.append(item.clone(channel="trailertools", action="buscartrailer",
                               title="Buscar Tráiler", text_color="magenta", context=""))
    itemlist.append(item.clone(action="", title=""))

    ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
    if not ficha:
        ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')
    itemlist.extend(acciones_fichas(item, sid, ficha, season=True))
    itemlist.append(item.clone(action="acciones_cuenta", title="Añadir a una lista",
                               text_color=color3, ficha=ficha))
    return itemlist
def episodios(item):
    logger.info()
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    if not item.infoLabels["tmdb_id"]:
        item.infoLabels["tmdb_id"] = scrapertools.find_single_match(
            data, '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
        item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})')
    if not item.infoLabels["genre"]:
        item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(
            data, '<a itemprop="genre"[^>]+>([^<]+)</a>'))
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(
            data, 'itemprop="description">([^<]+)</div>')

    dc = scrapertools.find_single_match(data, "var dc_ic = '\?dc=([^']+)'")
    patron = '<div class="f_cl_l_c f_cl_l_c_id[^"]+" c_id="([^"]+)" .*?c_num="([^"]+)" c_name="([^"]+)"' \
             '.*?load_f_links\(\d+\s*,\s*(\d+).*?<div class="([^"]+)" onclick="marcar_capitulo'
    matches = scrapertools.find_multiple_matches(data, patron)
    lista_epis = []
    for c_id, episodio, title, ficha, status in matches:
        episodio = episodio.replace("X", "x")
        if episodio in lista_epis:
            continue
        lista_epis.append(episodio)
        url = "https://playmax.mx/c_enlaces_n.php?ficha=%s&c_id=%s&dc=%s" % (ficha, c_id, dc)
        title = "%s - %s" % (episodio, title)
        if "_mc a" in status:
            title = "[COLOR %s]%s[/COLOR] %s" % (color5, u"\u0474".encode('utf-8'), title)
        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url,
                        thumbnail=item.thumbnail, fanart=item.fanart, show=item.show,
                        infoLabels=item.infoLabels, text_color=color2, referer=item.url,
                        contentType="episode")
        try:
            new_item.infoLabels["season"], new_item.infoLabels["episode"] = episodio.split('x', 1)
        except:
            pass
        itemlist.append(new_item)

    def orden_episodio(it):
        # Numeric (season, episode) key so that e.g. 1x10 sorts after 1x9
        try:
            return int(it.infoLabels["season"]), int(it.infoLabels["episode"])
        except (KeyError, ValueError):
            return 0, 0
    itemlist.sort(key=orden_episodio, reverse=True)

    if __modo_grafico__:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    library_path = config.get_library_path()
    if config.get_library_support() and not item.extra:
        title = "Añadir serie a la biblioteca"
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
            try:
                from core import filetools
                path = filetools.join(library_path, "SERIES")
                files = filetools.walk(path)
                for dirpath, dirname, filename in files:
                    if item.infoLabels["imdb_id"] in dirpath:
                        for f in filename:
                            if f != "tvshow.nfo":
                                continue
                            from core import library
                            head_nfo, it = library.read_nfo(filetools.join(dirpath, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "playmax" in canales:
                                canales.pop(canales.index("playmax"))
                                canales.insert(0, "[COLOR red]playmax[/COLOR]")
                            title = "Serie ya en tu biblioteca. [%s] ¿Añadir?" % ",".join(canales)
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())
        itemlist.append(item.clone(action="add_serie_to_library", title=title,
                                   text_color=color5, extra="episodios###library"))

    if itemlist and not __menu_info__:
        ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
        itemlist.extend(acciones_fichas(item, sid, ficha))
    return itemlist
def findvideos(item):
    logger.info()
    itemlist = []
    audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]',
             'Español': '[COLOR yellow]ESPAÑOL[/COLOR]',
             'Ingles': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]',
             'Latino-Ingles': 'DUAL'}
    data = httptools.downloadpage(item.url).data
    if item.extra != 'series':
        patron = 'data-video="([^"]+)" class="reproductorVideo"><ul><li>([^<]+)<\/li><li>([^<]+)<\/li>'
        tipotitle = item.contentTitle
    elif item.extra == 'series':
        tipotitle = str(item.contentSeasonNumber) + 'x' + str(item.contentEpisodeNumber) + ' ' + item.contentSerieName
        patron = '<li class="enlaces-l"><a href="([^"]+)" target="_blank"><ul><li>([^<]+)<.*?>([^<]+)<.*?>Reproducir<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if item.extra != 'documental':
        n = 0
        for scrapedurl, scrapedcalidad, scrapedaudio in matches:
            if 'series' in item.extra:
                datab = httptools.downloadpage(host + scrapedurl).data
                url = scrapertools.find_single_match(datab, 'class="reproductor"><iframe src="([^"]+)"')
                logger.debug(url + ' esta es la direccion')
            else:
                url = scrapedurl
            title = tipotitle
            # Fall back to the raw label when the language is not in the table
            idioma = audio.get(scrapedaudio, scrapedaudio)
            itemlist.extend(servertools.find_video_items(data=url))
            if n < len(itemlist):
                itemlist[n].title = tipotitle + ' (' + idioma + ' ) ' + '(' + itemlist[n].server + ' )'
            n = n + 1
    else:
        url = scrapertools.find_single_match(data, 'class="reproductor"><iframe src="([^"]+)"')
        itemlist.extend(servertools.find_video_items(data=url))

    for videoitem in itemlist:
        if item.extra == 'documental':
            videoitem.title = item.title + ' (' + videoitem.server + ')'
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False

    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'series':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))
    return itemlist
def findepisodios(item):
    logger.info("pelisalacarta.channels.tumejortv findepisodios")
    itemlist = []
    if item.url.startswith("http://www.tumejortv.com"):
        item.url = item.url.replace("http://www.tumejortv.com", BASE_URL)
    logger.info("url=" + item.url)
    data = scrapertools.cache_page(item.url)
    logger.info("data=" + data)

    #<a href="#" class="antlo_temporadas_li" title="Haga clic para ver listado de capitulos"><img src="http://www.tumejortv.com/images/general/more.png" /> TEMPORADA 1<span style="float:right;"><img src="http://www.tumejortv.com/images/general/estreno.png" alt="EstrenoT"/></span></a><div><table class="antlo_links_table">
    patron = '" class="antlo_temporadas_li" title="Haga clic[^"]+"><img[^>]+>( TEMPORADA [^<]+)<(.*?)</table>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG:
        scrapertools.printMatches(matches)

    for temporada, episodios in matches:
        logger.info("temporada=" + temporada + ", episodios=" + episodios)
        #<tr><td></td><td style="background-color:#f2f2f2;"><a title="Descargar - Ver" alt="Descargar - Ver" href="http://www.tumejortv.com/series/The-walking-Dead-2/temporada-3/capitulo-2/"> <img src="http://www.tumejortv.com/images/general/acceder.gif"><br />Descargar</a></td><td>2</td><td>107</td><td><a title="Descargar - Ver" alt="Descargar - Ver" href="http://www.tumejortv.com/series/The-walking-Dead-2/temporada-3/capitulo-2/"></a></td></tr>
        #patronepisodio = '<tr><td></td><td[^>]+><a title="[^"]+" alt="[^"]+" href="([^"]+)"> <img[^>]+><br />[^<]+</a></td><td>([^<]+)</td><td>([^<]+)</td><td><a[^>]+>([^<]+)</a></td></tr>'
        #<tr><td> <a href="http://www.tumejortv.com/series/90210-La-Nueva-Geracion-/trailers/826" alt="Ver Trailer" title="Ver trailer"><img src="http://www.tumejortv.com/images/general/trailer.png" alt="Trailer"/></a></td><td style="background-color:#f2f2f2;"><a title="Descargar - Ver" alt="Descargar - Ver" href="http://www.tumejortv.com/series/90210-La-Nueva-Geracion-/temporada-3/capitulo-1/"> <img src="http://www.tumejortv.com/images/general/acceder.gif"><br />Descargar</a></td><td>1</td><td>52</td><td><a title="Descargar - Ver" alt="Descargar - Ver" href="http://www.tumejortv.com/ser
        patronepisodio = '<tr>(.*?)</tr>'
        matches2 = re.compile(patronepisodio, re.DOTALL).findall(episodios)
        for match2 in matches2:
            try:
                url = scrapertools.get_match(match2, '<a title="Descargar - Ver" alt="Descargar - Ver" href="([^"]+)"')
            except:
                url = ""
            try:
                episodio = scrapertools.get_match(match2, '</a></td><td>([^<]+)</td>')
            except:
                episodio = ""
            try:
                #</a></td><td>2</td><td>107</td>
                num_enlaces = scrapertools.get_match(match2, '</a></td><td[^<]+</td><td>([^<]+)</td>')
            except:
                num_enlaces = ""
            try:
                titulo = scrapertools.get_match(match2, '<a[^>]+>([^<]+)</a></td></tr>')
            except:
                titulo = ""
            if url != "":
                temporada = temporada.replace("TEMPORADA", "").strip()
                if len(episodio) < 2:
                    episodio = "0" + episodio
                itemlist.append(Item(channel=__channel__, action="findvideos",
                                     title=temporada + "x" + episodio + " " + titulo + " (" + num_enlaces + " enlaces)",
                                     url=url, thumbnail=item.thumbnail, show=item.show,
                                     plot=item.plot, folder=True,
                                     fulltitle=item.title + " " + temporada + "x" + episodio + " " + titulo))

    if config.get_library_support():
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC",
                             url=item.url, action="add_serie_to_library", extra="findepisodios",
                             show=item.show))
    return itemlist
def episodios(item): logger.info("{0} - {1}".format(item.title, item.url)) itemlist = [] # Descarga la página data = httptools.downloadpage(item.url).data fanart = scrapertools.find_single_match( data, "background-image[^'\"]+['\"]([^'\"]+)") plot = scrapertools.find_single_match( data, "id=['\"]profile2['\"]>\s*(.*?)\s*</div>") logger.debug("fanart: {0}".format(fanart)) logger.debug("plot: {0}".format(plot)) ajaxSeasons = re.findall("['\"]loadSeason\((\d+),(\d+)\)", data) ajaxData = "" for showID, seasonNo in ajaxSeasons: logger.debug("Ajax seasson request: Show = {0} - Season = {1}".format( showID, seasonNo)) ajaxData += httptools.downloadpage(HOST + '/ajax/load_season.php?season_id=' + showID + '&season_number=' + seasonNo).data if ajaxData: data = ajaxData episodes = re.findall( "<tr.*?href=['\"](?P<url>[^'\"]+).+?>(?P<title>.+?)</a>.*?<td>(?P<flags>.*?)</td>", data, re.MULTILINE | re.DOTALL) for url, title, flags in episodes: idiomas = " ".join([ "[{0}]".format(IDIOMAS.get(language, "OVOS")) for language in re.findall("banderas/([^\.]+)", flags, re.MULTILINE) ]) displayTitle = "{show} - {title} {languages}".format(show=item.show, title=title, languages=idiomas) logger.debug("Episode found {0}: {1}".format( displayTitle, urlparse.urljoin(HOST, url))) itemlist.append( item.clone(title=displayTitle, url=urlparse.urljoin(HOST, url), action="findvideos", plot=plot, fanart=fanart, language=idiomas, list_idiomas=list_idiomas, list_calidad=CALIDADES, context=filtertools.context)) if len(itemlist) > 0 and filtertools.context: itemlist = filtertools.get_links(itemlist, item.channel) if config.get_library_support() and len(itemlist) > 0: itemlist.append( item.clone(title="Añadir esta serie a la biblioteca", action="add_serie_to_library", extra="episodios")) return itemlist
def epienlaces(item):
    logger.info("pelisalacarta.channels.descargasmix epienlaces")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")

    # Link block for this episode
    delimitador = item.title.replace(item.show, "")
    patron = delimitador + '\s*</strong>(.*?)(?:</strong>|<div class="section-box related-posts")'
    bloque = scrapertools.find_single_match(data, patron)
    logger.info(bloque)

    patron = '<div class="episode-server">.*?href="([^"]+)"'
    patron += '.*?data-server="([^"]+)"'
    patron += '.*?<div style="float:left;width:140px;">(.*?)</div>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedserver, scrapedcalidad in matches:
        if scrapedserver == "ul":
            scrapedserver = "uploadedto"
        if scrapedserver == "streamin":
            scrapedserver = "streaminto"
        titulo = scrapedserver.capitalize() + " [" + scrapedcalidad + "]"
        # Download links
        if scrapedserver == "magnet":
            titulo = titulo.replace("Magnet", "[COLOR green][Enlace en Torrent][/COLOR]")
            itemlist.insert(0, Item(channel=__channel__, action="play", title=titulo,
                                    server="torrent", url=scrapedurl, fulltitle=item.fulltitle,
                                    thumbnail=item.thumbnail, fanart=item.fanart, plot=item.plot,
                                    folder=False))
        else:
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    servers_module = __import__("servers." + scrapedserver)
                    if "http://descargasmix" in scrapedurl:
                        DEFAULT_HEADERS.append(["Referer", item.url])
                        data = scrapertools.cache_page(scrapedurl, headers=DEFAULT_HEADERS)
                        scrapedurl = scrapertools.find_single_match(data, 'iframe src="([^"]+)"')
                    enlaces = servertools.findvideos(data=scrapedurl)
                    for enlace in enlaces:
                        titulo = "Enlace encontrado en [COLOR sandybrown]" + enlace[0] + "[/COLOR] [" + scrapedcalidad + "]"
                        itemlist.append(Item(channel=__channel__, action="play", server=enlace[2],
                                             title=titulo, url=enlace[1], fulltitle=item.fulltitle,
                                             thumbnail=item.thumbnail, fanart=item.fanart,
                                             plot=item.plot, folder=False))
                except:
                    pass

    if config.get_library_support() and item.category == "":
        itemlist.append(Item(channel=__channel__,
                             title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]",
                             url=item.url + "|", action="add_pelicula_to_library",
                             extra="epienlaces", fulltitle=item.title, show=item.title))
    return itemlist
def episodios(item):
    def load_episodios(html, item, itemlist, lang_title):
        patron = '.*?<a[^h]+href="[^"]+"[^>]+>[^<]+<\/a>(?:<br \/>|<\/p>|-)'
        matches = re.compile(patron).findall(html)
        for data in matches:
            # Extract the entry
            scrapedtitle = data.split('<a ')[0]
            scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
            if scrapedtitle != 'Categorie':
                scrapedtitle = scrapedtitle.replace('×', 'x')
                itemlist.append(Item(channel=__channel__, action="findvideos", contentType="episode",
                                     title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
                                     url=data, thumbnail=item.thumbnail, extra=item.extra,
                                     fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
                                     show=item.show))

    logger.info("[filmpertutti.py] episodios")
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url).data
    data = scrapertools.decodeHtmlentities(data)

    lang_titles = []
    starts = []
    patron = r"Stagione.*?ITA"
    matches = re.compile(patron, re.IGNORECASE).finditer(data)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
            starts.append(match.end())

    i = 1
    len_lang_titles = len(lang_titles)
    while i <= len_lang_titles:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_lang_titles else -1
        html = data[inizio:fine]
        lang_title = lang_titles[i - 1]
        load_episodios(html, item, itemlist, lang_title)
        i += 1

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(Item(channel=__channel__, title="Aggiungi alla libreria", url=item.url,
                             action="add_serie_to_library", extra="episodios" + "###" + item.extra,
                             show=item.show))
    return itemlist
def findvideos(item): logger.info("pelisalacarta.channels.descargasmix findvideos") if item.category == "Series" or item.show != "": return epienlaces(item) itemlist = [] data = scrapertools.cachePage(item.url) fanart = item.fanart sinopsis = scrapertools.find_single_match( data, '<strong>SINOPSIS</strong>:(.*?)</p>') if item.category == "": try: sinopsis, fanart = info(item.fulltitle, "movie", sinopsis) except: pass #Patron torrent patron = 'class="separate3 magnet".*?href="([^"]+)"' matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl in matches: title = urllib.unquote(scrapedurl) try: if item.fulltitle != "": titulo = item.fulltitle.strip().rsplit(" ", 1)[1] else: titulo = item.title.strip().rsplit(" ", 1)[1] except: if item.fulltitle != "": titulo = item.fulltitle.strip() else: titulo = item.title.strip() title = "[" + scrapertools.find_single_match( title, titulo + "(?:\.|)(.*?)(?:\.|[wW])") + "]" itemlist.append( Item(channel=__channel__, action="play", server="torrent", title="[COLOR green][Enlace en Torrent][/COLOR] " + title, fulltitle=item.fulltitle, url=scrapedurl, thumbnail=item.thumbnail, fanart=fanart, plot=str(sinopsis), context="0", contentTitle=item.fulltitle, folder=False)) #Patron online data_online = scrapertools.find_single_match( data, 'Enlaces para ver online(.*?)<div class="section-box related-posts">') if len(data_online) > 0: patron = 'dm\(c.a\(\'([^\']+)\'' matches = scrapertools.find_multiple_matches(data_online, patron) for code in matches: enlace = dm(code) enlaces = servertools.findvideos(data=enlace) titulo = "Enlace encontrado en [COLOR sandybrown]" + enlaces[0][ 0] + "[/COLOR]" if len(enlaces) > 0: itemlist.append( Item(channel=__channel__, action="play", server=enlaces[0][2], title=titulo, url=enlaces[0][1], fulltitle=item.fulltitle, thumbnail=item.thumbnail, fanart=fanart, plot=str(sinopsis), context="0", contentTitle=item.fulltitle, viewmode="movie_with_plot", folder=False)) #Patron descarga data_descarga = scrapertools.find_single_match( data, 'Enlaces de descarga(.*?)<script>') patron = '<div class="fondoenlaces".*?id=".*?_([^"]+)".*?textContent=nice=dm\(c.a\(\'([^\']+)\'' matches = scrapertools.find_multiple_matches(data_descarga, patron) for scrapedserver, scrapedurl in matches: if (scrapedserver == "ul") | (scrapedserver == "uploaded"): scrapedserver = "uploadedto" titulo = scrapedserver.capitalize() if titulo == "Magnet": continue mostrar_server = True if config.get_setting("hidepremium") == "true": mostrar_server = servertools.is_server_enabled(scrapedserver) if mostrar_server: try: servers_module = __import__("servers." 
+ scrapedserver) #Saca numero de enlaces patron = "(dm\(c.a\('" + scrapedurl.replace( "+", "\+") + "'.*?)</div>" data_enlaces = scrapertools.find_single_match( data_descarga, patron) patron = 'dm\(c.a\(\'([^\']+)\'' matches_enlaces = scrapertools.find_multiple_matches( data_enlaces, patron) numero = str(len(matches_enlaces)) if item.category == "": itemlist.append( Item(channel=__channel__, action="enlaces", server="", title=titulo + " - Nº enlaces:" + numero, url=item.url, fulltitle=item.fulltitle, thumbnail=item.thumbnail, fanart=fanart, plot=str(sinopsis), extra=scrapedurl, context="0", contentTitle=item.fulltitle, viewmode="movie_with_plot", folder=True)) except: pass if config.get_library_support() and item.category == "": itemlist.append( Item(channel=__channel__, title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]", url=item.url, action="add_pelicula_to_library", extra="findvideos", fulltitle=item.fulltitle.strip())) return itemlist
def episodios(item): logger.info("streamondemand.channels.guardaserie episodios") item.title = item.fulltitle itemlist = [] ## Descarga la página data = re.sub(r'\t|\n|\r', '', scrapertools.anti_cloudflare(item.url, headers)) serie_id = scrapertools.get_match(data, '/?id=(\d+)" rel="nofollow"') data = scrapertools.get_match(data, '<div id="episode">(.*?)</div>') seasons_episodes = re.compile( '<select name="episode" id="(\d+)">(.*?)</select>', re.DOTALL).findall(data) for scrapedseason, scrapedepisodes in seasons_episodes: episodes = re.compile('<option value="(\d+)"', re.DOTALL).findall(scrapedepisodes) for scrapedepisode in episodes: season = str(int(scrapedseason) + 1) episode = str(int(scrapedepisode) + 1) if len(episode) == 1: episode = "0" + episode title = season + "x" + episode + " - " + item.title # Le pasamos a 'findvideos' la url con tres partes divididas por el caracter "?" # [host+path]?[argumentos]?[Referer] url = host + "/wp-admin/admin-ajax.php?action=get_episode&id=" + serie_id + "&season=" + scrapedseason + "&episode=" + scrapedepisode + "?" + item.url itemlist.append( Item(channel=__channel__, action="findvideos", title=title, url=url, fulltitle=item.title, show=item.title, thumbnail=item.thumbnail)) if config.get_library_support(): itemlist.append( Item(channel=__channel__, title="[COLOR azure]Aggiungi [/COLOR]" + item.title + "[COLOR azure] alla libreria di Kodi[/COLOR]", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show)) itemlist.append( Item(channel=__channel__, title= "[COLOR azure]Scarica tutti gli episodi della serie[/COLOR]", url=item.url, action="download_all_episodes", extra="episodios", show=item.show)) return itemlist
def episodios(item):
    def load_episodios(html, item, itemlist, lang_title):
        patron = '((?:.*?<a href="[^"]+" target="_blank" rel="nofollow">[^<]+</a>)+)'
        matches = re.compile(patron).findall(html)
        for data in matches:
            ## Extract the entries
            scrapedtitle = data.split('<a ')[0]
            scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
            itemlist.append(Item(channel=__channel__, action="findvid_serie",
                                 title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
                                 url=item.url, thumbnail=item.thumbnail, extra=data,
                                 fulltitle=item.fulltitle, show=item.show))

    logger.info("streamondemand.filmpertutti episodios")
    itemlist = []
    ## Download the page
    data = scrapertools.cache_page(item.url)
    data = scrapertools.decodeHtmlentities(data)

    lang_titles = []
    starts = []
    patron = r"STAGIONE.*?ITA"
    matches = re.compile(patron, re.IGNORECASE).finditer(data)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
            starts.append(match.end())

    i = 1
    len_lang_titles = len(lang_titles)
    while i <= len_lang_titles:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_lang_titles else -1
        html = data[inizio:fine]
        lang_title = lang_titles[i - 1]
        load_episodios(html, item, itemlist, lang_title)
        i += 1

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(Item(channel=__channel__, title=item.title, url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show))
        itemlist.append(Item(channel=__channel__, title="Scarica tutti gli episodi della serie",
                             url=item.url, action="download_all_episodes", extra="episodios",
                             show=item.show))
    return itemlist
def getmainlist(preferred_thumb=""): logger.info("channelselector.getmainlist") itemlist = list() # Añade los canales que forman el menú principal itemlist.append( Item(title=config.get_localized_string(30119), channel="channelselector", action="getchanneltypes", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_category.png"), viewmode="movie")) itemlist.append( Item(title=config.get_localized_string(30137), channel="buscadorall", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_search.png"), viewmode="movie")) itemlist.append( Item(title="Novità", channel="novedades", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "thumb_novedades.png"), viewmode="movie")) itemlist.append( Item(title=config.get_localized_string(30102), channel="favoritos", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_fav.png"), viewmode="movie")) if config.get_library_support(): itemlist.append( Item(title=config.get_localized_string(30131), channel="biblioteca", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_library.png"), viewmode="movie")) itemlist.append( Item(title=config.get_localized_string(30101), channel="descargas", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_download.png"), viewmode="movie")) if "xbmceden" in config.get_platform(): itemlist.append( Item(title=config.get_localized_string(30100), channel="configuracion", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_conf.png"), folder=False, viewmode="movie")) else: itemlist.append( Item(title=config.get_localized_string(30100), channel="configuracion", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_conf.png"), viewmode="movie")) itemlist.append( Item(title=config.get_localized_string(30138), channel="update_version", action="update_from_menu", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_download.png"), viewmode="movie")) itemlist.append( Item(title=config.get_localized_string(30104), channel="ayuda", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_help.png"), viewmode="movie")) return itemlist
def findvideos(item):
    logger.info()
    itemlist = []
    langs = dict()
    data = httptools.downloadpage(item.url).data
    logger.debug('data: %s' % data)

    patron = '<a onclick="return (play\d+).*?;"> (.*?) <\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for key, value in matches:
        langs[key] = value.strip()

    patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);'
    matches = re.compile(patron, re.DOTALL).findall(data)
    title = item.title
    enlace = scrapertools.find_single_match(
        data, 'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"')
    for scrapedlang, encurl in matches:
        if 'e20fb34' in encurl:
            url = dec(encurl)
            url = url + enlace
        else:
            url = dec(encurl)
        title = ''
        server = ''
        servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid',
                   '/face': 'netutv', '/vk': 'vk'}
        server_id = re.sub(r'.*?embed|\.php.*', '', url)
        if server_id and server_id in servers:
            server = servers[server_id]
        logger.debug('server_id: %s' % server_id)
        # Empty string when the play button has no matching language entry
        lang = langs.get(scrapedlang, '')
        if lang in list_language:
            language = IDIOMAS[lang]
        else:
            language = 'Latino'
        idioma = ''
        if lang == 'Latino':
            idioma = '[COLOR limegreen]LATINO[/COLOR]'
        elif lang == 'Sub Español':
            idioma = '[COLOR red]SUB[/COLOR]'
        if item.extra == 'peliculas':
            title = item.contentTitle + ' (' + server + ') ' + idioma
            plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>')
        else:
            title = item.contentSerieName + ' (' + server + ') ' + idioma
            plot = item.plot
        thumbnail = servertools.guess_server_thumbnail(title)
        if 'player' not in url and 'php' in url:
            itemlist.append(item.clone(title=title, url=url, action="play", plot=plot,
                                       thumbnail=thumbnail, server=server, quality='',
                                       language=language))
        logger.debug('url: %s' % url)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))
    return itemlist
def findvideos(item):
    logger.info()
    itemlist = list()
    sublist = list()
    # Download the page
    data = httptools.downloadpage(item.url).data
    if not item.plot:
        item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
        item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)

    patron = '<option value="([^"]+)"[^>]+'
    patron += '>([^<]+).*?</i>([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, idioma, calidad in matches:
        sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1,
                                  quality=calidad.strip(), language=idioma.strip()))
    sublist = servertools.get_servers_itemlist(
        sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)

    # Add the servers found, grouped by language
    for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
        lista_idioma = filter(lambda i: i.language == k, sublist)
        if lista_idioma:
            itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False,
                                 text_color=color2, text_blod=True, thumbnail=thumbnail_host))
            itemlist.extend(lista_idioma)

    # Insert the "Buscar trailer" and "Añadir a la biblioteca" items
    if itemlist and item.extra != "library":
        title = "%s [Buscar trailer]" % (item.contentTitle)
        itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
                                      text_color=color3, title=title, viewmode="list"))
        if config.get_library_support():
            itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
                                 action="add_pelicula_to_library", url=item.url,
                                 text_color="green", contentTitle=item.contentTitle,
                                 extra="library", thumbnail=thumbnail_host))
    return itemlist
def findvideos(item): logger.info("pelisalacarta.altorrent findvideos") itemlist = [] th = Thread(target=get_art(item)) th.setDaemon(True) th.start() data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) enlaces = scrapertools.find_multiple_matches( data, 'id="modal-quality-\w+"><span>(.*?)</span>.*?class="quality-size">(.*?)</p>.*?href="([^"]+)"' ) for calidad, size, url in enlaces: title = "[COLOR palegreen][B]Torrent[/B][/COLOR]" + " " + "[COLOR chartreuse]" + calidad + "[/COLOR]" + "[COLOR teal] ( [/COLOR]" + "[COLOR forestgreen]" + size + "[/COLOR]" + "[COLOR teal] )[/COLOR]" itemlist.append( Item(channel=item.channel, title=title, url=url, action="play", server="torrent", fanart=item.fanart, thumbnail=item.thumbnail, extra=item.extra, InfoLabels=item.infoLabels, folder=False)) dd = scrapertools.find_single_match( data, 'button-green-download-big".*?href="([^"]+)"><span class="icon-play">') if dd: if item.library: itemlist.append( Item(channel=item.channel, title="[COLOR floralwhite][B]Online[/B][/COLOR]", url=dd, action="dd_y_o", thumbnail="http://imgur.com/mRmBIV4.png", fanart=item.extra.split("|")[0], contentType=item.contentType, extra=item.extra, folder=True)) else: videolist = servertools.find_video_items(data=str(dd)) for video in videolist: icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers", "server_" + video.server + ".png") if not os.path.exists(icon_server): icon_server = "" itemlist.append( Item(channel=item.channel, url=video.url, server=video.server, title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]", thumbnail=icon_server, fanart=item.extra.split("|")[1], action="play", folder=False)) if item.library and config.get_library_support() and itemlist: infoLabels = { 'tmdb_id': item.infoLabels['tmdb_id'], 'title': item.infoLabels['title'] } itemlist.append( Item(channel=item.channel, title="Añadir esta película a la biblioteca", action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, text_color="0xFFe5ffcc", thumbnail='http://imgur.com/DNCBjUB.png', extra="library")) return itemlist
def mainlist(item):
    logger.info()
    itemlist = list()
    itemlist.append(Item(channel=CHANNELNAME, title="Preferencias", action="settings", folder=False,
                         thumbnail=get_thumbnail_path("thumb_configuracion_0.png")))

    if config.get_setting("plugin_updates_available") == 0:
        nuevas = ""
    elif config.get_setting("plugin_updates_available") == 1:
        nuevas = " (1 nueva)"
    else:
        nuevas = " (%s nuevas)" % config.get_setting("plugin_updates_available")
    thumb_configuracion = "thumb_configuracion_%s.png" % config.get_setting("plugin_updates_available")

    itemlist.append(Item(channel=CHANNELNAME, title="", action="", folder=False,
                         thumbnail=get_thumbnail_path("thumb_configuracion_0.png")))
    itemlist.append(Item(channel=CHANNELNAME, title="Ajustes especiales", action="", folder=False,
                         thumbnail=get_thumbnail_path("thumb_configuracion_0.png")))
    itemlist.append(Item(channel=CHANNELNAME, title=" Ajustes de Canales", action="menu_channels",
                         folder=True, thumbnail=get_thumbnail_path("thumb_canales.png")))
    itemlist.append(Item(channel=CHANNELNAME, title=" Ajustes de Servidores", action="menu_servers",
                         folder=True, thumbnail=get_thumbnail_path("thumb_canales.png")))
    itemlist.append(Item(channel="novedades", title=" Ajustes de la sección 'Novedades'",
                         action="menu_opciones", folder=True,
                         thumbnail=get_thumbnail_path("thumb_novedades.png")))
    itemlist.append(Item(channel="buscador", title=" Ajustes del buscador global",
                         action="opciones", folder=True,
                         thumbnail=get_thumbnail_path("thumb_buscar.png")))
    itemlist.append(Item(channel=CHANNELNAME, title=" Ajustes de descargas", action="channel_config",
                         config="descargas", folder=True,
                         thumbnail=get_thumbnail_path("thumb_descargas.png")))
    if config.get_library_support():
        itemlist.append(Item(channel="biblioteca", title=" Ajustes de la biblioteca",
                             action="channel_config", folder=True,
                             thumbnail=get_thumbnail_path("thumb_biblioteca.png")))
    itemlist.append(Item(channel=CHANNELNAME, title=" Añadir o Actualizar canal/conector desde una URL",
                         action="menu_addchannels"))
    itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False,
                         thumbnail=get_thumbnail_path("thumb_configuracion_0.png")))
    itemlist.append(Item(channel=CHANNELNAME, title="Otras herramientas", action="submenu_tools",
                         folder=True, thumbnail=get_thumbnail_path("thumb_configuracion_0.png")))
    return itemlist
def episodios(item):
    def load_episodios():
        for data in match.split('<br />'):
            ## Extract the entries
            end = data.find('<a ')
            if end > 0:
                scrapedtitle = scrapertools.find_single_match(data[:end], '\d+[^\d]+\d+')
                scrapedtitle = scrapedtitle.replace('×', 'x')
                itemlist.append(Item(channel=__channel__, action="findvideos", contentType="episode",
                                     title=scrapedtitle + " (" + lang_title + ")", url=data,
                                     thumbnail=item.thumbnail, extra=item.extra,
                                     fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
                                     show=item.show))

    logger.info("[thegroove360.eurostreaming] episodios")
    itemlist = []
    ## Download the page
    data = httptools.downloadpage(item.url).data
    patron = r'go_to\":\"([^\"]+)\"'
    matches = re.compile(patron, re.IGNORECASE).findall(data)
    if len(matches) > 0:
        url = matches[0].replace("\/", "/")
        data = httptools.downloadpage(url).data

    patron = r"onclick=\"top.location=atob\('([^']+)'\)\""
    b64_link = scrapertools.find_single_match(data, patron)
    if b64_link != '':
        import base64
        data = httptools.downloadpage(base64.b64decode(b64_link)).data

    patron = r'<a href="(%s/\?p=\d+)">' % host
    link = scrapertools.find_single_match(data, patron)
    if link != '':
        data = httptools.downloadpage(link).data

    data = scrapertools.decodeHtmlentities(data)
    patron = r'</span>([^<]+)</div><div class="su-spoiler-content su-clearfix" style="display:none">(.+?)</div></div></div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for lang_title, match in matches:
        lang_title = 'SUB ITA' if 'SUB' in lang_title.upper() else 'ITA'
        load_episodios()

    patron = '<li><span style="[^"]+"><a onclick="[^"]+" href="[^"]+">([^<]+)</a>(?:</span>\s*<span style="[^"]+"><strong>([^<]+)</strong>)?</span>(.*?)</div>\s*</li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for lang_title1, lang_title2, match in matches:
        lang_title = 'SUB ITA' if 'SUB' in (lang_title1 + lang_title2).upper() else 'ITA'
        load_episodios()

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(Item(channel=__channel__, title="Aggiungi alla libreria", url=item.url,
                             action="add_serie_to_library", extra="episodios" + "###" + item.extra,
                             show=item.show))
    return itemlist
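# The redirect hop above relies on the page embedding the next URL
# base64-encoded inside a JavaScript atob(...) call; decoding it is plain
# standard-library work. A tiny self-contained illustration (the sample value
# is made up, not taken from the site):
import base64

sample = base64.b64encode("http://example.com/?p=42")  # shape of an atob('...') argument
print base64.b64decode(sample)  # -> http://example.com/?p=42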
def findvideos(item): logger.info() itemlist = [] data = scrapertools.cache_page(item.url) data = re.sub(r"<!--.*?-->","",data) data = re.sub(r"\n|\r|\t|\s{2}| ","",data) bloque_tab= scrapertools.find_single_match(data,'<div id="verpelicula">(.*?)<div class="tab_container">') patron ='<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>' check= re.compile(patron,re.DOTALL).findall(bloque_tab) servers_data_list = [] patron = '<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>' matches = re.compile(patron,re.DOTALL).findall(data) if len(matches)==0: patron = '<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>' matches = re.compile(patron,re.DOTALL).findall(data) for check_tab ,server, id in matches: scrapedplot = scrapertools.get_match(data,'<span class="clms">(.*?)</div></div>') plotformat = re.compile('(.*?:) </span>',re.DOTALL).findall(scrapedplot) scrapedplot = scrapedplot.replace(scrapedplot,bbcode_kodi2html("[COLOR white]"+scrapedplot+"[/COLOR]")) for plot in plotformat: scrapedplot = scrapedplot.replace(plot,bbcode_kodi2html("[COLOR red][B]"+plot+"[/B][/COLOR]")) scrapedplot = scrapedplot.replace("</span>","[CR]") scrapedplot = scrapedplot.replace(":","") if check_tab in str(check): idioma, calidad = scrapertools.find_single_match(str(check),""+check_tab+"', '(.*?)', '(.*?)'") servers_data_list.append ([server,id, idioma, calidad]) url = "http://www.peliculasdk.com/Js/videod.js" data = scrapertools.cachePage(url) data = re.sub(r"\n|\r|\t|\s{2}| ","",data) data = data.replace ('<iframe width="100%" height="400" scrolling="no" frameborder="0"','') patron = 'function (\w+)\(id\).*?' patron+= 'data-src="([^"]+)"' matches = re.compile(patron,re.DOTALL).findall(data) for server, url in matches: for enlace , id, idioma, calidad in servers_data_list: if server ==enlace: video_url = re.sub(r"embed\-|\-.*?x.*?\.html|u\'|\'\(","",str(url)) video_url = re.sub(r"'\+codigo\+'","",video_url) video_url= video_url.replace('embed//','embed/') video_url= video_url + id if "goo.gl" in video_url: try: from unshortenit import unshorten url =unshorten(video_url) video_url = scrapertools.get_match(str(url),"u'([^']+)'") except: continue servertitle = scrapertools.get_match(video_url,'http.*?://(.*?)/') servertitle = servertitle.replace(servertitle,bbcode_kodi2html("[COLOR red]"+servertitle+"[/COLOR]")) servertitle = servertitle.replace("embed.","") servertitle = servertitle.replace("player.","") servertitle = servertitle.replace("api.video.","") servertitle = re.sub(r"hqq.tv|hqq.watch","netu.tv",servertitle) servertitle = servertitle.replace("anonymouse.org","netu.tv") title = bbcode_kodi2html("[COLOR orange]Ver en --[/COLOR]") + servertitle +" "+ idioma +" "+ calidad itemlist.append( Item(channel=item.channel, title =title , url=video_url, action="play", thumbnail=item.category, plot=scrapedplot, fanart=item.show ) ) if item.library and config.get_library_support() and len(itemlist) > 0 : infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'title': item.fulltitle} itemlist.append(Item(channel=item.channel, title="Añadir esta película a la biblioteca", action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, text_color="0xFFff6666", thumbnail='http://imgur.com/0gyYvuC.png')) return itemlist
def findvideos(item): logger.info("pelisalacarta.channels.pelisplus findvideos") itemlist = [] datas = scrapertools.cache_page(item.url) patron = "<iframe.*?src='([^']+)' frameborder='0' allowfullscreen.*?" matches = re.compile(patron, re.DOTALL).findall(datas) for scrapedurl in matches: if 'elreyxhd' or 'pelisplus.biz' in scrapedurl: data = scrapertools.cachePage(scrapedurl, headers=headers) quote = scrapertools.find_single_match(data, 'sources.*?file.*?http') if quote and "'" in quote: patronr = "file:'([^']+)',label:'([^.*?]+)',type:.*?'.*?}" elif '"' in quote: patronr = 'file:"([^"]+)",label:"([^.*?]+)",type:.*?".*?}' matchesr = re.compile(patronr, re.DOTALL).findall(data) for scrapedurl, scrapedcalidad in matchesr: print scrapedurl + ' ' + scrapedcalidad url = scrapedurl title = item.contentTitle + ' (' + scrapedcalidad + ')' thumbnail = item.thumbnail fanart = item.fanart if (DEBUG): logger.info("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "])") itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, fanart=fanart)) url = scrapedurl from core import servertools itemlist.extend(servertools.find_video_items(data=datas)) for videoitem in itemlist: videoitem.channel = item.channel if videoitem.server != '': videoitem.thumbnail = servertools.guess_server_thumbnail( videoitem.server) else: videoitem.thumbnail = item.thumbnail videoitem.action = 'play' videoitem.fulltitle = item.title if 'redirector' not in videoitem.url and 'youtube' not in videoitem.url: videoitem.title = item.contentTitle + ' (' + videoitem.server + ')' n = 0 for videoitem in itemlist: if 'youtube' in videoitem.url: videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]' itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n] n = n + 1 if item.extra == 'findvideos' and 'youtube' in itemlist[-1]: itemlist.pop(1) if 'serie' not in item.url: if config.get_library_support( ) and len(itemlist) > 0 and item.extra != 'findvideos': itemlist.append( Item( channel=item.channel, title= '[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]', url=item.url, action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle)) return itemlist
def findvideos(item): logger.info() if item.extra and item.extra != "findvideos": return epienlaces(item) itemlist = [] item.text_color = color3 data = httptools.downloadpage(item.url).data item.plot = scrapertools.find_single_match( data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>') year = scrapertools.find_single_match( data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)') if year != "": try: from core import tmdb item.infoLabels['year'] = year tmdb.set_infoLabels_item(item, __modo_grafico__) except: pass old_format = False #Patron torrent antiguo formato if "Enlaces de descarga</div>" in data: old_format = True matches = scrapertools.find_multiple_matches( data, 'class="separate3 magnet".*?href="([^"]+)"') for scrapedurl in matches: title = "[Torrent] " title += urllib.unquote( scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix')) itemlist.append( item.clone(action="play", server="torrent", title=title, url=scrapedurl, text_color="green")) #Patron online data_online = scrapertools.find_single_match( data, 'Ver online</div>(.*?)<div class="section-box related-' 'posts">') if data_online: title = "Enlaces Online" if '"l-latino2"' in data_online: title += " [LAT]" elif '"l-esp2"' in data_online: title += " [ESP]" elif '"l-vose2"' in data_online: title += " [VOSE]" patron = 'make_links.*?,[\'"]([^"\']+)["\']' matches = scrapertools.find_multiple_matches(data_online, patron) for i, code in enumerate(matches): enlace = mostrar_enlaces(code) enlaces = servertools.findvideos(data=enlace[0]) if enlaces and not "peliculas.nu" in enlaces: if i == 0: itemlist.append( item.clone(title=title, action="", text_color=color1)) title = " Ver vídeo en " + enlaces[0][2] itemlist.append( item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1])) scriptg = scrapertools.find_single_match( data, "<script type='text/javascript'>str='([^']+)'") if scriptg: gvideo = urllib.unquote_plus(scriptg.replace("@", "%")) url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"') if url: itemlist.append( item.clone( action="play", server="directo", url=url, title=" Ver vídeo en Googlevideo (Máxima calidad)", extra=item.url)) #Patron descarga patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \ '(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-posts">)' bloques_descarga = scrapertools.find_multiple_matches(data, patron) for title_bloque, bloque in bloques_descarga: if title_bloque == "Ver online": continue if '"l-latino2"' in bloque: title_bloque += " [LAT]" elif '"l-esp2"' in bloque: title_bloque += " [ESP]" elif '"l-vose2"' in bloque: title_bloque += " [VOSE]" itemlist.append( item.clone(title=title_bloque, action="", text_color=color1)) if '<div class="subiendo">' in bloque: itemlist.append( item.clone(title=" Los enlaces se están subiendo", action="")) continue patron = 'class="separate.*? 
([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']' matches = scrapertools.find_multiple_matches(bloque, patron) for scrapedserver, scrapedurl in matches: if (scrapedserver == "ul") | (scrapedserver == "uploaded"): scrapedserver = "uploadedto" titulo = scrapedserver.capitalize() if titulo == "Magnet" and old_format: continue elif titulo == "Magnet" and not old_format: title = " Enlace Torrent" itemlist.append( item.clone(action="play", server="torrent", title=title, url=scrapedurl, text_color="green")) continue mostrar_server = True if config.get_setting("hidepremium") == "true": mostrar_server = servertools.is_server_enabled(scrapedserver) if mostrar_server: try: servers_module = __import__("servers." + scrapedserver) #Saca numero de enlaces urls = mostrar_enlaces(scrapedurl) numero = str(len(urls)) titulo = " %s - Nº enlaces:%s" % (titulo, numero) itemlist.append( item.clone(action="enlaces", title=titulo, extra=scrapedurl)) except: pass itemlist.append( item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", text_color="magenta")) if item.extra != "findvideos" and config.get_library_support(): itemlist.append( Item(channel=item.channel, title="Añadir a la biblioteca", action="add_pelicula_to_library", extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle, text_color="green")) return itemlist
def temporadas(item): logger.info("pelisalacarta.channels.seodiv temporadas") itemlist = [] templist = [] data = scrapertools.cache_page(item.url) url_base = item.url patron = '<a class="collapsed" data-toggle="collapse" data-parent="#accordion" href=.*? aria-expanded="false" aria-controls=.*?>([^<]+)<\/a>' matches = re.compile(patron, re.DOTALL).findall(data) temp = 1 if 'Temporada' in str(matches): for scrapedtitle in matches: url = url_base tempo = re.findall(r'\d+', scrapedtitle) if tempo: title = 'Temporada' + ' ' + tempo[0] else: title = scrapedtitle.lower() thumbnail = item.thumbnail plot = item.plot fanart = scrapertools.find_single_match( data, '<img src="([^"]+)"/>.*?</a>') if (DEBUG): logger.info("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "])") itemlist.append( Item(channel=item.channel, action="episodios", title=title, fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot, fanart=fanart, temp=str(temp), contentSerieName=item.contentSerieName)) temp = temp + 1 if item.extra == 'temporadas': for tempitem in itemlist: templist += episodios(tempitem) if config.get_library_support() and len(itemlist) > 0: itemlist.append( Item(channel=item.channel, title= '[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]', url=item.url, action="add_serie_to_library", extra="temporadas", contentSerieName=item.contentSerieName, extra1=item.extra1, temp=str(temp))) if item.extra == 'temporadas': return templist else: return itemlist else: itemlist = episodios(item) if config.get_library_support() and len(itemlist) > 0: itemlist.append( Item(channel=item.channel, title= '[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]', url=item.url, action="add_serie_to_library", extra="temporadas", contentSerieName=item.contentSerieName, extra1=item.extra1, temp=str(temp))) return itemlist
def completo(item): logger.info("[newpct1.py] completo") itemlist = [] categoryID = "" # Guarda el valor por si son etiquetas para que lo vea 'listadofichas' item_extra = item.extra item_show = item.show item_title = item.title # Lee las entradas if item_extra.startswith("serie"): ultimo_action = "get_episodios" if item.extra != "serie_add": ''' # Afinar mas la busqueda if item_extra=="serie-hd": categoryID=buscar_en_subcategoria(item.show,'1469') elif item_extra=="serie-vo": categoryID=buscar_en_subcategoria(item.show,'775') elif item_extra=="serie-tv": categoryID=buscar_en_subcategoria(item.show,'767') if categoryID !="": item.url=item.url.replace("categoryID=","categoryID="+categoryID) #Fanart oTvdb= TvDb() serieID=oTvdb.get_serieId_by_title(item.show) fanart = oTvdb.get_graphics_by_serieId(serieID) if len(fanart)>0: item.fanart = fanart[0]''' try: from core.tmdb import Tmdb oTmdb = Tmdb(texto_buscado=item.show, tipo="tv", idioma_busqueda="es") item.fanart = oTmdb.get_backdrop() item.plot = oTmdb.get_sinopsis() print item.plot except: pass else: item_title = item.show items_programas = get_episodios(item) else: ultimo_action = "listado" items_programas = listado(item) if len(items_programas) == 0: return itemlist # devolver lista vacia salir = False while not salir: # Saca la URL de la siguiente página ultimo_item = items_programas[len(items_programas) - 1] # Páginas intermedias if ultimo_item.action == ultimo_action: # Quita el elemento de "Página siguiente" ultimo_item = items_programas.pop() # Añade las entradas de la página a la lista completa itemlist.extend(items_programas) # Carga la siguiente página ultimo_item.extra = item_extra ultimo_item.show = item_show ultimo_item.title = item_title logger.info("[newpct1.py] completo url=" + ultimo_item.url) if item_extra.startswith("serie"): items_programas = get_episodios(ultimo_item) else: items_programas = listado(ultimo_item) # Última página else: # Añade a la lista completa y sale itemlist.extend(items_programas) salir = True if (config.get_library_support() and len(itemlist) > 0 and item.extra.startswith("serie")): itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca", url=item.url, action="add_serie_to_library", extra="completo###serie_add", show=item.show)) logger.info("[newpct1.py] completo items=" + str(len(itemlist))) return itemlist
def episodios(item):
    def load_episodios(html, item, itemlist, lang_title):
        patron = '.*?<a href="[^"]+"[^o]+ofollow[^>]+>[^<]+</a><(?:b|/)[^>]+>'
        matches = re.compile(patron).findall(html)
        for data in matches:
            # Extract the entry
            scrapedtitle = data.split('<a ')[0]
            scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
            if scrapedtitle != 'Categorie':
                scrapedtitle = scrapedtitle.replace('×', 'x')
                itemlist.append(Item(channel=__channel__, action="findvideos",
                                     title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
                                     url=data, thumbnail=item.thumbnail, extra=item.extra,
                                     fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
                                     show=item.show))

    logger.info("[casacinema.py] episodios")
    itemlist = []
    # Download the page
    data = scrapertools.cache_page(item.url)
    data = scrapertools.decodeHtmlentities(data)
    data = scrapertools.get_match(data, '<p><strong>(.*?)<div id="disqus_thread">')

    lang_titles = []
    starts = []
    patron = r"Stagione.*?ITA"
    matches = re.compile(patron, re.IGNORECASE).finditer(data)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
            starts.append(match.end())

    i = 1
    len_lang_titles = len(lang_titles)
    while i <= len_lang_titles:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_lang_titles else -1
        html = data[inizio:fine]
        lang_title = lang_titles[i - 1]
        load_episodios(html, item, itemlist, lang_title)
        i += 1

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(Item(channel=__channel__, title=item.title, url=item.url,
                             action="add_serie_to_library", extra="episodios" + "###" + item.extra,
                             show=item.show))
        itemlist.append(Item(channel=__channel__, title="Scarica tutti gli episodi della serie",
                             url=item.url, action="download_all_episodes",
                             extra="episodios" + "###" + item.extra, show=item.show))
    return itemlist
def episodios(item):
    logger.info("pelisalacarta.seriesblanco episodios")

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    data = re.sub(r"<!--.*?-->", "", data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    # Rewrite the flag icons into <idioma/xx|yy/idioma> markers
    data = re.sub(r"a></td><td> <img src=/banderas/", "a><idioma/", data)
    data = re.sub(r"<img src=/banderas/", "|", data)
    data = re.sub(r"\s\|", "|", data)
    data = re.sub(r"\.png border='\d+' height='\d+' width='\d+'[^>]+><", "/idioma><", data)
    data = re.sub(r"\.png border='\d+' height='\d+' width='\d+'[^>]+>", "", data)

    patron = "<img id='port_serie' src='([^']+)'.*?<li data-content=\"settings\"><p>(.*?)</p>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    thumbnail = ""
    plot = ""
    for scrapedthumbnail, scrapedplot in matches:
        thumbnail = scrapedthumbnail
        plot = scrapedplot

    '''
    <td>
        <a href='/serie/534/temporada-1/capitulo-00/the-big-bang-theory.html'>1x00 - Capitulo 00 </a>
    </td>
    <td>
        <img src=/banderas/vo.png border='0' height='15' width='25' />
        <img src=/banderas/vos.png border='0' height='15' width='25' />
    </td>
    '''

    patron = "<a href='([^']+)'>([^<]+)</a><idioma/([^/]+)/idioma>"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedidioma in matches:
        idioma = ""
        for i in scrapedidioma.split("|"):
            idioma += " [" + IDIOMAS.get(i, "OVOS") + "]"
        title = item.show + " - " + scrapedtitle + idioma
        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 url=urlparse.urljoin(HOST, scrapedurl),
                 action="findvideos",
                 show=item.show,
                 thumbnail=thumbnail,
                 plot=plot,
                 language=idioma,
                 list_idiomas=list_idiomas,
                 list_calidad=CALIDADES,
                 context=CONTEXT))

    if len(itemlist) == 0 and "<title>404 Not Found</title>" in data:
        itemlist.append(
            Item(channel=item.channel,
                 title="la url '" + item.url + "' parece no estar disponible en la web. Inténtalo más tarde.",
                 url=item.url,
                 action="series"))

    if len(itemlist) > 0 and OPCION_FILTRO:
        itemlist = filtertools.get_filtered_links(itemlist, item.channel)

    # "Add this series to the XBMC library" option
    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la biblioteca de XBMC",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
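# A standalone walk-through of the flag normalisation episodios() above
# performs: the <img src=/banderas/xx.png ...> icons in a table row are
# rewritten into a single <idioma/xx|yy/idioma> marker, whose |-separated
# codes are then mapped through IDIOMAS. The sample row and the two-entry
# dictionary are invented for illustration; the channel's real IDIOMAS map
# is larger.
import re

IDIOMAS = {"es": "ESP", "vos": "VOS"}
row = ("a></td><td> <img src=/banderas/es.png border='0' height='15' width='25' />"
       " <img src=/banderas/vos.png border='0' height='15' width='25' /><")

row = re.sub(r"a></td><td> <img src=/banderas/", "a><idioma/", row)
row = re.sub(r"<img src=/banderas/", "|", row)
row = re.sub(r"\s\|", "|", row)
row = re.sub(r"\.png border='\d+' height='\d+' width='\d+'[^>]+><", "/idioma><", row)
row = re.sub(r"\.png border='\d+' height='\d+' width='\d+'[^>]+>", "", row)
print(row)  # -> a><idioma/es|vos/idioma><

langs = re.search(r"<idioma/([^/]+)/idioma>", row).group(1)
tags = "".join(" [" + IDIOMAS.get(code, "OVOS") + "]" for code in langs.split("|"))
print(tags)  # ->  [ESP] [VOS]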
def episodios(item, final=True):
    logger.info()

    # Download the page
    body = httptools.downloadpage(item.url).data

    # Initialise both fields so the later references cannot raise NameError
    # when the matches below fail
    scrapedplot = ""
    scrapedthumbnail = ""
    try:
        scrapedplot = scrapertools.get_match(body, '<meta name="description" content="([^"]+)"')
    except:
        pass
    try:
        scrapedthumbnail = scrapertools.get_match(body, '<link rel="image_src" href="([^"]+)"')
    except:
        pass

    data = scrapertools.get_match(body, '<ul id="listado">(.*?)</ul>')
    patron = '<li><a href="([^"]+)">(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    itemlist = []
    for url, title in matches:
        scrapedtitle = scrapertools.htmlclean(title)
        try:
            # Rewrite "Capítulo N ..." as a "1xNN - ..." episode title
            episodio = scrapertools.get_match(scrapedtitle, r"Capítulo\s+(\d+)")
            titulo_limpio = re.compile(r"Capítulo\s+(\d+)\s+", re.DOTALL).sub("", scrapedtitle)
            if len(episodio) == 1:
                scrapedtitle = "1x0" + episodio + " - " + titulo_limpio
            else:
                scrapedtitle = "1x" + episodio + " - " + titulo_limpio
        except:
            pass
        scrapedurl = urlparse.urljoin(item.url, url)
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                     "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 show=item.show))

    # Pagination: follow the ">" link recursively; the library entries are
    # appended only on the outermost call (final=True)
    try:
        next_page = scrapertools.get_match(body, r'<a href="([^"]+)">\>\;</a>')
        next_page = urlparse.urljoin(item.url, next_page)
        item2 = Item(channel=item.channel,
                     action="episodios",
                     title=item.title,
                     url=next_page,
                     thumbnail=item.thumbnail,
                     plot=item.plot,
                     show=item.show,
                     viewmode="movie_with_plot")
        itemlist.extend(episodios(item2, final=False))
    except:
        import traceback
        logger.error(traceback.format_exc())

    if final and config.get_library_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la biblioteca de XBMC",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))
        itemlist.append(
            Item(channel=item.channel,
                 title="Descargar todos los episodios de la serie",
                 url=item.url,
                 action="download_all_episodes",
                 extra="episodios",
                 show=item.show))

    return itemlist
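# A standalone sketch of the title normalisation inside the loop above:
# "Capítulo N" is pulled out of the scraped title and turned into a
# Kodi-style 1xNN prefix, zero-padding single-digit episode numbers.
# format_title() is illustrative only; it is not part of the channel, and
# "Cap.tulo" is used in the patterns so the sketch stays encoding-agnostic.
import re

def format_title(scrapedtitle):
    match = re.search(r"Cap.tulo\s+(\d+)", scrapedtitle)
    if not match:
        return scrapedtitle            # leave titles without a chapter number alone
    episodio = match.group(1)
    resto = re.sub(r"Cap.tulo\s+\d+\s*", "", scrapedtitle)
    if len(episodio) == 1:
        episodio = "0" + episodio      # 1x07 instead of 1x7
    return "1x" + episodio + " - " + resto

print(format_title("Capitulo 7 El regreso"))  # -> 1x07 - El regreso
print(format_title("Capitulo 12 Final"))      # -> 1x12 - Final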