示例#1
0
def findvideos(item):
    """List video links for a metaserie episode plus prev/next navigation."""
    logger.info ("pelisalacarta.channels.metaserie findvideos")
    itemlist = []
    # Coloured label per audio-flag id embedded in the page's image names.
    audio = {'la':'[COLOR limegreen]LATINO[/COLOR]','es':'[COLOR yellow]ESPAÑOL[/COLOR]','sub':'[COLOR red]ORIGINAL SUBTITULADO[/COLOR]'}
    data = scrapertools.cache_page(item.url)
    patron = '<td><img src="http:\/\/metaserie\.com\/wp-content\/themes\/mstheme\/gt\/assets\/img\/([^\.]+).png" width="20".*?<\/td>.*?<td><img src="http:\/\/www\.google\.com\/s2\/favicons\?domain=([^"]+)" \/>&nbsp;([^<]+)<\/td>'

    anterior = scrapertools.find_single_match(data,'<th scope="col"><a href="([^"]+)" rel="prev" class="local-link">Anterior</a></th>')
    siguiente = scrapertools.find_single_match(data,'<th scope="col"><a href="([^"]+)" rel="next" class="local-link">Siguiente</a></th>')

    # One row per mirror: (language flag id, link, server name).
    for lang_id, link, server_name in re.findall(patron, data, re.DOTALL):
        label = item.title + ' audio ' + audio[lang_id] + ' en ' + server_name
        icon = servertools.guess_server_thumbnail(server_name)
        if (DEBUG): logger.info("title=["+label+"], url=["+link+"], thumbnail=["+icon+"])")
        itemlist.append(Item(channel=item.channel, action="play", title=label,
                             fulltitle=item.contentSerieName, url=link,
                             thumbnail=icon, extra=item.thumbnail, folder=True))

    # Episode navigation entries, unless the caller asked for chapters only.
    if item.extra1 != 'capitulos':
        if anterior:
            itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Anterior', url=anterior, thumbnail='https://s31.postimg.org/k5kpwyrgb/anterior.png', folder=True))
        if siguiente:
            itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Siguiente', url=siguiente, thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png', folder=True))
    return itemlist
示例#2
0
def findvideos(item):
    """Return play items for an episode, filtered by requested language.

    ``item.extra1`` holds either a scraped-language key or ``'all'``.
    Relies on a module-level ``audio`` mapping from scraped language ids to
    display labels — TODO confirm it is defined in this module.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = 'href="([^"]+)".*?domain=.*?>([^<]+).*?gold">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedserver, scrapedidioma in matches:
        # FIX: this first line was tab-indented while the rest of the loop
        # used spaces (TabError under Python 3).
        url = scrapedurl
        idioma = audio[scrapedidioma]
        title = item.contentSerieName+' '+str(item.contentSeasonNumber)+'x'+str(item.contentEpisodeNumber)+' '+idioma+' ('+scrapedserver.strip(' ')+')'
        if scrapedidioma == item.extra1 or item.extra1 == 'all':
            # FIX: 'item.ContentSeriename' was a wrong-case typo of the
            # attribute used two lines above (item.contentSerieName).
            itemlist.append(item.clone(title=title, url=url, action="play", language=idioma,
                                       server=scrapedserver.strip(), fulltitle=item.contentSerieName))

    for videoitem in itemlist:
        videoitem.infoLabels = item.infoLabels
        videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)

    return itemlist
示例#3
0
def findvideos(item):
    """List metaserie episode links, titling them from the page's <h1>."""
    logger.info ("pelisalacarta.channels.metaserie findvideos")
    itemlist = []
    # Coloured label per language flag id used in the page's image names.
    audio = {'la':'[COLOR limegreen]LATINO[/COLOR]','es':'[COLOR yellow]ESPAÑOL[/COLOR]','sub':'[COLOR red]ORIGINAL SUBTITULADO[/COLOR]'}
    data = scrapertools.cache_page(item.url)
    patron = '<td><img src="http:\/\/metaserie\.com\/wp-content\/themes\/mstheme\/gt\/assets\/img\/([^\.]+).png" width="20".*?<\/td>.*?<td><img src="http:\/\/www\.google\.com\/s2\/favicons\?domain=([^"]+)" \/>&nbsp;([^<]+)<\/td>'

    anterior = scrapertools.find_single_match(data,'<th scope="col"><a href="([^"]+)" rel="prev" class="local-link">Anterior</a></th>')
    siguiente = scrapertools.find_single_match(data,'<th scope="col"><a href="([^"]+)" rel="next" class="local-link">Siguiente</a></th>')
    titulo = scrapertools.find_single_match(data,'<h1 class="entry-title">([^<]+)</h1>		</header>')

    # One row per mirror: (language flag id, link, server name).
    for lang_id, link, server_name in re.findall(patron, data, re.DOTALL):
        label = titulo + ' audio ' + audio[lang_id] + ' en ' + server_name
        icon = servertools.guess_server_thumbnail(server_name)
        if (DEBUG): logger.info("title=["+label+"], url=["+link+"], thumbnail=["+icon+"])")
        itemlist.append(Item(channel=item.channel, action="play", title=label,
                             fulltitle=titulo, url=link, thumbnail=icon,
                             extra=item.thumbnail))

    if anterior:
        itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Anterior', url=anterior, thumbnail='https://s31.postimg.org/k5kpwyrgb/anterior.png', folder="true"))
    if siguiente:
        itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Siguiente', url=siguiente, thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png', folder="true"))
    return itemlist
示例#4
0
def findvideos(item):
    """List known mirrors for a movie, decoding obfuscated dec()-pairs.

    Each scraped pair decodes to (base-url, path); only base URLs present in
    ``servidor`` become play items.  Duplicate titles are skipped.
    """
    servidor = {"http://uptobox.com/":"uptobox","http://userscloud.com/":"userscloud","https://my.pcloud.com/publink/show?code=":"pcloud","http://thevideos.tv/":"thevideos","http://ul.to/":"uploadedto","http://turbobit.net/":"turbobit","http://www.cinecalidad.com/protect/v.html?i=":"cinecalidad","http://www.mediafire.com/download/":"mediafire","https://www.youtube.com/watch?v=":"youtube","http://thevideos.tv/embed-":"thevideos","//www.youtube.com/embed/":"youtube","http://ok.ru/video/":"okru","http://ok.ru/videoembed/":"okru","http://www.cinemaqualidade.com/protect/v.html?i=":"cinemaqualidade.com","http://usersfiles.com/":"usersfiles","https://depositfiles.com/files/":"depositfiles","http://www.nowvideo.sx/video/":"nowvideo","http://vidbull.com/":"vidbull","http://filescdn.com/":"filescdn","https://www.yourupload.com/watch/":"yourupload"}
    logger.info()
    itemlist = []
    duplicados = []
    data = httptools.downloadpage(item.url).data

    patron = 'dec\("([^"]+)"\)\+dec\("([^"]+)"\)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    recomendados = ["uptobox", "thevideos", "nowvideo", "pcloud"]
    for scrapedurl, scrapedtitle in matches:
        # FIX: decode once per match — the original re-ran dec(scrapedurl)
        # up to eight times inside the loop body.
        base = dec(scrapedurl)
        if base not in servidor:
            continue
        server = servidor[base]
        title = "Ver "+item.contentTitle+" en "+server.upper()
        if 'yourupload' in base:
            # yourupload links must use the /embed/ form to play.
            url = base.replace('watch', 'embed') + dec(scrapedtitle)
        else:
            if 'youtube' in base:
                title = '[COLOR orange]Trailer en Youtube[/COLOR]'
            url = base + dec(scrapedtitle)
        if server in recomendados:
            title = title + "[COLOR limegreen] [I] (Recomedado) [/I] [/COLOR]"
        thumbnail = servertools.guess_server_thumbnail(server)
        plot = ""
        if title not in duplicados:
            itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot, extra=item.thumbnail, server=server))
        duplicados.append(title)
    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]', url=item.url,
                             action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
示例#5
0
def findvideos(item):
    """Build episode links via find_video_items, tagging server and language.

    The page language appears once per page, so it is extracted once here
    (loop-invariant hoisted) instead of re-running the regex for every
    video item as the original did.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    video_items = servertools.find_video_items(item)

    language = scrapertools.find_single_match(data, '<span class="f-info-title">Idioma:<\/span>\s*<span '
                                                    'class="f-info-text">(.*?)<\/span>')

    for videoitem in video_items:
        videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
        videoitem.language = language
        videoitem.title = item.contentSerieName + ' (' + videoitem.server + ') (' + videoitem.language + ')'
        videoitem.quality = 'default'
        videoitem.context = item.context
        itemlist.append(videoitem)

    # Requerido para FilterTools
    if len(itemlist) > 0 and filtertools.context:
        itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
示例#6
0
def play(item):
    """Resolve the real video URL behind a peliculasaudiolatino episode page."""
    logger.info("channels.peliculasaudiolatino play")
    itemlist = []

    page = httptools.downloadpage(item.url).data
    show_url = scrapertools.find_single_match(page,
                                              'src="(' + HOST + '/show/[^"]+)"')
    headers = [['User-Agent', 'Mozilla/5.0'],
               ['Accept-Encoding', 'gzip, deflate'],
               ['Referer', HOST],
               ['Connection', 'keep-alive']]
    page = httptools.downloadpage(show_url, headers=headers).data
    videoUrl = scrapertools.find_single_match(page, '<IFRAME SRC="([^"]+)"')
    # goo.gl short links: follow the redirect header to the real URL.
    goo = scrapertools.find_single_match(videoUrl, '://([^/]+)/')
    if goo == 'goo.gl':
        videoUrl = httptools.downloadpage(videoUrl,
                                          follow_redirects=False,
                                          only_headers=True).headers["location"]
        server = scrapertools.find_single_match(videoUrl, '://([^/]+)/')
    #logger.info("videoUrl = "+videoUrl)
    enlaces = servertools.findvideos(videoUrl)
    if enlaces:
        # Añade al listado de XBMC
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=item.title,
                 fulltitle=item.fulltitle,
                 url=enlaces[0][1],
                 server=enlaces[0][2],
                 thumbnail=servertools.guess_server_thumbnail(videoUrl),
                 folder=False))

    return itemlist
示例#7
0
def findvideos(item):
    """List known cinecalidad mirrors, decoding obfuscated dec()-pairs."""
    servidor = {"http://uptobox.com/":"uptobox","http://userscloud.com/":"userscloud","https://my.pcloud.com/publink/show?code=":"pcloud","http://thevideos.tv/":"thevideos","http://ul.to/":"uploadedto","http://turbobit.net/":"turbobit","http://www.cinecalidad.com/protect/v.html?i=":"cinecalidad","http://www.mediafire.com/download/":"mediafire","https://www.youtube.com/watch?v=":"youtube","http://thevideos.tv/embed-":"thevideos","//www.youtube.com/embed/":"youtube","http://ok.ru/video/":"okru","http://ok.ru/videoembed/":"okru","http://www.cinemaqualidade.com/protect/v.html?i=":"cinemaqualidade.com","http://usersfiles.com/":"usersfiles","https://depositfiles.com/files/":"depositfiles","http://www.nowvideo.sx/video/":"nowvideo","http://vidbull.com/":"vidbull"}
    logger.info("pelisalacarta.channels.cinecalidad links")
    itemlist = []
    data = scrapertools.cache_page(item.url)

#   {h=dec("111 123 123 119 65 54 54 124 119 123 118 105 118 127 53 106 118 116 54")+dec("114 114 110 115 110 55 121 117 64 120 120 115");}
    patron = 'dec\("([^"]+)"\)\+dec\("([^"]+)"\)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    recomendados = ["uptobox", "thevideos", "nowvideo", "pcloud"]
    for scrapedurl, scrapedtitle in matches:
        # FIX: decode once per match — the original re-ran dec(scrapedurl)
        # up to seven times inside the loop body.
        base = dec(scrapedurl)
        if base not in servidor:
            continue
        server = servidor[base]
        url = base + dec(scrapedtitle)
        title = "Ver "+item.contentTitle+" en "+server.upper()
        if server in recomendados:
            title = title+"[COLOR limegreen] [I] (Recomedado) [/I] [/COLOR]"
        thumbnail = servertools.guess_server_thumbnail(server)
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"])")
        itemlist.append( Item(channel=item.channel, action="play" , title=title ,fulltitle = item.title, url=url, thumbnail=thumbnail, plot=plot,extra=item.thumbnail, server=server))

    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]', url=item.url,
                             action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
示例#8
0
def findvideos(item):
    """Collect playable links from the page's metaframe iframes.

    Youtube iframes are relabelled as trailers; an "add to library" entry is
    appended when library support is enabled and not already in that flow.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    patron = '<iframe class=metaframe rptss src=(.*?) frameborder=0 allowfullscreen><\/iframe>'
    # FIX: was "matches = matches = ..." (redundant duplicated assignment).
    matches = re.compile(patron, re.DOTALL).findall(data)
    for videoitem in matches:
        itemlist.extend(servertools.find_video_items(data=videoitem))

    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = 'play'
        videoitem.thumbnail = servertools.guess_server_thumbnail(
            videoitem.server)
        videoitem.infoLabels = item.infoLabels
        videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en Youtube[/COLOR]'

    if config.get_library_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))
    return itemlist
示例#9
0
def findvideos(item):
    """Decode the obfuscated player URLs and emit one play item per server."""
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = 'function play.*?servidores.*?attr.*?src.*?\+([^;]+);'
    matches = re.compile(patron, re.DOTALL).findall(data)
    title = item.title
    enlace = scrapertools.find_single_match(
        data,
        'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"'
    )

    for encoded in matches:
        url = dec(encoded)
        # URLs referencing the e20fb34 variable need the scraped suffix.
        if 'e20fb34' in encoded:
            url = url + enlace

        # Identify the hosting server from URL markers (first match wins).
        server = ''
        for marker, name in (('/opl.', 'Openload'),
                             ('/your', 'Yourupload'),
                             ('/sen.', 'Sendvid')):
            if marker in url:
                server = name
                break

        if item.extra == 'peliculas':
            title = item.contentTitle + ' (' + server + ')'
            plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>')
        else:
            title = item.contentSerieName + ' (' + server + ')'
            plot = item.plot

        thumbnail = servertools.guess_server_thumbnail(title)

        if 'player' not in url:
            itemlist.append(
                item.clone(title=title,
                           url=url,
                           action="play",
                           plot=plot,
                           thumbnail=thumbnail))

    if config.get_library_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
示例#10
0
def findvideos(item):
    """List known cinecalidad mirrors, decoding obfuscated dec()-pairs.

    Each scraped pair decodes to (base-url, path); mirrors whose base URL is
    known in ``servidor`` become play items, with "recomendado" highlighting.
    """
    servidor = {
        "http://uptobox.com/": "uptobox",
        "http://userscloud.com/": "userscloud",
        "https://my.pcloud.com/publink/show?code=": "pcloud",
        "http://thevideos.tv/": "thevideos",
        "http://ul.to/": "uploadedto",
        "http://turbobit.net/": "turbobit",
        "http://www.cinecalidad.com/protect/v.html?i=": "cinecalidad",
        "http://www.mediafire.com/download/": "mediafire",
        "https://www.youtube.com/watch?v=": "youtube",
        "http://thevideos.tv/embed-": "thevideos",
        "//www.youtube.com/embed/": "youtube",
        "http://ok.ru/video/": "okru",
        "http://ok.ru/videoembed/": "okru",
        "http://www.cinemaqualidade.com/protect/v.html?i=":
        "cinemaqualidade.com",
        "http://usersfiles.com/": "usersfiles",
        "https://depositfiles.com/files/": "depositfiles",
        "http://www.nowvideo.sx/video/": "nowvideo",
        "http://vidbull.com/": "vidbull"
    }
    logger.info("pelisalacarta.channels.cinecalidad links")
    itemlist = []
    data = scrapertools.cache_page(item.url)

    #   {h=dec("111 123 123 119 65 54 54 124 119 123 118 105 118 127 53 106 118 116 54")+dec("114 114 110 115 110 55 121 117 64 120 120 115");}
    patron = 'dec\("([^"]+)"\)\+dec\("([^"]+)"\)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    recomendados = ["uptobox", "thevideos", "nowvideo", "pcloud"]
    for scrapedurl, scrapedtitle in matches:
        # FIX: decode once per match — the original re-ran dec(scrapedurl)
        # up to seven times inside the loop body.
        base = dec(scrapedurl)
        if base not in servidor:
            continue
        server = servidor[base]
        url = base + dec(scrapedtitle)
        title = "Ver " + item.title + " en " + server.upper()
        if server in recomendados:
            title = title + "[COLOR limegreen] [I] (Recomedado) [/I] [/COLOR]"
        thumbnail = servertools.guess_server_thumbnail(server)
        plot = ""
        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "])")
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 fulltitle=item.title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 extra=item.thumbnail,
                 server=server))
    return itemlist
示例#11
0
def play(item):
    """Resolve a sipeliculas option via the site's ajax endpoint and play it.

    FIX: the original body mixed 4-space and space+tab indentation, which
    raises TabError under Python 3; indentation is normalized to spaces.
    """
    logger.info("pelisalacarta.channels.sipeliculas play")
    itemlist = []

    video = httptools.downloadpage(host+'/ajax.public.php','acc=ver_opc&f='+item.extra).data
    logger.info("video="+video)
    enlaces = servertools.findvideos(video)
    if enlaces:
        logger.info("server="+enlaces[0][2])
        thumbnail = servertools.guess_server_thumbnail(video)
        # Añade al listado de XBMC
        itemlist.append( Item(channel=item.channel, action="play", title=item.title , fulltitle=item.fulltitle, url=enlaces[0][1] , server=enlaces[0][2], thumbnail=thumbnail, folder=False) )

    return itemlist
示例#12
0
def play(item):
    """Play the iframe embedded in the downloaded player page.

    FIX: the original body mixed 4-space and space+tab indentation, which
    raises TabError under Python 3; indentation is normalized to spaces.
    """
    logger.info()
    itemlist = []

    player = httptools.downloadpage(item.url,item.extra).data
    video = scrapertools.find_single_match(player,'<iframe class="embed-responsive-item" src="([^"]+)"')
    #logger.info("video="+video)
    enlaces = servertools.findvideos(video)
    if enlaces:
        thumbnail = servertools.guess_server_thumbnail(video)
        # Añade al listado de XBMC
        itemlist.append( Item(channel=item.channel, action="play", title=item.title , fulltitle=item.fulltitle, url=enlaces[0][1] , server=enlaces[0][2], thumbnail=thumbnail, folder=False) )

    return itemlist
示例#13
0
def findvideos(item):
    """Collect playable links from the page's metaframe iframes.

    Youtube iframes are relabelled as trailers; an "add to videolibrary"
    entry is appended when videolibrary support is enabled.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    patron = '<iframe class=metaframe rptss src=(.*?) frameborder=0 allowfullscreen><\/iframe>'
    # FIX: was "matches = matches = ..." (redundant duplicated assignment).
    matches = re.compile(patron, re.DOTALL).findall(data)

    for video_url in matches:

        # TODO Reparar directos
        # if 'stream' in video_url:
        #     data = httptools.downloadpage('https:'+video_url).data
        #     new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"')
        #     new_data = httptools.downloadpage(new_url).data
        #     logger.debug(new_data)
        #
        #     url, quality = scrapertools.find_single_match(new_data, "file:'(.*?)',label:'(.*?)'")
        #     headers_string = '|Referer=%s' % url
        #     url = url.replace('download', 'preview')+headers_string
        #     sub = scrapertools.find_single_match(new_data, "file:.*?'(.*?srt)'")
        #     new_item = (Item(title=item.title, url=url, quality=quality, server='directo',
        #                      subtitle=sub))
        #     itemlist.append(new_item)
        # else:
        itemlist.extend(servertools.find_video_items(data=video_url))

    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = 'play'
        videoitem.thumbnail = servertools.guess_server_thumbnail(
            videoitem.server)
        videoitem.infoLabels = item.infoLabels
        videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en Youtube[/COLOR]'

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))
    return itemlist
示例#14
0
def findvideos(item):
    """Build episode links via find_video_items, tagging server and language.

    The page language appears once per page, so it is extracted once here
    (loop-invariant hoisted) instead of re-running the regex for every
    video item as the original did.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    video_items = servertools.find_video_items(item)

    language = scrapertools.find_single_match(data, '<span class="f-info-title">Idioma:<\/span>\s*<span '
                                                    'class="f-info-text">(.*?)<\/span>')

    for videoitem in video_items:
        videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
        videoitem.language = language
        videoitem.title = item.contentSerieName + ' (' + videoitem.server + ') (' + videoitem.language + ')'
        videoitem.quality = 'default'
        itemlist.append(videoitem)

    return itemlist
示例#15
0
def findvideos(item):
    """Decode the obfuscated player URLs and emit one play item per server.

    FIX: the original body was tab-indented with inconsistent space
    sub-levels (TabError / inconsistent-indent under Python 3); indentation
    is normalized to 4 spaces.
    """
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = 'function play.*?servidores.*?attr.*?src.*?\+([^;]+);'
    matches = re.compile(patron,re.DOTALL).findall(data)
    title = item.title
    enlace = scrapertools.find_single_match(data,'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"')

    for encurl in matches:
        # URLs referencing the e20fb34 variable need the scraped suffix.
        if 'e20fb34' in encurl:
            url = dec(encurl)
            url = url + enlace
        else:
            url = dec(encurl)
        title = ''
        server = ''
        if '/opl.' in url:
            server = 'Openload'
        elif '/your' in url:
            server = 'Yourupload'
        elif '/sen.' in url:
            server = 'Sendvid'

        if item.extra == 'peliculas':
            title = item.contentTitle+' ('+server+')'
            plot = scrapertools.find_single_match(data,'<p>([^<]+)<\/p>')
        else:
            title = item.contentSerieName+' ('+server+')'
            plot = item.plot

        thumbnail = servertools.guess_server_thumbnail(title)

        if 'player' not in url:
            itemlist.append(item.clone(title=title, url=url, action="play", plot=plot, thumbnail=thumbnail))

    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]', url=item.url,
                             action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
示例#16
0
def play(item):
    """Ask the site's ajax endpoint for the selected option and queue the first resolved link."""
    logger.info()
    itemlist = []

    payload = 'acc=ver_opc&f=' + item.extra
    video = httptools.downloadpage(host + '/ajax.public.php', payload).data
    logger.info("video=" + video)
    found = servertools.findvideos(video)
    if found:
        logger.info("server=" + found[0][2])
        # Añade al listado de XBMC
        itemlist.append(
            Item(channel=item.channel, action="play", title=item.title,
                 fulltitle=item.fulltitle, url=found[0][1], server=found[0][2],
                 thumbnail=servertools.guess_server_thumbnail(video),
                 folder=False))

    return itemlist
示例#17
0
def findvideos(item):
    """Scrape (language, quality, url) triples and emit one play item each.

    Relies on module-level ``taudio`` and ``tcalidad`` lookup tables — TODO
    confirm they are defined in this module.
    """
    logger.info("pelisalacarta.channels.verpeliculasnuevas findvideos")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"'|\n|\r|\t|&nbsp;|<br>", "", data)

    patron = 'class="servidor" alt=""> ([^<]+)<\/span><span style="width: 40px;">([^<]+)<\/span><a class="verLink" rel="nofollow" href="([^"]+)" target="_blank"> <img title="Ver online gratis"'
    # FIX: was "matches = matches = ..." (redundant duplicated assignment).
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedidioma, scrapedcalidad, scrapedurl in matches:
        scrapedidioma = scrapertools.decodeHtmlentities(scrapedidioma)
        scrapedcalidad = scrapertools.decodeHtmlentities(scrapedcalidad)
        if scrapedidioma.lower() == 'español':
            scrapedidioma = 'castellano'
        scrapedidioma = scrapedidioma.lower()
        # scrapedidioma is already lower-cased; the original called .lower()
        # a second time redundantly.
        idioma = taudio[scrapedidioma]
        calidad = tcalidad[scrapedcalidad.lower()]
        url = scrapedurl
        itemlist.append(
            Item(channel=item.channel,
                 action='play',
                 idioma=idioma,
                 calidad=calidad,
                 url=url))

    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.folder = False
        videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.url)
        videoitem.fulltitle = item.title
        videoitem.title = item.contentTitle + ' | ' + videoitem.calidad + ' | ' + videoitem.idioma
        videoitem.server = servertools.get_server_from_url(videoitem.url)

    if config.get_library_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))
    return itemlist
示例#18
0
def findvideos(item):
    """Pair scraped per-tab languages with the page's video items."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    video_items = servertools.find_video_items(item)
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    language_items = scrapertools.find_single_match(
        data, '<ul class=tabs-sidebar-ul>(.+?)<\/ul>')
    matches = scrapertools.find_multiple_matches(
        language_items, '<li><a href=#ts(.+?)><span>(.+?)<\/span><\/a><\/li>')
    # Keep only tab ids below 5, skipping id 1; reversed to match item order.
    lang = [name for ident, name in matches
            if int(ident) < 5 and int(ident) != 1]
    lang.reverse()
    # Fallback language shown when there are more items than language tabs.
    fallback = scrapertools.find_single_match(
        data, '<span class=f-info-title>Idioma:<\/span>\s*<span '
        'class=f-info-text>(.*?)<\/span>')

    for idx, videoitem in enumerate(video_items):
        videoitem.thumbnail = servertools.guess_server_thumbnail(
            videoitem.server)
        if idx < len(lang):
            videoitem.language = lang[idx]
        else:
            videoitem.language = fallback
        videoitem.title = item.contentSerieName + ' (' + videoitem.server + ') (' + videoitem.language + ')'
        videoitem.quality = 'default'
        videoitem.context = item.context
        itemlist.append(videoitem)

    # Requerido para FilterTools
    if len(itemlist) > 0 and filtertools.context:
        itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
def play(item):
    """Resolve the final video URL for a peliculasaudiolatino item.

    FIX: the original mixed spaces and tabs inside the ``if enlaces`` block,
    which raises TabError under Python 3; indentation is normalized.
    """
    logger.info("channels.peliculasaudiolatino play")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data,'src="('+HOST+'/show/[^"]+)"')
    data = httptools.downloadpage(data,headers=[['User-Agent', 'Mozilla/5.0'],['Accept-Encoding', 'gzip, deflate'],['Referer', HOST],['Connection', 'keep-alive']]).data
    videoUrl = scrapertools.find_single_match(data,'<IFRAME SRC="([^"]+)"')
    goo = scrapertools.find_single_match(videoUrl,'://([^/]+)/')
    if goo == 'goo.gl':
        # goo.gl short links: follow the redirect header to the real URL.
        videoUrl = httptools.downloadpage(videoUrl, follow_redirects=False, only_headers=True).headers["location"]
        server = scrapertools.find_single_match(videoUrl,'://([^/]+)/')  # NOTE(review): unused
    #logger.info("videoUrl = "+videoUrl)
    enlaces = servertools.findvideos(videoUrl)
    if enlaces:
        thumbnail = servertools.guess_server_thumbnail(videoUrl)
        # Añade al listado de XBMC
        itemlist.append( Item(channel=item.channel, action="play", title=item.title , fulltitle=item.fulltitle, url=enlaces[0][1] , server=enlaces[0][2], thumbnail=thumbnail, folder=False) )

    return itemlist
示例#20
0
def findvideos(item):
    """Return play items for an episode, filtered by requested language.

    ``item.extra1`` holds either a scraped-language key or ``'all'``.
    Relies on a module-level ``audio`` mapping — TODO confirm it is defined.
    """
    logger.info()
    itemlist = []

    data = scrapertools.cache_page(item.url)
    patron = 'href="([^"]+)".*?domain=.*?>([^<]+).*?gold">([^<]+)<'
    logger.debug(data)
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedserver, scrapedidioma in matches:
        # FIX: this first line was tab-indented while the rest of the loop
        # used spaces (TabError under Python 3).
        url = scrapedurl
        idioma = audio[scrapedidioma]
        title = item.contentSerieName+' '+str(item.contentSeasonNumber)+'x'+str(item.contentEpisodeNumber)+' '+idioma+' ('+scrapedserver.strip(' ')+')'
        if scrapedidioma == item.extra1 or item.extra1 == 'all':
            itemlist.append(item.clone(title=title, url=url, action="play", language=idioma, server=scrapedserver))
    for videoitem in itemlist:
        videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)

    return itemlist
示例#21
0
def findvideos(item):
    """Emit one play item per <a class=option> link, mapped via ``servers``.

    Relative links (non-HD) are resolved against the module-level ``host``.
    """
    servers = {'pixshare': 'directo', 'bitshare HD': 'openload'}
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # FIX: the cleanup pattern had a trailing '|' creating an empty
    # alternative that zero-width-matched at every position.
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", str(data))
    patron = '<a class=option href=(.*?) target=_self>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, server in matches:
        if 'HD' not in server:
            # Non-HD options are site-relative paths.
            url = host + url
        title = item.contentTitle + ' (' + servers[server] + ')'
        thumbnail = servertools.guess_server_thumbnail(servers[server])
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=item.plot,
                 infoLabels=item.infoLabels))

    if item.tipo != 'serie' and item.tipo != 'findvideos':
        if config.get_library_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))
    return itemlist
示例#22
0
def play(item):
    """Extract the embedded player iframe and queue the first resolved link."""
    logger.info()
    itemlist = []

    page = httptools.downloadpage(item.url, item.extra).data
    embed_url = scrapertools.find_single_match(
        page, '<iframe class="embed-responsive-item" src="([^"]+)"')
    #logger.info("video="+video)
    found = servertools.findvideos(embed_url)
    if found:
        first = found[0]
        # Añade al listado de XBMC
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=item.title,
                 fulltitle=item.fulltitle,
                 url=first[1],
                 server=first[2],
                 thumbnail=servertools.guess_server_thumbnail(embed_url),
                 folder=False))

    return itemlist
示例#23
0
def findvideos(item):
    """Collect playable video links from the page at item.url.

    Extracts every rel="nofollow" link, lets servertools detect known
    video servers inside each one, then decorates the resulting items
    with channel metadata. For movies, appends an "add to library" entry.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data

    patron = '<a href="(.*?)" rel="nofollow"'
    # was "matches = matches = ..." -- redundant double assignment removed
    matches = re.compile(patron, re.DOTALL).findall(data)
    for videoitem in matches:
        itemlist.extend(servertools.find_video_items(data=videoitem))

    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = 'play'
        videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
        videoitem.infoLabels = item.infoLabels
        videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en Youtube[/COLOR]'

    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]', url=item.url,
                             action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
示例#24
0
def findvideos(item):
    """List the mirrors available on a movie page.

    Translates the site's server labels to real server names, builds one
    playable Item per mirror, and (for movies only -- item.tipo
    distinguishes series) offers an "add to library" entry.
    """
    # Site label -> actual server name.
    servers = {'pixshare': 'directo', 'bitshare HD': 'openload'}
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Strip quotes and whitespace noise so the attribute-less pattern below
    # matches. (The original alternation ended with a stray '|', creating a
    # useless empty alternative.)
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", str(data))
    patron = '<a class=option href=(.*?) target=_self>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, server in matches:
        # HD links are absolute; everything else is relative to the site host.
        # (was a no-op "url = url" branch)
        if 'HD' not in server:
            url = host + url
        # Fall back to the raw label so an unknown server no longer raises KeyError.
        server_name = servers.get(server, server)
        title = item.contentTitle + ' (' + server_name + ')'
        thumbnail = servertools.guess_server_thumbnail(server_name)
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url,
                             thumbnail=thumbnail, plot=item.plot, infoLabels=item.infoLabels))

    if item.tipo != 'serie' and item.tipo != 'findvideos':
        if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]', url=item.url,
                                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
示例#25
0
def findvideos(item):
    """Build playable items from the language/quality/link rows on the page.

    Each matched row carries a language, a quality label and an external
    link; the module-level taudio and tcalidad maps translate the scraped
    labels. The server is guessed from the URL in a second pass.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"'|\n|\r|\t|&nbsp;|<br>", "", data)

    patron = 'class="servidor" alt=""> ([^<]+)<\/span><span style="width: 40px;">([^<]+)<\/span><a class="verLink" rel="nofollow" href="([^"]+)" target="_blank"> <img title="Ver online gratis"'
    # was "matches = matches = ..." -- redundant double assignment removed
    matches = re.compile(patron, re.DOTALL).findall(data)
    # NOTE: the original loop body mixed 4-space and tab indentation
    # (a TabError under Python 3); normalized to spaces here.
    for scrapedidioma, scrapedcalidad, scrapedurl in matches:
        scrapedidioma = scrapertools.decodeHtmlentities(scrapedidioma)
        scrapedcalidad = scrapertools.decodeHtmlentities(scrapedcalidad)
        if scrapedidioma.lower() == 'español':
            scrapedidioma = 'castellano'
        scrapedidioma = scrapedidioma.lower()
        idioma = taudio[scrapedidioma.lower()]
        calidad = tcalidad[scrapedcalidad.lower()]
        itemlist.append(Item(channel=item.channel, action='play', idioma=idioma, calidad=calidad, url=scrapedurl))

    for videoitem in itemlist:
        videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        videoitem.folder = False
        videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.url)
        videoitem.fulltitle = item.title
        videoitem.server = servertools.get_server_from_url(videoitem.url)
        videoitem.title = item.contentTitle + ' | ' + videoitem.calidad + ' | ' + videoitem.idioma + ' (' + videoitem.server + ')'

    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]', url=item.url,
                             action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
示例#26
0
def findvideos(item):
    """Scrape the mirror table of a yaske movie page into playable items.

    Each <tr> row describes one mirror: option title, hosting server
    (derived from the favicon domain), language flag image, subtitles and
    quality. Rows that fail to parse are logged and skipped.
    """
    logger.info("pelisalacarta.yaske findvideos url="+item.url)

    # Download the page
    data = scrapertools.cache_page(item.url,headers=HEADER)

    # The plot comes from the "sinopsis" meta tag, cleaned of HTML.
    item.plot = scrapertools.find_single_match(data,'<meta name="sinopsis" content="([^"]+)"')
    item.plot = scrapertools.htmlclean(item.plot)
    item.contentPlot = item.plot

    # Extract the entries. Sample row:
    '''
    <tr bgcolor="">
    <td height="32" align="center"><a class="btn btn-mini enlace_link" style="text-decoration:none;" rel="nofollow" target="_blank" title="Ver..." href="http://www.yaske.net/es/reproductor/pelicula/2141/44446/"><i class="icon-play"></i><b>&nbsp; Opcion &nbsp; 04</b></a></td>
    <td align="left"><img src="http://www.google.com/s2/favicons?domain=played.to"/>played</td>
    <td align="center"><img src="http://www.yaske.net/theme/01/data/images/flags/la_la.png" width="21">Lat.</td>
    <td align="center" class="center"><span title="" style="text-transform:capitalize;">hd real 720</span></td>
    <td align="center"><div class="star_rating" title="HD REAL 720 ( 5 de 5 )">
    <ul class="star"><li class="curr" style="width: 100%;"></li></ul>
    </div>
    </td> <td align="center" class="center">2553</td> </tr>
    '''

    patron  = '<tr bgcolor=(.*?)</tr>'
    matches = re.compile(patron,re.DOTALL).findall(data)

    itemlist = []

    #n = 1
    for tr in matches:
        logger.info("tr="+tr)
        try:
            title = scrapertools.get_match(tr,'<b>([^<]+)</b>')
            # Server name is the favicon's domain.
            server = scrapertools.get_match(tr,'"http\://www.google.com/s2/favicons\?domain\=([^"]+)"')

            # <td align="center"><img src="http://www.yaske.net/theme/01/data/images/flags/la_la.png" width="19">Lat.</td>
            idioma = scrapertools.get_match(tr,'<img src="http://www.yaske.[a-z]+/theme/01/data/images/flags/([a-z_]+).png"[^>]+>[^<]*<')
            subtitulos = scrapertools.get_match(tr,'<img src="http://www.yaske.[a-z]+/theme/01/data/images/flags/[^"]+"[^>]+>([^<]*)<')
            calidad = scrapertools.get_match(tr,'<td align="center" class="center"[^<]+<span title="[^"]*" style="text-transform.capitalize.">([^<]+)</span></td>')
            
            #<a [....] href="http://api.ysk.pe/noref/?u=< URL Vídeo >">
            url = scrapertools.get_match(tr,'<a.*?href="([^"]+)"')

            # Extracting netutv currently requires walking several pages, which slows loading a lot.
            # For now those links will show "No hay nada que reproducir".
            '''
            if "/netu/tv/" in url:
                import base64
                ###################################################
                # Añadido 17-09-14
                ###################################################
                try: data = scrapertools.cache_page(url,headers=getSetCookie(url1))
                except: data = scrapertools.cache_page(url)
                ###################################################
                match_b64_1 = 'base64,([^"]+)"'
                b64_1 = scrapertools.get_match(data, match_b64_1)
                utf8_1 = base64.decodestring(b64_1)
                match_b64_inv = "='([^']+)';"
                b64_inv = scrapertools.get_match(utf8_1, match_b64_inv)
                b64_2 = b64_inv[::-1]
                utf8_2 = base64.decodestring(b64_2).replace("%","\\").decode('unicode-escape')
                id_video = scrapertools.get_match(utf8_2,'<input name="vid" id="text" value="([^"]+)">')
                url = "http://netu.tv/watch_video.php?v="+id_video
            '''

            title = title.replace("&nbsp;","")

            # Tag the title with a language code derived from the flag file name.
            if "es_es" in idioma:
                scrapedtitle = title + " en "+server.strip()+" [ESP]["+calidad+"]"
            elif "la_la" in idioma:
                scrapedtitle = title + " en "+server.strip()+" [LAT]["+calidad+"]"
            elif "en_es" in idioma:
                scrapedtitle = title + " en "+server.strip()+" [SUB]["+calidad+"]"
            elif "en_en" in idioma:
                scrapedtitle = title + " en "+server.strip()+" [ENG]["+calidad+"]"
            else:
                scrapedtitle = title + " en "+server.strip()+" ["+idioma+" / "+subtitulos+"]["+calidad+"]"
            scrapedtitle = scrapertools.entityunescape(scrapedtitle)
            scrapedtitle = scrapedtitle.strip()

            scrapedurl = url

            scrapedthumbnail = servertools.guess_server_thumbnail(scrapedtitle)

            logger.info("server="+server+", scrapedurl="+scrapedurl)
            # Skip non-http links and the olimpo.link redirector.
            if scrapedurl.startswith("http") and not "olimpo.link" in scrapedurl:
                itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , folder=False, parentContent=item) )
        except:
            # NOTE(review): deliberately broad -- any unparseable row is
            # logged with its traceback and skipped.
            import traceback
            logger.info("Excepcion: "+traceback.format_exc())

    return itemlist
示例#27
0
def findvideos(item):
    """Query the site's JSON API for every hosted copy of a movie.

    The host/id/slug are parsed from item.url; each language entry in the
    API's 'lista' is then resolved through videofinal.php into concrete
    video URLs. Drive-hosted copies and duplicate URLs are skipped. The
    '%s' placeholder left in each title is filled with the server name by
    get_servers_itemlist at the end.
    """
    logger.info()
    duplicated = []

    data = get_source(item.url)
    video_info = scrapertools.find_single_match(
        data, "load_player\('([^']+).*?([^']+)")
    movie_info = scrapertools.find_single_match(
        item.url,
        'http:\/\/ver-peliculas\.(io|org)\/peliculas\/(\d+)-(.*?)-\d{4}-online\.'
    )
    movie_host = movie_info[0]
    movie_id = movie_info[1]
    movie_name = movie_info[2]
    sub = video_info[1]  # subtitle track id forwarded to the API
    url_base = 'http://ver-peliculas.%s/core/api.php?id=%s&slug=%s' % (
        movie_host, movie_id, movie_name)
    data = httptools.downloadpage(url_base).data
    json_data = jsontools.load(data)
    video_list = json_data['lista']
    itemlist = []
    for videoitem in video_list:
        video_base_url = host + '/core/videofinal.php'
        if video_list[videoitem] is not None:  # was "!= None"
            video_lang = video_list[videoitem]
            languages = ['latino', 'spanish', 'subtitulos']
            for lang in languages:
                if video_lang[lang] is not None:  # was "!= None"
                    if not isinstance(video_lang[lang], int):
                        video_id = video_lang[lang][0]["video"]
                        post = {"video": video_id, "sub": sub}
                        post = urllib.urlencode(post)
                        data = httptools.downloadpage(video_base_url,
                                                      post=post).data
                        playlist = jsontools.load(data)
                        # was playlist[['playlist'][0]] -- an obfuscated
                        # spelling of the same key
                        sources = playlist['playlist']
                        server = playlist['server']
                        for video_link in sources:
                            url = video_link['sources']
                            if url not in duplicated and server != 'drive':
                                lang = lang.capitalize()
                                if lang == 'Spanish':
                                    lang = 'Español'
                                title = 'Ver en %s [' + lang + ']'
                                thumbnail = servertools.guess_server_thumbnail(
                                    server)
                                itemlist.append(
                                    item.clone(title=title,
                                               url=url,
                                               thumbnail=thumbnail,
                                               action='play'))
                                duplicated.append(url)
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
示例#28
0
def findvideos(item):
    """List torrent and hosted mirrors for a divxatope1 movie.

    Rewrites the /descargar/ URL to its /ver-online/ twin, extracts an
    optional torrent link plus the box1..box6 mirror table, and lists the
    "watch" mirrors before the "download" ones. Falls back to a generic
    server scan of the page when nothing matched.
    """
    logger.info()
    itemlist = []

    # Normalize to the streaming page
    item.url = item.url.replace("divxatope1.com/descargar/",
                                "divxatope1.com/ver-online/")

    # Download the page
    data = scrapertools.cachePage(item.url)

    item.plot = scrapertools.find_single_match(
        data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
    item.plot = scrapertools.htmlclean(item.plot).strip()
    item.contentPlot = item.plot

    # The torrent link hides behind a tumejorjuego redirect.
    link = scrapertools.find_single_match(
        data, 'href="http://tumejorjuego.*?link=([^"]+)"')
    if link != "":
        link = "http://www.divxatope1.com/" + link
        logger.info("pelisalacarta.channels.divxatope torrent=" + link)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 server="torrent",
                 title="Vídeo en torrent",
                 fulltitle=item.title,
                 url=link,
                 thumbnail=servertools.guess_server_thumbnail("torrent"),
                 plot=item.plot,
                 folder=False,
                 parentContent=item))

    # box2..box6 = server name, language, quality, link, comment.
    patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
    patron += '<div class="box2">([^<]+)</div[^<]+'
    patron += '<div class="box3">([^<]+)</div[^<]+'
    patron += '<div class="box4">([^<]+)</div[^<]+'
    patron += '<div class="box5">(.*?)</div[^<]+'
    patron += '<div class="box6">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    itemlist_ver = []
    itemlist_descargar = []

    for servername, idioma, calidad, scrapedurl, comentarios in matches:
        title = "Mirror en " + servername + " (" + calidad + ")" + " (" + idioma + ")"
        if comentarios.strip() != "":
            title = title + " (" + comentarios.strip() + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = servertools.guess_server_thumbnail(title)
        plot = ""
        logger.info("title=[" + title + "], url=[" + url + "], thumbnail=[" +
                    thumbnail + "]")
        new_item = Item(channel=item.channel,
                        action="extract_url",
                        title=title,
                        fulltitle=title,
                        url=url,
                        thumbnail=thumbnail,
                        plot=plot,
                        folder=True,
                        parentContent=item)
        # The comment text distinguishes streaming from download mirrors.
        if comentarios.startswith("Ver en"):
            itemlist_ver.append(new_item)
        else:
            itemlist_descargar.append(new_item)

    # Streaming mirrors first, then downloads.
    for new_item in itemlist_ver:
        itemlist.append(new_item)

    for new_item in itemlist_descargar:
        itemlist.append(new_item)

    # Fallback: let servertools find any recognizable links in the raw page.
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(item=item, data=data)
        for videoitem in itemlist:
            videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(
                videoitem.url) + ")"
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel

    return itemlist
示例#29
0
def findvideos(item):
    """Legacy divxatope variant: list torrent and hosted mirrors.

    Same structure as the divxatope1 version: rewrite /descargar/ to
    /ver-online/, pick up the optional torrent link and the box1..box6
    mirror table, list "watch" mirrors before downloads, and fall back to
    a generic server scan when nothing matched.
    """
    logger.info("pelisalacarta.channels.divxatope findvideos")
    itemlist=[]

    # Normalize to the streaming page
    item.url = item.url.replace("divxatope.com/descargar/","divxatope.com/ver-online/")

    '''
    <div class="box1"><img src='http://www.divxatope.com/uploads/images/gestores/thumbs/1411605666_nowvideo.jpg' width='33' height='33'></div>
    <div class="box2">nowvideo</div>
    <div class="box3">Español Castel</div>
    <div class="box4">DVD-Screene</div>
    <div class="box5"><a href="http://www.nowvideo.ch/video/affd21b283421" rel="nofollow" target="_blank">Ver Online</a></div>
    '''
    # Download the page
    data = scrapertools.cachePage(item.url)

    item.plot = scrapertools.find_single_match(data,'<div class="post-entry" style="height:300px;">(.*?)</div>')
    item.plot = scrapertools.htmlclean(item.plot).strip()
    item.contentPlot = item.plot

    # The torrent link hides behind a tumejorserie redirect.
    link = scrapertools.find_single_match(data,'href="http://tumejorserie.*?link=([^"]+)"')
    if link!="":
        link = "http://www.divxatope.com/"+link
        logger.info("pelisalacarta.channels.divxatope torrent="+link)
        itemlist.append( Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent" , fulltitle = item.title, url=link , thumbnail=servertools.guess_server_thumbnail("torrent") , plot=item.plot , folder=False, parentContent=item) )

    # box2..box6 = server name, language, quality, link, comment (see sample above).
    patron  = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
    patron += '<div class="box2">([^<]+)</div[^<]+'
    patron += '<div class="box3">([^<]+)</div[^<]+'
    patron += '<div class="box4">([^<]+)</div[^<]+'
    patron += '<div class="box5">(.*?)</div[^<]+'
    patron += '<div class="box6">([^<]+)<'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    itemlist_ver = []
    itemlist_descargar = []

    for servername,idioma,calidad,scrapedurl,comentarios in matches:
        title = "Mirror en "+servername+" ("+calidad+")"+" ("+idioma+")"
        if comentarios.strip()!="":
            title = title + " ("+comentarios.strip()+")"
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = servertools.guess_server_thumbnail(title)
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        new_item = Item(channel=item.channel, action="extract_url", title=title , fulltitle = title, url=url , thumbnail=thumbnail , plot=plot , folder=True, parentContent=item)
        # The comment text distinguishes streaming from download mirrors.
        if comentarios.startswith("Ver en"):
            itemlist_ver.append( new_item)
        else:
            itemlist_descargar.append( new_item )

    # Streaming mirrors first, then downloads.
    for new_item in itemlist_ver:
        itemlist.append(new_item)
    
    for new_item in itemlist_descargar:
        itemlist.append(new_item)

    # Fallback: let servertools find any recognizable links in the raw page.
    if len(itemlist)==0:
        itemlist = servertools.find_video_items(item=item,data=data)
        for videoitem in itemlist:
            videoitem.title = "Enlace encontrado en "+videoitem.server+" ("+scrapertools.get_filename_from_url(videoitem.url)+")"
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel

    return itemlist
示例#30
0
def findvideos(item):
    """List playable mirrors for a pelisplus movie page.

    Direct links hosted on elreyxhd/pelisplus are resolved inline from
    the embedded player's sources block; everything else is delegated to
    servertools.find_video_items. YouTube trailers are pushed to the end
    of the list.
    """
    logger.info("pelisalacarta.channels.pelisplus findvideos")
    itemlist = []
    datas = scrapertools.cache_page(item.url)

    patron = "<iframe.*?src='([^']+)' frameborder='0' allowfullscreen.*?"
    matches = re.compile(patron, re.DOTALL).findall(datas)

    for scrapedurl in matches:
        # was: "if 'elreyxhd' or 'pelisplus.biz' in scrapedurl" -- a truthy
        # string literal made the condition always True
        if 'elreyxhd' in scrapedurl or 'pelisplus.biz' in scrapedurl:
            data = scrapertools.cachePage(scrapedurl, headers=headers)
            quote = scrapertools.find_single_match(data,
                                                   'sources.*?file.*?http')

            patronr = ''
            if quote and "'" in quote:
                patronr = "file:'([^']+)',label:'([^.*?]+)',type:.*?'.*?}"
            elif '"' in quote:
                patronr = 'file:"([^"]+)",label:"([^.*?]+)",type:.*?".*?}'
            if patronr == '':
                # neither quoting style matched; patronr used to be unbound
                # here, raising NameError
                continue
            matchesr = re.compile(patronr, re.DOTALL).findall(data)

            for scrapedurl, scrapedcalidad in matchesr:
                url = scrapedurl
                title = item.contentTitle + ' (' + scrapedcalidad + ')'
                thumbnail = item.thumbnail
                fanart = item.fanart
                if (DEBUG):
                    logger.info("title=[" + title + "], url=[" + url +
                                "], thumbnail=[" + thumbnail + "])")
                itemlist.append(
                    Item(channel=item.channel,
                         action="play",
                         title=title,
                         url=url,
                         thumbnail=thumbnail,
                         fanart=fanart))

    # Let servertools pick up any other recognizable links in the page.
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=datas))

    for videoitem in itemlist:

        videoitem.channel = item.channel
        if videoitem.server != '':
            videoitem.thumbnail = servertools.guess_server_thumbnail(
                videoitem.server)
        else:
            videoitem.thumbnail = item.thumbnail
        videoitem.action = 'play'
        videoitem.fulltitle = item.title

        if 'redirector' not in videoitem.url and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'

    # Swap any YouTube trailer to the end of the list.
    n = 0
    for videoitem in itemlist:
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]'
            itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n]
        n = n + 1

    # NOTE(review): "'youtube' in itemlist[-1]" tests membership on an Item
    # object -- relies on Item supporting "in"; now guarded against an empty
    # list (was an IndexError).
    if item.extra == 'findvideos' and itemlist and 'youtube' in itemlist[-1]:
        itemlist.pop(1)

    if 'serie' not in item.url:
        if config.get_library_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
示例#31
0
def findvideos(item):
    """Query ver-peliculas.org's JSON API for every hosted copy of a movie.

    The movie id and slug are parsed from item.url; each language entry
    in the API's 'lista' is resolved through videofinal.php into concrete
    video URLs. Drive-hosted copies and duplicate URLs are skipped.
    """
    logger.info()
    duplicated = []

    data = get_source(item.url)
    video_info = scrapertools.find_single_match(
        data, "load_player\('(.*?)','(.*?)'\);")
    movie_info = scrapertools.find_single_match(
        item.url, 'http:\/\/ver-peliculas\.org\/peliculas\/(\d+)-(.*?)-\d{'
        '4}-online\.')
    movie_id = movie_info[0]
    movie_name = movie_info[1]
    sub = video_info[1]  # subtitle track id forwarded to the API
    url_base = 'http://ver-peliculas.org/core/api.php?id=%s&slug=%s' % (
        movie_id, movie_name)
    data = httptools.downloadpage(url_base).data
    json_data = jsontools.load(data)
    video_list = json_data['lista']
    itemlist = []
    for videoitem in video_list:
        video_base_url = 'http://ver-peliculas.org/core/videofinal.php'
        if video_list[videoitem] is not None:  # was "!= None"
            video_lang = video_list[videoitem]
            languages = ['latino', 'spanish', 'subtitulos']
            for lang in languages:
                if video_lang[lang] is not None:  # was "!= None"
                    if not isinstance(video_lang[lang], int):
                        video_id = video_lang[lang][0]["video"]
                        post = {"video": video_id, "sub": sub}
                        post = urllib.urlencode(post)
                        data = httptools.downloadpage(video_base_url,
                                                      post=post).data
                        playlist = jsontools.load(data)
                        # was playlist[['playlist'][0]] -- an obfuscated
                        # spelling of the same key
                        sources = playlist['playlist']
                        server = playlist['server']

                        for video_link in sources:
                            url = video_link['sources']
                            # if 'onevideo' in url:
                            # data = get_source(url)
                            # g_urls = servertools.findvideos(data=data)
                            # url = g_urls[0][1]
                            # server = g_urls[0][0]
                            if url not in duplicated and server != 'drive':
                                lang = lang.capitalize()
                                if lang == 'Spanish':
                                    lang = 'Español'
                                title = '(%s) %s (%s)' % (server, item.title,
                                                          lang)
                                thumbnail = servertools.guess_server_thumbnail(
                                    server)
                                itemlist.append(
                                    item.clone(title=title,
                                               url=url,
                                               server=server,
                                               thumbnail=thumbnail,
                                               action='play'))
                                duplicated.append(url)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
def findvideos(item):
    """Resolve the obfuscated per-language player links of an episode page.

    The page defines playN() javascript handlers whose encoded URLs are
    decoded by the module-level dec() helper; langs maps each playN id to
    its language label. Known servers are recognized from the decoded
    URL's path fragment.
    """
    logger.info()

    itemlist = []
    langs = dict()

    data = httptools.downloadpage(item.url).data
    patron = '<a.*?onclick="return (play\d+).*?;".*?> (.*?) <\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Map playN handler id -> language label shown on the page.
    for key, value in matches:
        langs[key] = value.strip()

    patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # Extra path fragment appended to URLs encoded with the e20fb34... key.
    enlace = scrapertools.find_single_match(data,
                                            'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"')

    for scrapedlang, encurl in matches:

        if 'e20fb34' in encurl:
            url = dec(encurl) + enlace
        else:
            url = dec(encurl)
        server = ''
        servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid', '/face': 'netutv', '/vk': 'vk'}
        server_id = re.sub(r'.*?embed|\.php.*', '', url)
        if server_id and server_id in servers:
            server = servers[server_id]
        logger.debug('server_id: %s' % server_id)
        logger.debug('langs: %s' % langs)
        if langs[scrapedlang] in list_language:
            language = IDIOMAS[langs[scrapedlang]]
        else:
            language = 'Latino'
        # was unbound for any label other than 'Latino' / 'Sub Español',
        # raising NameError at the title line below
        idioma = ''
        if langs[scrapedlang] == 'Latino':
            idioma = '[COLOR limegreen]LATINO[/COLOR]'
        elif langs[scrapedlang] == 'Sub Español':
            idioma = '[COLOR red]SUB[/COLOR]'

        title = item.contentSerieName + ' (' + server + ') ' + idioma
        plot = item.plot

        thumbnail = servertools.guess_server_thumbnail(title)

        # Only concrete embed endpoints ('*.php'), never the player shell.
        if 'player' not in url and 'php' in url:
            itemlist.append(item.clone(title=title,
                                       url=url,
                                       action="play",
                                       plot=plot,
                                       thumbnail=thumbnail,
                                       server=server,
                                       quality='',
                                       language=language
                                       ))
        logger.debug('url: %s' % url)
    # Required by FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
示例#33
0
def findvideos(item):
    """List playable mirrors for a movie, resolving gvideo links inline.

    Loads the embedded player page, resolves any 'tipo' links through a
    second player page into Google Video URLs, then lets servertools scan
    the page for other recognizable servers. YouTube trailers are pushed
    to the end of the list.
    """
    logger.info()
    itemlist = []
    duplicados = []
    data = httptools.downloadpage(item.url).data
    logger.debug('data: %s' % data)
    video_page = scrapertools.find_single_match(
        data, "<iframe width='100%' height='500' src='(.*?)' frameborder='0'")
    data = httptools.downloadpage(video_page).data
    patron = '<li data-id=".*?">\s+<a href="(.*?)" >'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl in matches:

        # 'tipo' links need an extra hop through the inner player page to
        # reach the actual Google Video URL.
        if 'tipo' in scrapedurl:
            server = 'gvideo'
            gvideo_data = httptools.downloadpage(scrapedurl).data
            video_url = scrapertools.find_single_match(
                gvideo_data,
                '<div id="player">.*?border: none" src="\/\/(.*?)" ')
            video_url = 'http://%s' % video_url
            gvideo_url = httptools.downloadpage(video_url).data
            videourl = servertools.findvideosbyserver(gvideo_url, server)

            logger.debug('videourl: %s' % videourl)
            language = 'latino'
            quality = 'default'
            url = videourl[0][1]
            title = '%s (%s)' % (item.contentTitle, server)
            thumbnail = item.thumbnail
            fanart = item.fanart
            if video_url not in duplicados:
                itemlist.append(
                    item.clone(action="play",
                               title=title,
                               url=url,
                               thumbnail=thumbnail,
                               fanart=fanart,
                               show=title,
                               extra='gvideo',
                               language=language,
                               quality=quality,
                               server=server))
                duplicados.append(video_url)

    # Let servertools pick up any other recognizable links in the page.
    itemlist.extend(servertools.find_video_items(data=data))

    for videoitem in itemlist:
        # videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        if videoitem.quality == '' or videoitem.language == '':
            videoitem.quality = 'default'
            videoitem.language = 'Latino'
        if videoitem.server != '':
            videoitem.thumbnail = servertools.guess_server_thumbnail(
                videoitem.server)
        else:
            videoitem.thumbnail = item.thumbnail
            videoitem.server = 'directo'
        videoitem.action = 'play'
        videoitem.fulltitle = item.title

        if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'

    # Swap any YouTube trailer to the end of the list.
    n = 0
    for videoitem in itemlist:
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]'
            itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n]
        n = n + 1

    # NOTE(review): "'youtube' in itemlist[-1]" tests membership on an Item
    # object -- presumably Item implements "in"; also raises IndexError when
    # itemlist is empty. TODO confirm before relying on this branch.
    if item.extra == 'findvideos' and 'youtube' in itemlist[-1]:
        itemlist.pop(1)

        # Required by FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay

    autoplay.start(itemlist, item)

    if 'serie' not in item.url:
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
示例#34
0
def findvideos(item):
    """Extract playable video links from a pelisplus-style film page.

    Scrapes every embedded iframe; elreyxhd / pelisplus.biz embeds are
    resolved to their direct file urls, everything else is delegated to
    ``servertools.find_video_items``.  Trailers (youtube links) are moved
    to the end, and a videolibrary entry is appended for films.
    """
    logger.info()
    itemlist = []
    duplicados = []
    datas = httptools.downloadpage(item.url).data
    patron = "<iframe.*?src='([^']+)' frameborder='0' allowfullscreen.*?"
    matches = re.compile(patron, re.DOTALL).findall(datas)

    for scrapedurl in matches:
        # FIX: the original condition was "'elreyxhd' or 'pelisplus.biz' in
        # scrapedurl", which is always True ('elreyxhd' is truthy); test each
        # host substring explicitly.
        if 'elreyxhd' not in scrapedurl and 'pelisplus.biz' not in scrapedurl:
            continue
        data = httptools.downloadpage(scrapedurl, headers=headers).data

        # Pick the regex that matches the player config's quoting style.
        quote = scrapertools.find_single_match(data, 'sources.*?file.*?http')
        patronr = ''
        if quote and "'" in quote:
            patronr = "file:'([^']+)',label:'([^.*?]+)',type:.*?'.*?}"
        elif '"' in quote:
            patronr = '{file:"(.*?)",label:"(.*?)"}'
        if patronr == '':
            continue
        matchesr = re.compile(patronr, re.DOTALL).findall(data)

        for fileurl, scrapedcalidad in matchesr:
            language = 'latino'
            quality = scrapedcalidad.decode('cp1252').encode('utf8')
            title = item.contentTitle + ' (' + str(scrapedcalidad) + ')'
            if fileurl not in duplicados:
                itemlist.append(
                    item.clone(
                        action="play",
                        title=title,
                        url=fileurl,
                        thumbnail=item.thumbnail,
                        fanart=item.fanart,
                        show=title,
                        extra='directo',
                        language=language,
                        quality=quality,
                        server='directo',
                    ))
                duplicados.append(fileurl)

    # FIX: removed the dead "url = scrapedurl" line here; it raised NameError
    # whenever no iframe matched, and its value was never used.
    itemlist.extend(servertools.find_video_items(data=datas))

    for videoitem in itemlist:
        videoitem.channel = item.channel
        if videoitem.quality == '' or videoitem.language == '':
            videoitem.quality = 'default'
            videoitem.language = 'Latino'
        if videoitem.server != '':
            videoitem.thumbnail = servertools.guess_server_thumbnail(
                videoitem.server)
        else:
            videoitem.thumbnail = item.thumbnail
            videoitem.server = 'directo'
        videoitem.action = 'play'
        videoitem.fulltitle = item.title

        if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'

    # Move youtube trailers to the end of the list.
    for n, videoitem in enumerate(itemlist):
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]'
            itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n]

    # FIX: the original tested "'youtube' in itemlist[-1]" (membership on an
    # Item, not its url) and crashed on an empty list; check the url and make
    # sure pop(1) has something to remove.
    if item.extra == 'findvideos' and len(itemlist) > 1 and 'youtube' in itemlist[-1].url:
        itemlist.pop(1)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if 'serie' not in item.url:
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
示例#35
0
def findvideos(item):
    """Scrape the mirror table of a yaske.net film page.

    Each <tr> row carries a hoster favicon, a language flag image, a quality
    label and the outgoing link.  Rows become playable Items; rows whose url
    does not start with "http" or that point to olimpo.link are discarded.
    A failing row is logged and skipped, never aborting the whole list.
    """
    logger.info("pelisalacarta.yaske findvideos url=" + item.url)

    # Download the page
    data = scrapertools.cache_page(item.url, headers=HEADER)

    item.plot = scrapertools.find_single_match(data, '<meta name="sinopsis" content="([^"]+)"')
    item.plot = scrapertools.htmlclean(item.plot)
    item.contentPlot = item.plot

    # Extract the entries; sample row kept below for reference:
    """
    <tr bgcolor="">
    <td height="32" align="center"><a class="btn btn-mini enlace_link" style="text-decoration:none;" rel="nofollow" target="_blank" title="Ver..." href="http://www.yaske.net/es/reproductor/pelicula/2141/44446/"><i class="icon-play"></i><b>&nbsp; Opcion &nbsp; 04</b></a></td>
    <td align="left"><img src="http://www.google.com/s2/favicons?domain=played.to"/>played</td>
    <td align="center"><img src="http://www.yaske.net/theme/01/data/images/flags/la_la.png" width="21">Lat.</td>
    <td align="center" class="center"><span title="" style="text-transform:capitalize;">hd real 720</span></td>
    <td align="center"><div class="star_rating" title="HD REAL 720 ( 5 de 5 )">
    <ul class="star"><li class="curr" style="width: 100%;"></li></ul>
    </div>
    </td> <td align="center" class="center">2553</td> </tr>
    """

    patron = "<tr bgcolor=(.*?)</tr>"
    matches = re.compile(patron, re.DOTALL).findall(data)

    itemlist = []

    # n = 1
    for tr in matches:
        logger.info("tr=" + tr)
        try:
            # Visible option label and hoster name (from the favicon domain).
            title = scrapertools.get_match(tr, "<b>([^<]+)</b>")
            server = scrapertools.get_match(tr, '"http\://www.google.com/s2/favicons\?domain\=([^"]+)"')

            # <td align="center"><img src="http://www.yaske.net/theme/01/data/images/flags/la_la.png" width="19">Lat.</td>
            # Language code is the flag image name (es_es, la_la, en_es, ...).
            idioma = scrapertools.get_match(
                tr, '<img src="http://www.yaske.[a-z]+/theme/01/data/images/flags/([a-z_]+).png"[^>]+>[^<]*<'
            )
            subtitulos = scrapertools.get_match(
                tr, '<img src="http://www.yaske.[a-z]+/theme/01/data/images/flags/[^"]+"[^>]+>([^<]*)<'
            )
            calidad = scrapertools.get_match(
                tr,
                '<td align="center" class="center"[^<]+<span title="[^"]*" style="text-transform.capitalize.">([^<]+)</span></td>',
            )

            # <a [....] href="http://api.ysk.pe/noref/?u=< URL Vídeo >">
            url = scrapertools.get_match(tr, '<a.*?href="([^"]+)"')

            # Extracting netutv currently requires walking through several
            # pages, which slows loading down considerably.  For now those
            # rows will show "No hay nada que reproducir".
            """
            if "/netu/tv/" in url:
                import base64
                ###################################################
                # Añadido 17-09-14
                ###################################################
                try: data = scrapertools.cache_page(url,headers=getSetCookie(url1))
                except: data = scrapertools.cache_page(url)
                ###################################################
                match_b64_1 = 'base64,([^"]+)"'
                b64_1 = scrapertools.get_match(data, match_b64_1)
                utf8_1 = base64.decodestring(b64_1)
                match_b64_inv = "='([^']+)';"
                b64_inv = scrapertools.get_match(utf8_1, match_b64_inv)
                b64_2 = b64_inv[::-1]
                utf8_2 = base64.decodestring(b64_2).replace("%","\\").decode('unicode-escape')
                id_video = scrapertools.get_match(utf8_2,'<input name="vid" id="text" value="([^"]+)">')
                url = "http://netu.tv/watch_video.php?v="+id_video
            """

            title = title.replace("&nbsp;", "")

            # Build a display title tagged with language and quality.
            if "es_es" in idioma:
                scrapedtitle = title + " en " + server.strip() + " [ESP][" + calidad + "]"
            elif "la_la" in idioma:
                scrapedtitle = title + " en " + server.strip() + " [LAT][" + calidad + "]"
            elif "en_es" in idioma:
                scrapedtitle = title + " en " + server.strip() + " [SUB][" + calidad + "]"
            elif "en_en" in idioma:
                scrapedtitle = title + " en " + server.strip() + " [ENG][" + calidad + "]"
            else:
                scrapedtitle = (
                    title + " en " + server.strip() + " [" + idioma + " / " + subtitulos + "][" + calidad + "]"
                )
            scrapedtitle = scrapertools.entityunescape(scrapedtitle)
            scrapedtitle = scrapedtitle.strip()

            scrapedurl = url

            scrapedthumbnail = servertools.guess_server_thumbnail(scrapedtitle)

            logger.info("server=" + server + ", scrapedurl=" + scrapedurl)
            # Keep only absolute http(s) links, excluding the olimpo.link host.
            if scrapedurl.startswith("http") and not "olimpo.link" in scrapedurl:
                itemlist.append(
                    Item(
                        channel=item.channel,
                        action="play",
                        title=scrapedtitle,
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail,
                        folder=False,
                        parentContent=item,
                    )
                )
        except:
            # A malformed row only skips itself; log the traceback and go on.
            import traceback

            logger.info("Excepcion: " + traceback.format_exc())

    return itemlist
示例#36
0
def findvideos(item):
    """Collect playable links from the tabbed players of an episode page.

    Each "tabs-N" iframe is paired with its language label.  'enlac' urls
    hide gvideo/openload embeds behind a stream page, '.html' urls are
    resolved from the page itself, and anything else is played directly.

    The original triplicated the append/dedup block verbatim; it is now a
    single helper.  The loop variable was also renamed from ``id`` (which
    shadowed the builtin) to ``tab_id``.
    """
    itemlist = []
    duplicated = []

    def _add(title, url, server):
        # Append a playable Item once per unique url.
        if url not in duplicated:
            itemlist.append(
                item.clone(title=title,
                           url=url,
                           action='play',
                           server=server,
                           thumbnail=servertools.guess_server_thumbnail(server)))
            duplicated.append(url)

    data = httptools.downloadpage(item.url).data
    patron = '<div class="player-box" id="tabs-(\d+)"><iframe data-src="(.*?)".*?allowfullscreen'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for tab_id, scrapedurl in matches:
        lang = scrapertools.find_single_match(
            data,
            '<li><a href="#tabs-%s"><img src=".*?"  alt="(.*?)".*?\/>' % tab_id)
        server = servertools.get_server_from_url(scrapedurl)
        title = '%s (%s) (%s)' % (item.title, server, lang)

        if 'enlac' in scrapedurl:
            # These links wrap a gvideo/openload embed; swap to the stream
            # page and let servertools resolve the real urls.
            if 'google' in scrapedurl:
                server = 'gvideo'
            elif 'openload' in scrapedurl:
                server = 'openload'

            title = '%s (%s) (%s)' % (item.title, server, lang)
            scrapedurl = scrapedurl.replace('embed', 'stream')
            gdata = httptools.downloadpage(scrapedurl).data
            for url in servertools.findvideosbyserver(gdata, server):
                _add(title, url[1], server)
        elif '.html' in scrapedurl:
            # Resolve against the original page data, not the iframe target.
            for url in servertools.findvideosbyserver(data, server):
                _add(title, url[1], server)
        else:
            _add(title, scrapedurl, server)

    return itemlist
示例#37
0
def findvideos(item):
    """Extract direct video links for a pelisplus film page.

    Scrapes every iframe url; elreyxhd / pelisplus.biz embeds are resolved
    to their direct file urls, other embeds are delegated to
    ``servertools.find_video_items``.  Youtube trailers are pushed to the
    end and a library entry is offered for films.
    """
    logger.info("pelisalacarta.channels.pelisplus findvideos")
    itemlist = []
    datas = scrapertools.cache_page(item.url)

    patron = "<iframe.*?src='([^']+)' frameborder='0' allowfullscreen.*?"
    matches = re.compile(patron, re.DOTALL).findall(datas)

    for scrapedurl in matches:
        # FIX: the original condition "'elreyxhd' or 'pelisplus.biz' in
        # scrapedurl" is always True; test both substrings explicitly.
        if 'elreyxhd' not in scrapedurl and 'pelisplus.biz' not in scrapedurl:
            continue
        data = scrapertools.cachePage(scrapedurl, headers=headers)
        quote = scrapertools.find_single_match(data, 'sources.*?file.*?http')

        # FIX: patronr could be referenced before assignment when neither
        # quoting style matched; initialise it and skip that iframe.
        patronr = ''
        if quote and "'" in quote:
            patronr = "file:'([^']+)',label:'([^.*?]+)',type:.*?'.*?}"
        elif '"' in quote:
            patronr = 'file:"([^"]+)",label:"([^.*?]+)",type:.*?".*?}'
        if patronr == '':
            continue

        for fileurl, scrapedcalidad in re.compile(patronr, re.DOTALL).findall(data):
            title = item.contentTitle + ' (' + scrapedcalidad + ')'
            thumbnail = item.thumbnail
            if (DEBUG): logger.info("title=[" + title + "], url=[" + fileurl + "], thumbnail=[" + thumbnail + "])")
            itemlist.append(Item(channel=item.channel, action="play", title=title,
                                 url=fileurl, thumbnail=thumbnail, fanart=item.fanart))

    # FIX: removed the dead "url = scrapedurl" line; it raised NameError
    # whenever no iframe matched and its value was never used.
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=datas))

    for videoitem in itemlist:
        videoitem.channel = item.channel
        if videoitem.server != '':
            videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
        else:
            videoitem.thumbnail = item.thumbnail
        videoitem.action = 'play'
        videoitem.fulltitle = item.title

        if 'redirector' not in videoitem.url and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'

    # Move youtube trailers to the end of the list.
    for n, videoitem in enumerate(itemlist):
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]'
            itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n]

    # FIX: the original tested "'youtube' in itemlist[-1]" (membership on an
    # Item) and crashed on an empty list; check the url and guard the pop.
    if item.extra == 'findvideos' and len(itemlist) > 1 and 'youtube' in itemlist[-1].url:
        itemlist.pop(1)

    if 'serie' not in item.url:
        if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]', url=item.url,
                                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
示例#38
0
def findvideos(item):
    """Resolve the video sources of a ver-peliculas movie page.

    Reads the movie id/slug hidden in the page, asks the site's JSON API for
    the per-language source lists, then posts each video id to
    ``core/videofinal.php`` to obtain the final playlist.  Drive-hosted
    links and duplicate urls are skipped.
    """
    logger.info()
    duplicated = []
    itemlist = []
    data = get_source(item.url)
    video_info = scrapertools.find_single_match(
        data, "load_player\('([^']+).*?([^']+)")
    # Captures: (tld io|org|co, numeric movie id, slug) from the page url.
    movie_info = scrapertools.find_single_match(
        item.url,
        'http.:\/\/ver-peliculas\.(io|org|co)\/peliculas\/(\d+)-(.*?)-\d{4}-online\.'
    )

    if movie_info:
        # First capture group is the TLD the rest of the requests must use.
        movie_host = movie_info[0]
        movie_id = scrapertools.find_single_match(
            data, 'id=idpelicula value=(.*?)>')
        movie_name = scrapertools.find_single_match(
            data, 'id=nombreslug value=(.*?)>')
        sub = scrapertools.find_single_match(data, 'id=imdb value=(.*?)>')
        sub = '%s/subtix/%s.srt' % (movie_host, sub)
        url_base = 'https://ver-peliculas.%s/core/api.php?id=%s&slug=%s' % (
            movie_host, movie_id, movie_name)
        data = httptools.downloadpage(url_base).data
        json_data = jsontools.load(data)
        video_list = json_data['lista']
        for videoitem in video_list:
            video_base_url = host.replace(
                '.io', '.%s' % movie_host) + 'core/videofinal.php'
            if video_list[videoitem] != None:
                video_lang = video_list[videoitem]
                languages = ['latino', 'spanish', 'subtitulos', 'subtitulosp']
                for lang in languages:
                    if lang not in video_lang:
                        continue
                    if video_lang[lang] != None:
                        # An int entry means "no source"; real entries are lists.
                        if not isinstance(video_lang[lang], int):
                            video_id = video_lang[lang][0]["video"]
                            post = {"video": video_id, "sub": sub}
                            post = urllib.urlencode(post)
                            data = httptools.downloadpage(video_base_url,
                                                          post=post).data
                            playlist = jsontools.load(data)
                            # NOTE: ['playlist'][0] is just the string
                            # 'playlist', so this is playlist['playlist'].
                            sources = playlist[['playlist'][0]]
                            server = playlist['server']
                            for video_link in sources:
                                url = video_link['sources']
                                if url not in duplicated and server != 'drive':

                                    if lang == 'spanish':
                                        lang = 'Castellano'
                                    elif 'sub' in lang:
                                        lang = 'Subtitulada'
                                    lang = lang.capitalize()
                                    # '%s' is filled in later by
                                    # get_servers_itemlist with the server name.
                                    title = 'Ver en %s [' + lang + ']'
                                    thumbnail = servertools.guess_server_thumbnail(
                                        server)
                                    itemlist.append(
                                        item.clone(title=title,
                                                   url=url,
                                                   thumbnail=thumbnail,
                                                   action='play',
                                                   language=IDIOMAS[lang]))
                                    duplicated.append(url)
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    itemlist = sorted(itemlist, key=lambda i: i.language)

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
示例#39
0
def findvideos(item):
    """Build the list of playable links for a metaserie episode page.

    Each table row pairs an audio-flag image with a hoster link; rows become
    playable Items tagged with a coloured language label.  "Anterior" /
    "Siguiente" navigation entries are added unless browsing from the
    chapter list.
    """
    logger.info()

    # Coloured label for each audio flag image name found on the page.
    audio = {
        'es': '[COLOR yellow]ESPAÑOL[/COLOR]',
        'la': '[COLOR limegreen]LATINO[/COLOR]',
        'sub': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]',
    }

    itemlist = []
    page = httptools.downloadpage(item.url).data

    link_pattern = ('<td><img src="http:\/\/metaserie\.com\/wp-content\/themes\/mstheme'
                    '\/gt\/assets\/img\/([^\.]+).png" width="20".*?<\/td>.*?'
                    '<td><img src="http:\/\/www\.google\.com\/s2\/favicons\?domain=([^"]+)"'
                    ' \/>&nbsp;([^<]+)<\/td>')

    prev_url = scrapertools.find_single_match(
        page,
        '<th scope="col"><a href="([^"]+)" rel="prev" class="local-link">Anterior</a></th>')
    next_url = scrapertools.find_single_match(
        page,
        '<th scope="col"><a href="([^"]+)" rel="next" class="local-link">Siguiente</a></th>')

    for lang_key, link, favicon_name in re.findall(link_pattern, page, re.DOTALL):
        server = servertools.get_server_from_url(link).lower()
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=item.title + ' audio ' + audio[lang_key] + ' en ' + server,
                 fulltitle=item.contentSerieName,
                 url=link,
                 thumbnail=servertools.guess_server_thumbnail(server),
                 extra=item.thumbnail,
                 language=IDIOMAS[lang_key],
                 server=server))

    if item.extra1 != 'capitulos':
        # Navigation entries to the previous / next chapter, when present.
        if prev_url != '':
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title='Capitulo Anterior',
                     url=prev_url,
                     thumbnail='https://s31.postimg.cc/k5kpwyrgb/anterior.png'))
        if next_url != '':
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title='Capitulo Siguiente',
                     url=next_url,
                     thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'))

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
示例#40
0
def findvideos(item):
    """Resolve encoded video links on a cinecalidad-style page.

    Each match carries an obfuscated video code and a hoster tag; the code
    is decoded with ``dec`` and joined to the hoster's embed base url.
    BitTorrent / Mega / MediaFire / Trailer entries are skipped.

    The original version left ``video_id``/``server``/``url``/``thumbnail``
    undefined (or stale from the previous iteration) for unsupported tags;
    they are now skipped up front.  The large unused ``servidor`` dict was
    also removed.
    """
    logger.info()
    itemlist = []
    duplicados = []
    data = httptools.downloadpage(item.url).data
    patron = 'target="_blank".*? service=".*?" data="(.*?)"><li>(.*?)<\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Embed url prefix for each supported hoster tag.
    server_url = {
        'YourUpload': 'https://www.yourupload.com/embed/',
        'Openload': 'https://openload.co/embed/',
        'TVM': 'https://thevideo.me/embed-',
        'Trailer': '',
        'BitTorrent': '',
        'Mega': '',
        'MediaFire': ''
    }

    for video_cod, server_id in matches:
        # Skip hosters we never append (and unknown tags), so no variable is
        # ever read undefined or stale from a previous iteration.
        if server_id in ('BitTorrent', 'Mega', 'MediaFire', 'Trailer', ''):
            continue
        if server_id not in server_url:
            continue

        video_id = dec(video_cod)
        server = server_id.lower()
        thumbnail = servertools.guess_server_thumbnail(server_id)
        if server_id == 'TVM':
            server = 'thevideo.me'
            url = server_url[server_id] + video_id + '.html'
        else:
            url = server_url[server_id] + video_id
        title = item.contentTitle + ' (%s)' % server
        quality = 'default'

        if url not in duplicados:
            itemlist.append(
                item.clone(action='play',
                           title=title,
                           fulltitle=item.contentTitle,
                           url=url,
                           language=IDIOMAS[item.language],
                           thumbnail=thumbnail,
                           quality=quality,
                           server=server))
            duplicados.append(url)

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    #itemlist.append(trailer_item)
    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(
                channel=item.channel,
                title=
                '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                url=item.url,
                action="add_pelicula_to_library",
                extra="findvideos",
                contentTitle=item.contentTitle,
            ))

    return itemlist
示例#41
0
def findvideos(item):
    """List download/stream links for a cinecalidad-style page.

    Links arrive as pairs of ``dec()``-obfuscated (base-url, path) strings;
    only bases present in the ``servidor`` map are kept.  Recommended
    hosters get a highlighted suffix, and youtube links are renamed to a
    trailer entry.
    """
    # Known base url -> server name map.
    servidor = {
        "http://uptobox.com/": "uptobox",
        "http://userscloud.com/": "userscloud",
        "https://my.pcloud.com/publink/show?code=": "pcloud",
        "http://thevideos.tv/": "thevideos",
        "http://ul.to/": "uploadedto",
        "http://turbobit.net/": "turbobit",
        "http://www.cinecalidad.com/protect/v.html?i=": "cinecalidad",
        "http://www.mediafire.com/download/": "mediafire",
        "https://www.youtube.com/watch?v=": "youtube",
        "http://thevideos.tv/embed-": "thevideos",
        "//www.youtube.com/embed/": "youtube",
        "http://ok.ru/video/": "okru",
        "http://ok.ru/videoembed/": "okru",
        "http://www.cinemaqualidade.com/protect/v.html?i=":
        "cinemaqualidade.com",
        "http://usersfiles.com/": "usersfiles",
        "https://depositfiles.com/files/": "depositfiles",
        "http://www.nowvideo.sx/video/": "nowvideo",
        "http://vidbull.com/": "vidbull",
        "http://filescdn.com/": "filescdn",
        "https://www.yourupload.com/watch/": "yourupload"
    }
    logger.info()
    itemlist = []
    duplicados = []
    data = httptools.downloadpage(item.url).data

    patron = 'dec\("([^"]+)"\)\+dec\("([^"]+)"\)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    recomendados = ["uptobox", "thevideos", "nowvideo", "pcloud"]
    for scrapedurl, scrapedtitle in matches:
        # Decode each half once; the original called dec() on the same
        # argument up to seven times per iteration.
        base = dec(scrapedurl)
        if base not in servidor:
            continue
        server = servidor[base]
        title = "Ver " + item.contentTitle + " en " + server.upper()
        if 'yourupload' in base:
            url = base.replace('watch', 'embed') + dec(scrapedtitle)
        else:

            if 'youtube' in base:
                title = '[COLOR orange]Trailer en Youtube[/COLOR]'
            url = base + dec(scrapedtitle)

        if server in recomendados:
            title = title + "[COLOR limegreen] [I] (Recomedado) [/I] [/COLOR]"
        thumbnail = servertools.guess_server_thumbnail(server)
        plot = ""
        if title not in duplicados:
            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     fulltitle=item.title,
                     url=url,
                     thumbnail=thumbnail,
                     plot=plot,
                     extra=item.thumbnail,
                     server=server))
        duplicados.append(title)
    if config.get_library_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
示例#42
0
def findvideos(item):
    """Extract direct video links for a pelisplus/elreyxhd film page.

    Resolves elreyxhd / pelisplus.biz iframes to their direct file urls and
    delegates everything else to ``servertools.find_video_items``.  Youtube
    trailers are moved to the end and a library entry is offered for films.
    """
    logger.info()
    itemlist = []
    duplicados = []
    datas = httptools.downloadpage(item.url).data
    patron = "<iframe.*?src='(.*?)' frameborder.*?"
    matches = re.compile(patron, re.DOTALL).findall(datas)

    for scrapedurl in matches:
        if 'elreyxhd' not in scrapedurl and 'pelisplus.biz' not in scrapedurl:
            continue
        data = httptools.downloadpage(scrapedurl, headers=headers).data

        quote = scrapertools.find_single_match(data, 'sources.*?file.*?http')
        # FIX: patronr could be referenced before assignment when neither
        # quoting style matched; initialise it and skip that iframe.
        patronr = ''
        if quote and "'" in quote:
            patronr = "file:'([^']+)',label:'([^.*?]+)',type:.*?'.*?}"
        elif '"' in quote:
            patronr = '{file:"(.*?)",label:"(.*?)"}'
        if patronr == '':
            continue

        for fileurl, scrapedcalidad in re.compile(patronr, re.DOTALL).findall(data):
            title = item.contentTitle + ' (' + str(scrapedcalidad) + ')'
            if fileurl not in duplicados:
                itemlist.append(Item(channel=item.channel, action="play",
                                     title=title, url=fileurl,
                                     thumbnail=item.thumbnail,
                                     fanart=item.fanart, extra='directo'))
                duplicados.append(fileurl)

    # FIX: removed the dead "url = scrapedurl" line; it raised NameError
    # whenever no iframe matched and its value was never used.
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=datas))

    for videoitem in itemlist:
        videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        if videoitem.server != '':
            videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
        else:
            videoitem.thumbnail = item.thumbnail
        videoitem.action = 'play'
        videoitem.fulltitle = item.title

        if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'

    # Move youtube trailers to the end of the list.
    for n, videoitem in enumerate(itemlist):
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]'
            itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n]

    # FIX: the original tested "'youtube' in itemlist[-1]" (membership on an
    # Item) and crashed on an empty list; check the url and guard the pop.
    if item.extra == 'findvideos' and len(itemlist) > 1 and 'youtube' in itemlist[-1].url:
        itemlist.pop(1)

    if 'serie' not in item.url:
        if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]', url=item.url,
                                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
示例#43
0
def findvideos(item):
    """Build playable items for every hosted mirror on a detail page.

    Maps the language tabs ("playN") to their labels, decodes each
    player's obfuscated URL with dec(), tags it with the detected
    server and language, then runs the FilterTools/AutoPlay hooks and
    optionally appends the "add to videolibrary" entry.

    :param item: the Item whose .url points to the detail page
    :return: list of playable Items (plus the videolibrary entry)
    """
    logger.info()

    itemlist = []
    langs = dict()

    data = httptools.downloadpage(item.url).data

    # Map each player id ("playN") to its visible language label.
    patron = '<a onclick="return (play\d+).*?;"> (.*?) <\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for key, value in matches:
        langs[key] = value.strip()

    # Each playN() function embeds an encoded URL fragment.
    patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Extra token appended to URLs decoded from the "e20fb34..." variable.
    enlace = scrapertools.find_single_match(
        data,
        'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"'
    )

    # URL path fragment -> server name. Loop-invariant, so built once
    # here instead of being recreated on every iteration.
    servers = {
        '/opl': 'openload',
        '/your': 'yourupload',
        '/sen': 'senvid',
        '/face': 'netutv',
        '/vk': 'vk',
        '/jk': 'streamcherry',
        '/vim': 'gamovideo'
    }

    for scrapedlang, encurl in matches:
        url = dec(encurl)
        if 'e20fb34' in encurl:
            url = url + enlace

        # Identify the server from the path between "embed" and ".php";
        # empty string when it is not one of the known hosts.
        server = ''
        server_id = re.sub(r'.*?embed|\.php.*', '', url)
        if server_id and server_id in servers:
            server = servers[server_id]

        # Fall back to 'Latino' when the tab label is unknown/unlisted.
        if (scrapedlang in langs) and langs[scrapedlang] in list_language:
            language = IDIOMAS[langs[scrapedlang]]
        else:
            language = 'Latino'

        if item.extra == 'peliculas':
            title = item.contentTitle + ' (' + server + ') ' + language
            plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>')
        else:
            title = item.contentSerieName + ' (' + server + ') ' + language
            plot = item.plot

        thumbnail = servertools.guess_server_thumbnail(title)

        # Keep only direct ".php" embed links; skip player pages.
        if 'player' not in url and 'php' in url:
            itemlist.append(
                item.clone(title=title,
                           url=url,
                           action="play",
                           plot=plot,
                           thumbnail=thumbnail,
                           server=server,
                           quality='',
                           language=language))

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
示例#44
0
def findvideos(item):
    """Build the mirror list for a divxatope detail page.

    Adds a direct torrent item first (when the download link exists),
    then one item per enabled hosted mirror, with the "Ver en"
    (streaming) mirrors listed before the download mirrors.

    :param item: the Item whose .url points to the detail page
    :return: list of playable Items
    """
    logger.info()
    itemlist = []

    # Descarga la pagina
    data = httptools.downloadpage(item.url).data

    item.plot = scrapertools.find_single_match(
        data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
    item.plot = scrapertools.htmlclean(item.plot).strip()
    item.contentPlot = item.plot

    # Direct torrent link, if present, goes first in the list.
    link = scrapertools.find_single_match(
        data, 'href="http://(?:tumejorserie|tumejorjuego).*?link=([^"]+)"')
    if link != "":
        link = "http://www.divxatope1.com/" + link
        logger.info("torrent=" + link)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 server="torrent",
                 title="Vídeo en torrent",
                 fulltitle=item.title,
                 url=link,
                 thumbnail=servertools.guess_server_thumbnail("torrent"),
                 plot=item.plot,
                 folder=False,
                 parentContent=item))

    # One table row per mirror: server, language, quality, url, comments.
    patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
    patron += '<div class="box2">([^<]+)</div[^<]+'
    patron += '<div class="box3">([^<]+)</div[^<]+'
    patron += '<div class="box4">([^<]+)</div[^<]+'
    patron += '<div class="box5">(.*?)</div[^<]+'
    patron += '<div class="box6">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    itemlist_ver = []
    itemlist_descargar = []

    for servername, idioma, calidad, scrapedurl, comentarios in matches:
        title = "Mirror en " + servername + " (" + calidad + ")" + " (" + idioma + ")"
        # Normalize site server names to internal server ids.
        servername = servername.replace("uploaded", "uploadedto").replace(
            "1fichier", "onefichier")
        if comentarios.strip() != "":
            title = title + " (" + comentarios.strip() + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            thumbnail = servertools.guess_server_thumbnail(title)
            plot = ""
            logger.debug("title=[" + title + "], url=[" + url +
                         "], thumbnail=[" + thumbnail + "]")
            # Multi-part links need extraction before playing.
            action = "play"
            if "partes" in title:
                action = "extract_url"
            new_item = Item(channel=item.channel,
                            action=action,
                            title=title,
                            fulltitle=title,
                            url=url,
                            thumbnail=thumbnail,
                            plot=plot,
                            parentContent=item)
            if comentarios.startswith("Ver en"):
                itemlist_ver.append(new_item)
            else:
                itemlist_descargar.append(new_item)

    # Streaming mirrors first, then download mirrors.
    itemlist.extend(itemlist_ver)
    itemlist.extend(itemlist_descargar)

    return itemlist