def entradas(item):
    logger.info("pelisalacarta.channels.grabaciones_deportivas entradas")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = scrapertools.decodeHtmlentities(data)
    title = item.title.replace("+++ ","")
    ymd = scrapertools.find_single_match(data, '<div id="vafs".*?value="([^"]+)"')
    cat = scrapertools.find_single_match(data, '<label for="s([^"]+)">(?:<b>|)'+title+'(?:</b>|)</label>')

    item.extra = cat
    item.url = item.url + ymd
    itemlist = partidos(item)

    if itemlist[0].action == "": return itemlist
    if "Primer día con vídeos disponibles" not in itemlist[0].title: itemlist.insert(0, Item(channel=__channel__, title="--Hoy--", url="", action="", thumbnail=item.thumbnail, folder=False))
    itemlist.append(Item(channel=__channel__, title=bbcode_kodi2html("     [COLOR red]***Elegir Fecha***[/COLOR]"), url="", action="", thumbnail=item.thumbnail, folder=False))
    matches = scrapertools.find_multiple_matches(data, '<a class="small"href="([^"]+)".*?<b>(.*?)</b>')
    length = len(itemlist)
    for scrapedurl, scrapedtitle in matches:
        if scrapedtitle == "Hoy": continue
        scrapedurl = host_live + scrapedurl
        itemlist.insert(length, Item(channel=__channel__, title=scrapedtitle, url=scrapedurl, action="partidos", extra=cat, thumbnail=item.thumbnail, folder=True))

    calendar = scrapertools.cachePage("http://livetv.sx/ajax/vacal.php?cal&lng=es")
    matches = scrapertools.find_multiple_matches(calendar, "load\('([^']+)'\).*?<b>(.*?)</b>")
    for scrapedurl, scrapedtitle in matches:
        scrapedurl = host_live + scrapedurl
        itemlist.append(Item(channel=__channel__, title=scrapedtitle, url=scrapedurl, action="calendario", extra=cat, thumbnail=item.thumbnail, folder=True))

    return itemlist
Example No. 2
def convert_link(html, link_type):

    hash_seed = get_cookie(html)
    logger.info("[seriespepito.py] hash_seed="+hash_seed)

    HASH_PAT = 'CryptoJS\.(\w+)\('
    hash_func = scrapertools.find_single_match(html, HASH_PAT).lower()

    if hash_func == "md5":
        hash = hashlib.md5(hash_seed).hexdigest()
    else:
        hash = hashlib.sha256(hash_seed).hexdigest()

    if link_type == PELICULAS_PEPITO:
        hash += '0'
    logger.info("[seriespepito.py] hash="+hash)

    HREF_SEARCH_PAT = '<a class=".' + hash + '".*?href="http://www.enlacespepito.com\/([^\.]*).html"><i class="icon-(?:play|download)">'
    logger.info("[seriespepito.py] HREF_SEARCH_PAT="+HREF_SEARCH_PAT)

    href = list(scrapertools.find_single_match(html, HREF_SEARCH_PAT))
    logger.info("[seriespepito.py] href="+repr(href))
    CHAR_REPLACE_PAT = '[a-z]\[(\d+)\]="(.)";'

    matches = re.findall(CHAR_REPLACE_PAT , html, flags=re.DOTALL|re.IGNORECASE)
    logger.info("[seriespepito.py] matches="+repr(matches))

    for match in matches:
        href[int(match[0])] = match[1]

    href = ''.join(href)

    return 'http://www.enlacespepito.com/' + href + '.html'
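
A minimal, self-contained sketch of the character-patching technique used above, with made-up sample data (in the real page, the positions and characters come from the scraped x[i]="c"; statements):

# Hypothetical data; the real pattern matches come from the page's obfuscation script.
import re

href = list("serie-xyz-capitulo-1")     # hypothetical partial link
js = 'a[0]="S";b[6]="X";'               # hypothetical obfuscation statements
for pos, char in re.findall(r'[a-z]\[(\d+)\]="(.)";', js):
    href[int(pos)] = char               # each rule overwrites one character
print(''.join(href))                    # -> "Serie-Xyz-capitulo-1"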
Example No. 3
def episodios(item):
    logger.info("{0} - {1}".format(item.title, item.url))

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    fanart = scrapertools.find_single_match(data, "background-image[^'\"]+['\"]([^'\"]+)")
    plot = scrapertools.find_single_match(data, "id=['\"]profile2['\"]>\s*(.*?)\s*</div>")

    logger.debug("fanart: {0}".format(fanart))
    logger.debug("plot: {0}".format(plot))


    episodes = re.findall("<tr.*?href=['\"](?P<url>[^'\"]+).+?>(?P<title>.+?)</a>.*?<td>(?P<flags>.*?)</td>", data, re.MULTILINE | re.DOTALL)
    for url, title, flags in episodes:
        idiomas = " ".join(["[{0}]".format(IDIOMAS.get(language, "OVOS")) for language in re.findall("banderas/([^\.]+)", flags, re.MULTILINE)])
        displayTitle = "{show} - {title} {languages}".format(show = item.show, title = title, languages = idiomas)
        logger.debug("Episode found {0}: {1}".format(displayTitle, urlparse.urljoin(HOST, url)))
        itemlist.append(item.clone(title=displayTitle, url=urlparse.urljoin(HOST, url),
                                   action="findvideos", plot=plot, fanart=fanart, language=idiomas,
                                   list_idiomas=list_idiomas, list_calidad=CALIDADES, context=filtertools.context))

    if len(itemlist) > 0 and filtertools.context:
        itemlist = filtertools.get_links(itemlist, item.channel)

    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(item.clone(title="Añadir esta serie a la biblioteca", action="add_serie_to_library", extra="episodios"))

    return itemlist
Example No. 4
def temporadas(item):
    logger.info()
    
    itemlist = []
    templist = []
    data = httptools.downloadpage(item.url).data
    realplot = ''
    patron = "<button class='classnamer' onclick='javascript: mostrarcapitulos.*?blank'>([^<]+)</button>"
    
    matches = re.compile(patron,re.DOTALL).findall(data)
    
    serieid = scrapertools.find_single_match(data,"<link rel='shortlink' href='http:\/\/mundoflv.com\/\?p=([^']+)' \/>")
    item.thumbnail = item.thumbvid
    infoLabels=item.infoLabels
    for scrapedtitle in matches:
        url = 'http://mundoflv.com/wp-content/themes/wpRafael/includes/capitulos.php?serie='+serieid+'&temporada=' + scrapedtitle
        title = 'Temporada '+ scrapertools.decodeHtmlentities(scrapedtitle)
        contentSeasonNumber = scrapedtitle
        thumbnail = item.thumbnail
        realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*')
        plot =''
        fanart = ''
        itemlist.append( Item(channel=item.channel, action="episodiosxtemp" , title=title , fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot, fanart = fanart, extra1=item.extra1, contentSerieName=item.contentSerieName, contentSeasonNumber = contentSeasonNumber, infoLabels = {'season':contentSeasonNumber}))
    
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)      
    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]', url=item.url,
                             action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName, extra1 = item.extra1))
    
    return itemlist
Example No. 5
def play(item):
    logger.info("{0} - {1} = {2}".format(item.show, item.title, item.url))

    if item.url.startswith(HOST):
        data = scrapertools.cache_page(item.url)

        patron = "<input type='button' value='Ver o Descargar' onclick='window.open\(\"([^\"]+)\"\);'/>"
        url = scrapertools.find_single_match(data, patron)
    else:
        url = item.url

    itemlist = servertools.find_video_items(data=url)

    titulo = scrapertools.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$")
    if titulo:
        titulo += " [{language}]".format(language=item.language)

    for videoitem in itemlist:
        if titulo:
            videoitem.title = titulo
        else:
            videoitem.title = item.title
        videoitem.channel = item.channel

    return itemlist
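Example No. 6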
def play(item):
    logger.info("[documoo.py] play")
    itemlist = []
    video_url = ""
    server = None

    data = scrapertools.cache_page(item.url)
    url = scrapertools.find_single_match(data, '<iframe\s+(?:width="[^"]*"\s*height="[^"]*"\s*)?src="([^"]+)"')

    if 'youtu' in url:
        data = scrapertools.cache_page(url)
        vid = scrapertools.find_single_match(data, '\'VIDEO_ID\'\s*:\s*"([^"]+)')
        if vid != "":
            video_url = "http://www.youtube.com/watch?v=%s" % vid
            server = 'youtube'
    elif 'rai.tv' in url:
        data = scrapertools.cache_page(url)
        video_url = scrapertools.find_single_match(data, '<meta\s+name="videourl_m3u8"\s*content="([^"]+)"')

    if video_url != "":
        item.url = video_url
        item.server = server
        itemlist.append(item)

    return itemlist
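Example No. 7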
def findvideos(item):
    logger.info("pelisalacarta.channels.elsenordelanillo findvideos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    #logger.info("data="+data)

    bloquecalidades = scrapertools.find_single_match(data,'<ul class="tabs(.*?)</ul>')
    logger.info("bloquecalidades="+bloquecalidades)
    patroncalidades = '<li><a href=".([^"]+)">([^<]+)</a></li>'
    matchescalidades = re.compile(patroncalidades,re.DOTALL).findall(bloquecalidades)

    for idcalidad,nombrecalidad in matchescalidades:
        if nombrecalidad.lower().strip()!="publicidad":
            bloquelink = scrapertools.find_single_match(data,'<div id="'+idcalidad+'"(.*?)</div> ')
            logger.info("bloquelink="+bloquelink)
            title = nombrecalidad
            url = bloquelink
            thumbnail = ""
            plot = ""
            if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
            itemlist.append( Item(channel=__channel__, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, folder=False))

    return itemlist
Example No. 8
def buscar(item, texto=""):
    if item.extra:
      post = item.extra
      texto = item.extra.split("=")[len(item.extra.split("="))-1]
    else:
      post= "do=search&subaction=search&story="+texto
    
    #post = "do=search&subaction=search&search_start=4&full_search=0&result_from=31&story=" + texto
    data = scrapertools.cache_page(item.url, post=post)

    patron = '<div class="base shortstory">(.*?)<div class="bsep">&nbsp;</div>'
    resultados = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []
    for resultado in resultados:
      url, title = scrapertools.find_single_match(resultado,'<h3 class="btl"><a href="([^"]+)">(.*?)</a></h3>')
      plot = scrapertools.find_single_match(resultado,'<div>&nbsp;</div>\r\n<div>(.*?)<br />')
      title = re.sub('<[^>]+>',"",title)
      plot = re.sub('<[^>]+>',"",plot)
      
      if "table" in resultado:
        itemlist.append(Item(title=title, channel=__channel__,action="episodios", url=url,plot=plot,folder=True))
      else:
        itemlist.append(Item(title=title, channel=__channel__,action="findvideos", url=url,plot=plot,folder=True))
    
    next_page = scrapertools.find_single_match(data,'<a name="nextlink" id="nextlink" onclick="javascript:list_submit\(([^"]+)\); return\(false\)" href="#"><span class="thide pnext">Siguiente</span></a>')
    logger.info(next_page)
    if next_page!="":
        itemlist.append( Item(channel=__channel__, action="buscar", title=">> Pagina siguiente" , url=item.url,extra="do=search&subaction=search&search_start="+next_page+"&full_search=0&result_from="+str(((int(next_page)-1)*10)+1)+"&story=" +texto , folder=True) )
    
   
    return itemlist
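Example No. 9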
def novedades(item):
    logger.info("pelisalacarta.channels.sportvideo novedades")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = scrapertools.decodeHtmlentities(data)
    data = data.replace("\n","").replace("\t","")

    bloque = scrapertools.find_multiple_matches(data, '(ayer.*?)<div id="L')
    for match in bloque:
        if "TORRENT" not in match: continue
        position = scrapertools.find_single_match(match, 'ayer.*?top:(.*?)px')
        scrapedthumbnail = scrapertools.find_single_match(match, 'javascript.*?src="([^"]+)"')
        scrapedurl = scrapertools.find_single_match(match, '<a href=".\/([^"]+)" title="TORRENT">')
        scrapedtitle = scrapertools.find_single_match(match, 'class="style3">(.*?)</a>')
        scrapedtitle = scrapedtitle.replace(" at "," vs ")
        scrapedtitle = " [COLOR gold]"+scrapedtitle.rsplit(" ",1)[0]+"[/COLOR] [COLOR brown]"+scrapedtitle.rsplit(" ",1)[1]+"[/COLOR]"
        scrapedthumbnail = host_sport + scrapedthumbnail
        scrapedurl = host_sport + scrapedurl
        itemlist.append(Item(channel=__channel__, title=bbcode_kodi2html(scrapedtitle), url=scrapedurl, action="play", server="torrent", thumbnail=scrapedthumbnail, order=int(position), fanart=item.fanart, folder=False))

    itemlist.sort(key=lambda it: it.order)

    page = item.page + 1
    next_page = scrapertools.find_single_match(data, '<a href="./([^"]+)" class="style2">'+str(page)+'</a>')
    if next_page != "":
        scrapedurl = host_sport + next_page
        itemlist.append(Item(channel=__channel__, title=">> Siguiente", url=scrapedurl, action="novedades", page=page, thumbnail=item.thumbnail, fanart=item.fanart, folder=True))

    return itemlist
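Example No. 10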
def findvideos(item):
    logger.info("[asiansubita.py] findvideos")

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the data
    thumbnail = scrapertools.find_single_match(data, 'src="([^"]+)"[^<]+</p>')
    plot = scrapertools.find_single_match(data, '<p style="text-align: justify;">(.*?)</p>')
    plot = scrapertools.decodeHtmlentities(plot)

    patron = 'href="(http://adf.ly/[^"]+)" target="_blank">([^<]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        title = "[" + scrapedtitle + "] " + item.fulltitle

        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 url=scrapedurl,
                 thumbnail=thumbnail,
                 plot=plot,
                 fulltitle=item.fulltitle,
                 show=item.show))

    return itemlist
Example No. 11
def busqueda(item):
    logger.info("pelisalacarta.channels.descargasmix busqueda")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    bloque = scrapertools.find_single_match(data, '<div id="content" role="main">(.*?)<div id="sidebar" role="complementary">')
    patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"'
    patron += '.*?<p class="stats">(.*?)</p>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail,scrapedcat in matches:
        scrapedthumbnail = "http:"+scrapedthumbnail.replace("-129x180","")
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        if ("Películas" in scrapedcat) | ("Documentales" in scrapedcat):
            titulo = scrapedtitle.split("[")[0]
            itemlist.append( Item(channel=__channel__, action='findvideos', title= scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail, fulltitle=titulo, context = "0", contentTitle=titulo,folder=True) )
        else:
            itemlist.append( Item(channel=__channel__, action='temporadas', title= scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, context = "2", contentTitle=scrapedtitle, folder=True) )

    patron = '<a class="nextpostslink".*?href="([^"]+)"'
    matches = scrapertools.find_single_match(data, patron)
    if len(matches) > 0:
        npage = scrapertools.find_single_match(matches,"page/(.*?)/")
        if DEBUG: logger.info("url=["+matches+"]")
        itemlist.append( Item(channel=__channel__, action='busqueda', title= "Página "+npage , url=matches ,folder=True) )

    return itemlist
Example No. 12
def mainlist(item):
    logger.info("[descargas.py] mainlist")
    itemlist=[]
    
    itemlist.append( Item( channel="descargas", action="pendientes", title="Descargas pendientes"))
    itemlist.append( Item( channel="descargas", action="errores", title="Descargas con error"))
    itemlist.append( Item( channel="descargas", action="torrent", title="Descargas Torrent"))

    if usingsamba(DOWNLOAD_PATH):
        ficheros = samba.get_files(DOWNLOAD_PATH)
    else:
        ficheros = os.listdir(filesystem.EncodePath(DOWNLOAD_PATH))
    for fichero in ficheros:
        fichero = filesystem.DecodePath(fichero)
        url = os.path.join(DOWNLOAD_PATH, fichero)
        if not os.path.isdir(url) and not fichero.endswith(".nfo") and not fichero.endswith(".tbn"):
            fileName, fileExtension = os.path.splitext(url)
            if os.path.exists(fileName + ".nfo"):
                Archivo = open(fileName + ".nfo","rb")
                lines = Archivo.read()
                Archivo.close()
                logger.info(lines)
                title = scrapertools.find_single_match(lines,'<title>\((.*?)\)</title>')
                thumbnail = scrapertools.find_single_match(lines,'<thumb>(.*?)</thumb>')
                plot = scrapertools.find_single_match(lines,'<plot>(.*?)</plot>')
                itemlist.append( Item(channel="descargas", action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, server="local", folder=False))
            else:
                itemlist.append( Item(channel="descargas", action="play", title=fichero, url=url, server="local", folder=False))

    return itemlist
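
For reference, the three regexes above expect a sidecar .nfo file shaped roughly like this; a hypothetical minimal example (note the title is captured from inside parentheses):

# Hypothetical minimal .nfo content matching the three regexes above.
sample_nfo = ('<title>(Some Movie)</title>'
              '<thumb>http://example.com/poster.jpg</thumb>'
              '<plot>One-line synopsis.</plot>')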
Example No. 13
def mainlist(item):

    if item.url=="":
        item.url = host

    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron ='<a href="([^"]+)" title="([^"]+)" rel="nofollow" class="post-image post-image-left".*?\s*<div class="featured-thumbnail"><img width="203" height="150" src="([^"]+)" class="attachment-featured size-featured wp-post-image" alt="" title="" \/><\/div>'
    matches = re.compile(patron,re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        url = scrapedurl
        title = scrapedtitle.decode('utf-8')
        thumbnail = scrapedthumbnail
        fanart = ''

        itemlist.append( Item(channel= item.channel, action="findvideos" ,title=title , url=url, thumbnail=thumbnail, fanart=fanart ))

    # Pagination
    siguiente = scrapertools.find_single_match(data,"<a rel='nofollow' href='([^']+)' class='inactive'>Next <")
    ultima = scrapertools.find_single_match(data,"<a rel='nofollow' class='inactive' href='([^']+)'>Last <")
    if siguiente != ultima:
        titlen = 'Pagina Siguiente >>> '
        fanart = ''
        itemlist.append(Item(channel = item.channel, action = "mainlist", title =titlen, url = siguiente, fanart = fanart))
    return itemlist
Example No. 14
def findvideos(item):
    logger.info ("pelisalacarta.channels.metaserie findvideos")
    itemlist=[]
    audio = {'la':'[COLOR limegreen]LATINO[/COLOR]','es':'[COLOR yellow]ESPAÑOL[/COLOR]','sub':'[COLOR red]ORIGINAL SUBTITULADO[/COLOR]'}
    data=scrapertools.cache_page(item.url)
    patron ='<td><img src="http:\/\/metaserie\.com\/wp-content\/themes\/mstheme\/gt\/assets\/img\/([^\.]+).png" width="20".*?<\/td>.*?<td><img src="http:\/\/www\.google\.com\/s2\/favicons\?domain=([^"]+)" \/>&nbsp;([^<]+)<\/td>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    
    anterior = scrapertools.find_single_match(data,'<th scope="col"><a href="([^"]+)" rel="prev" class="local-link">Anterior</a></th>')
    siguiente = scrapertools.find_single_match(data,'<th scope="col"><a href="([^"]+)" rel="next" class="local-link">Siguiente</a></th>')
    titulo = scrapertools.find_single_match(data,'<h1 class="entry-title">([^<]+)</h1>		</header>')
    

    for scrapedid, scrapedurl, scrapedserv in matches:
        url = scrapedurl
        title = titulo+' audio '+audio[scrapedid]+' en '+scrapedserv
        extra = item.thumbnail
        thumbnail = servertools.guess_server_thumbnail(scrapedserv)
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"])")
        itemlist.append( Item(channel=item.channel, action="play" , title=title, fulltitle=titulo, url=url, thumbnail=thumbnail, extra=extra))

    if anterior !='':
        itemlist.append( Item(channel=item.channel, action="findvideos" , title='Capitulo Anterior' , url=anterior, thumbnail='https://s31.postimg.org/k5kpwyrgb/anterior.png', folder ="true" ))
    if siguiente !='':
        itemlist.append( Item(channel=item.channel, action="findvideos" , title='Capitulo Siguiente' , url=siguiente, thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png', folder ="true" ))
    return itemlist
Example No. 15
def episodios(item):
    logger.info("tvalacarta.channels.rtspan episodios")    
    itemlist = []

    data = scrapertools.cachePage(item.url)

    patron  = '<figure class="media">.*?'
    patron += '<a href="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'
    patron += '<time class="date">([^<]+)</time.*?'
    patron += '<h3[^<]+'
    patron += '<a[^>]+>([^<]+)</a'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)
	
    for scrapedurl,scrapedthumbnail,fecha,scrapedtitle in matches:
        scrapedday = scrapertools.find_single_match(fecha,'(\d+)\.\d+\.\d+')
        scrapedmonth = scrapertools.find_single_match(fecha,'\d+\.(\d+)\.\d+')
        scrapedyear = scrapertools.find_single_match(fecha,'\d+\.\d+\.(\d+)')
        scrapeddate = scrapedyear + "-" + scrapedmonth + "-" + scrapedday

        title = fecha.strip() + " - " + scrapedtitle.strip()
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = scrapedthumbnail
        itemlist.append( Item(channel=__channel__, action="play", title=title, url=url, thumbnail=thumbnail, aired_date=scrapeddate, folder=False) )

    next_page_url = scrapertools.find_single_match(data,'<button class="button-grey js-listing-more".*?data-href="([^"]+)">')
    if next_page_url!="":
        itemlist.append( Item(channel=__channel__, action="episodios", title=">> Página siguiente" , url=urlparse.urljoin(item.url,next_page_url) ,  folder=True) )    

    return itemlist
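
The date is captured with three separate passes over the same d.m.y string; an equivalent one-pass sketch with a made-up date:

# One-pass capture of a d.m.y date, e.g. "31.12.2015".
import re

day, month, year = re.findall(r'(\d+)\.(\d+)\.(\d+)', "31.12.2015")[0]
scrapeddate = year + "-" + month + "-" + day    # -> "2015-12-31"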
Example No. 16
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("streamondemand.servers.vidgg get_video_url(page_url='%s')" % page_url)

    video_urls = []
    data = scrapertools.cache_page(page_url)

    mediaurls = scrapertools.find_multiple_matches(data, '<source src="([^"]+)"')
    if not mediaurls:
        id_file = page_url.rsplit("/",1)[1]
        key = scrapertools.find_single_match(data, 'flashvars\.filekey\s*=\s*"([^"]+)"')
        if not key:
            varkey = scrapertools.find_single_match(data, 'flashvars\.filekey\s*=\s*([^;]+);')
            key = scrapertools.find_single_match(data, varkey+'\s*=\s*"([^"]+)"')

        # First URL: an intentionally wrong URL is requested, needed to obtain the real link
        url = "http://www.vidgg.to//api/player.api.php?cid2=undefined&cid=undefined&numOfErrors=0&user=undefined&cid3=undefined&key=%s&file=%s&pass=undefined" % (key, id_file)
        data = scrapertools.cache_page(url)
        
        url_error = scrapertools.find_single_match(data, 'url=([^&]+)&')
        url = "http://www.vidgg.to//api/player.api.php?cid2=undefined&cid=undefined&numOfErrors=1&errorUrl=%s&errorCode=404&user=undefined&cid3=undefined&key=%s&file=%s&pass=undefined" % (url_error, key, id_file)
        data = scrapertools.cache_page(url)
        mediaurls = scrapertools.find_multiple_matches(data, 'url=([^&]+)&')

    for i, mediaurl in enumerate(mediaurls):
        title = scrapertools.get_filename_from_url(mediaurl)[-4:]+" Mirror %s [vidgg]" % str(i+1)
        video_urls.append( [title, mediaurl])

    for video_url in video_urls:
        logger.info("[vidgg.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
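
A hypothetical usage of the resolver above (the page URL is made up; each entry of the returned list is a [title, media_url] pair):

# Hypothetical call; the trailing path segment is the file id used as id_file above.
urls = get_video_url("http://www.vidgg.to/video/abcdef0123456789")
if urls:
    title, media_url = urls[0]          # first mirror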
Example No. 17
def find_url_play(data, headers):
    logger.info("[jjcast.py] find_url_play")

    cid = scrapertools.find_single_match (data, '["\']http://nn.jjcast.com/embed.php([^"\']+)"')
    if cid == '':
        return ''
    pageurl = 'http://jjcast.com/player.php' + cid

    data2 = scrapertools.cachePage(pageurl, headers=headers)
    if (DEBUG): logger.info("data2="+data2)

    # deobfuscate the data
    data2 = unpackerjs2.unpackjs(data2)
    data2 = re.sub(r"myScrT.push\(\\'([^\\']+)\\'\);", r"\1", data2)
    data2 = re.sub(r"myRtk.push\(\\'([^\\']+)\\'\);", r"\1", data2)
    data2 = urllib.unquote(data2)
    if (DEBUG): logger.info("data2clean="+urllib.unquote(data2))


    rtmpurl = scrapertools.find_single_match (data2, '(rtmp://[^/]+/lives)')
    filevalue = scrapertools.find_single_match (data2, 'var myRtk=new Array\(\);(.*?)rtmp://')
    swfurl = 'http://jjcast.com/jw5/5.10.swf'

    url = '%s playpath=%s swfUrl=%s swfVfy=1 live=1 timeout=15 pageUrl=%s' % (rtmpurl, filevalue, swfurl, pageurl)
    
    return url

Example No. 18
def programas(item):
    logger.info("tvalacarta.channels.upvtv programas")
    itemlist = []

    data = scrapertools.cache_page(item.url)
    patron = '<ul data-categoria(.*?)</ul>'
    bloques = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(bloques)

    for bloque in bloques:

        patron  = '<li(.*?)</li'
        matches = re.compile(patron,re.DOTALL).findall(bloque)
        if DEBUG: scrapertools.printMatches(matches)

        for match in matches:
            title = scrapertools.find_single_match(match,'title="([^"]+)"')
            if title=="":
                title = scrapertools.find_single_match(match,'<a[^>]+>([^<]+)</a>')
            title = title.decode('iso-8859-1').encode("utf8","ignore")
            thumbnail = scrapertools.find_single_match(match,'<img.*?src="([^"]+)"')
            plot = scrapertools.find_single_match(match,'<span class="tex_imagen"[^<]+<br />([^<]+)</span>')
            url = scrapertools.find_single_match(match,'a href="([^"]+)"')
            url = urlparse.urljoin(item.url,url)
            if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
            itemlist.append( Item( channel=CHANNELNAME , title=title , action="episodios" , url=url , thumbnail=thumbnail , plot=plot , show=title , fanart=thumbnail , folder=True ) )

    return itemlist
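Example No. 19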
def infosinopsis(item):
    logger.info("pelisalacarta.channels.pordede infosinopsis")

    url_aux = item.url.replace("/links/view/slug/", "/peli/").replace("/what/peli", "")
    # Download the page
    headers = DEFAULT_HEADERS[:]
    #headers.append(["Referer",item.extra])
    #headers.append(["X-Requested-With","XMLHttpRequest"])
    data = scrapertools.cache_page(url_aux,headers=headers)
    if (DEBUG): logger.info("data="+data)

    scrapedtitle = scrapertools.find_single_match(data,'<h1>([^<]+)</h1>')
    scrapedvalue = scrapertools.find_single_match(data,'<span class="puntuationValue" data-value="([^"]+)"')
    scrapedyear = scrapertools.find_single_match(data,'<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>')
    scrapedduration = scrapertools.find_single_match(data,'<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>', 1)
    scrapedplot = scrapertools.find_single_match(data,'<div class="info text"[^>]+>([^<]+)</div>')
    #scrapedthumbnail = scrapertools.find_single_match(data,'<meta property="og:image" content="([^"]+)"')
    #thumbnail = scrapedthumbnail.replace("http://www.pordede.comhttp://", "http://").replace("mediacover", "mediathumb")
    scrapedgenres = re.compile('href="/pelis/index/genre/[^"]+">([^<]+)</a>',re.DOTALL).findall(data)
    scrapedcasting = re.compile('href="/star/[^"]+">([^<]+)</a><br/><span>([^<]+)</span>',re.DOTALL).findall(data)

    title = scrapertools.htmlclean(scrapedtitle)
    plot = "Año: [B]"+scrapedyear+"[/B]"
    plot += " , Duración: [B]"+scrapedduration+"[/B]"
    plot += " , Puntuación usuarios: [B]"+scrapedvalue+"[/B]"
    plot += "\nGéneros: "+", ".join(scrapedgenres)
    plot += "\n\nSinopsis:\n"+scrapertools.htmlclean(scrapedplot)
    plot += "\n\nCasting:\n"
    for actor,papel in scrapedcasting:
        plot += actor+" ("+papel+"). "

    tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
    tbd.ask(title, plot)
    del tbd
    return
Example No. 20
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("pelisalacarta.servers.clicknupload url="+page_url)
    
    data = scrapertools.cache_page( page_url )
    data = data.replace("\n","").replace("\t","")
    post = ""
    block = scrapertools.find_single_match(data, '<Form method="POST"(.*?)</Form>')
    matches = scrapertools.find_multiple_matches(block, 'input.*?name="([^"]+)".*?value="([^"]*)"')
    for inputname, inputvalue in matches:
        post += inputname + "=" + inputvalue + "&"
    # First POST request
    data = scrapertools.cache_page( page_url , post=post)
    data = data.replace("\n","").replace("\t","")
    import time
    time.sleep(5)
    post = ""
    block = scrapertools.find_single_match(data, '<Form name="F1" method="POST"(.*?)</Form>')
    matches = scrapertools.find_multiple_matches(block, '<input.*?name="([^"]+)".*?value="([^"]*)">')
    for inputname, inputvalue in matches:
        post += inputname + "=" + inputvalue + "&"
    # Second POST request after a 5-second wait
    data = scrapertools.cache_page( page_url , post=post)

    video_urls = []
    media = scrapertools.find_single_match(data,"onClick=\"window.open\('([^']+)'")
    # Only the last part of the URL needs to be encoded
    url_strip = urllib.quote(media.rsplit('/', 1)[1])
    media_url = media.rsplit('/', 1)[0] +"/"+url_strip
    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [clicknupload]",media_url])
    for video_url in video_urls:
        logger.info("pelisalacarta.servers.clicknupload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
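
The final step above only URL-encodes the file-name segment; a minimal sketch of that quoting with a made-up URL:

# Hypothetical URL; only the last path segment needs encoding.
import urllib

media = "http://srv.example.com/d/video name 720p.mp4"
head, tail = media.rsplit('/', 1)
media_url = head + "/" + urllib.quote(tail)     # spaces become %20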
Example No. 21
def detalle_episodio(item):

    data = scrapertools.cache_page(item.url)

    item.plot = scrapertools.find_single_match(data,'<meta name="description" content="([^"]+)">')
    item.thumbnail = scrapertools.find_single_match(data,'<meta property="og:image" content="([^"]+)">')

    try:
        item.duration = parse_duration_secs(scrapertools.find_single_match(data,'<meta property="video:duration" content="([^"]+)">'))
    except:
        item.duration = ""

    item.geolocked = "1"

    '''
    try:
        from servers import aragontv as servermodule
        video_urls = servermodule.get_video_url(item.url)
        item.media_url = video_urls[-1][1]
    except:
        import traceback
        print traceback.format_exc()
        item.media_url = ""
    '''

    return item
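Example No. 22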
def findvideos(item):
    logger.info("[BleachPortal.py]==> findvideos")
    itemlist = []

    if "bleach//" in item.url:
        newurl = re.sub(r'\w+//', "", item.url)
    else:
        newurl = item.url

    data = scrapertools.cache_page(newurl)

    if "bleach" in item.extra:
        video = scrapertools.find_single_match(data, 'file: "(.*?)",')
    else:
        video = scrapertools.find_single_match(data, 'file=(.*?)&')
        video = video.rsplit('/', 1)[-1]

    #newurl = re.sub(r'[^/]+$', video, newurl)
    newurl = newurl.replace(newurl.split("/")[-1], "/" + video)
    estensionevideo = video.split(".")[1] if item.extra == "bleach" else video.split(".")[2]
    #estensionevideo = re.sub(r'^[^\d]+[^\.]+', "", video)

    itemlist.append(Item(channel=__channel__,
                         action="play",
                         title=item.title + "[." + estensionevideo + "]",
                         url=newurl,
                         thumbnail=item.thumbnail,
                         fulltitle=item.fulltitle,
                         show=item.fulltitle))
    return itemlist
Example No. 23
def play(item):
    logger.info("{0} - {1} = {2}".format(item.show, item.title, item.url))

    if item.url.startswith(HOST):
        data = httptools.downloadpage(item.url).data

        ajaxLink = re.findall("loadEnlace\((\d+),(\d+),(\d+),(\d+)\)", data)
        ajaxData = ""
        for serie, temp, cap, linkID in ajaxLink:
            logger.debug("Ajax link request: Sherie = {0} - Temp = {1} - Cap = {2} - Link = {3}".format(serie, temp, cap, linkID))
            ajaxData += httptools.downloadpage(HOST + '/ajax/load_enlace.php?serie=' + serie + '&temp=' + temp + '&cap=' + cap + '&id=' + linkID).data

        if ajaxData:
            data = ajaxData

        patron = "onclick='window.open\(\"([^\"]+)\"\);'/>"
        url = scrapertools.find_single_match(data, patron)
    else:
        url = item.url

    itemlist = servertools.find_video_items(data=url)

    titulo = scrapertools.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$")
    if titulo:
        titulo += " [{language}]".format(language=item.language)

    for videoitem in itemlist:
        if titulo:
            videoitem.title = titulo
        else:
            videoitem.title = item.title
        videoitem.channel = item.channel

    return itemlist
Example No. 24
def peliculas(item):
    logger.info("[italiafilm.py] peliculas")
    itemlist = []

    data = scrapertools.cachePage(item.url)
    patron = '<article(.*?)</article>'
    matches = re.compile(patron,re.DOTALL).findall(data)

    for match in matches:

        title = scrapertools.find_single_match(match,'<h3[^<]+<a href="[^"]+"[^<]+>([^<]+)</a>')
        title = scrapertools.htmlclean(title).strip()
        url = scrapertools.find_single_match(match,'<h3[^<]+<a href="([^"]+)"')
        plot = scrapertools.find_single_match(match,'<p class="summary">(.*?)</p>')
        plot = scrapertools.htmlclean(plot).strip()
        thumbnail = scrapertools.find_single_match(match,'data-echo="([^"]+)"')

        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")

        # Add it to the XBMC listing
        itemlist.append( Item(channel=__channel__, action='findvideos', title=title , url=url , thumbnail=thumbnail , fanart=thumbnail, plot=plot , viewmode="movie_with_plot", folder=True) )

    # Next page
    try:
        pagina_siguiente = scrapertools.get_match(data,'<a class="next page-numbers" href="([^"]+)"')
        itemlist.append( Item(channel=__channel__, action="peliculas", title=">> Pagina seguente" , url=pagina_siguiente , folder=True) )
    except:
        pass

    return itemlist
Example No. 25
def programas(item):
    logger.info("tvalacarta.channels.discoverymax programas")

    itemlist = []
    if item.title == "Programas":
        itemlist.append( Item(channel=CHANNELNAME, title="[COLOR gold][B]>> Lista A-Z[/B][/COLOR]", action="alfabetico", url="http://www.discoverymax.marca.com/player/#id=6667aa&view_type=list", thumbnail="", folder=True) )
    data = scrapertools.cache_page(item.url).replace("\\","").replace("u00","\\u00")

    if scrapertools.find_single_match(data, '"items_count":(\d+)') == "0":
        itemlist.append(Item(channel=CHANNELNAME, title="Sin contenido", action="", url="", thumbnail="", folder=False))
        return itemlist
    
    patron = '<a href="([^"]+)".*?src="([^"]+)".*?<h3>([^<]+)</h3>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.decode('unicode_escape').encode('utf8')
        scrapedthumbnail = scrapedthumbnail.decode('unicode_escape').encode('utf8')
        itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle, action="episodios", url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, folder=True) )

    total_pages = int(scrapertools.find_single_match(data, '"total_pages":(\d+)'))
    current_page = int(scrapertools.find_single_match(data, '"current_page":"([^"]+)"'))

    if current_page < total_pages:
        url = re.sub(r'page=(\d+)',"page="+str(current_page+1), item.url)
        itemlist.append( Item(channel=CHANNELNAME, title=">> Página siguiente", action="programas", url=url, thumbnail="", folder=True) )
    return itemlist
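
The next-page URL is produced by rewriting the page parameter in place; a quick sketch with hypothetical values:

# Hypothetical URL with current_page = 3.
import re

url = re.sub(r'page=(\d+)', "page=" + str(3 + 1), "http://example.com/list?page=3")
# -> "http://example.com/list?page=4"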
Example No. 26
def busqueda(item, texto=""):
    logger.info()
    itemlist = []
    item.text_color = color2

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")

    bloque = scrapertools.find_single_match(data, '<ul class="list-paginacion">(.*?)</section>')
    bloque = scrapertools.find_multiple_matches(bloque, '<li><a href=(.*?)</li>')
    for match in bloque:
        patron = '([^"]+)".*?<img class="fade" src="([^"]+)".*?<h2>(.*?)</h2>'
        matches = scrapertools.find_multiple_matches(match, patron)
        for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
            # search fix: skip entries whose text does not actually match
            if unicode(texto, "utf8").lower().encode("utf8") not in \
                unicode(scrapedtitle, "utf8").lower().encode("utf8"):
                continue

            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).replace(" online", "")
            titleinfo = re.sub(r'(?i)((primera|segunda|tercera|cuarta|quinta|sexta) temporada)', "Temporada",
                               scrapedtitle)
            titleinfo = titleinfo.split("Temporada")[0].strip()
            titleinfo = re.sub(r'(\(\d{4}\))|(\(\d{4}\s*-\s*\d{4}\))', '', titleinfo)

            itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=scrapedurl,
                                       thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, show=titleinfo,
                                       contentType="tvshow", contentTitle=titleinfo))
    # Pagination
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)">')
    if next_page != "":
        itemlist.append(item.clone(title=">> Siguiente", url=next_page))

    return itemlist
Example No. 27
def play(item):
    logger.info()
    itemlist = []
    
    location = ""
    i = 0
    while not location:
        try:
            data = httptools.downloadpage(item.url).data
            url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
            if not url_redirect:
                import StringIO
                compressedstream = StringIO.StringIO(data)
                import gzip
                gzipper = gzip.GzipFile(fileobj=compressedstream)
                data = gzipper.read()
                gzipper.close()
                url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
            location = httptools.downloadpage(url_redirect, follow_redirects=False).headers["location"]
        except:
            pass
        i += 1
        if i == 6:
            return itemlist

    enlaces = servertools.findvideosbyserver(location, item.server)
    if len(enlaces) > 0:
        itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))

    return itemlist
Example No. 28
def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)

    if item.plot == "":
        item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')

    data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>')
    matches = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data)

    for url, title in matches:
        title = title.strip()
        url = urlparse.urljoin(item.url, url)
        thumbnail = item.thumbnail

        try:
            episode = int(scrapertools.find_single_match(title, "Episodio (\d+)"))
        except ValueError:
            season = 1
            episode = 1
        else:
            season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)

        title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))

        itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail, fulltitle=title,
                                   fanart=thumbnail, contentType="episode"))

    return itemlist
Example No. 29
def get_video_url( page_url , premium = False , user="" , password="", video_password="", page_data="" ):
    logger.info("tvalacarta.server.rtvcm get_video_url page_url"+page_url)

    data = scrapertools.cache_page(page_url)

    #<script src="http://cdnapi.kaltura.com/p/2288691/sp/228869100/embedIframeJs/uiconf_id/39784151/partner_id/2288691?autoembed=true&playerId=kaltura_player_1496914486&entry_id=0_3e1eijre&flashvars[streamerType]=auto&width=640&height=360&flashvars[streamerType]=auto"></script> </div>
    partner_id = scrapertools.find_single_match(data,'<script src="http://cdnapi.kaltura.com/p/\d+/sp/\d+/embedIframeJs/uiconf_id/\d+/partner_id/(\d+)')
    logger.info("tvalacarta.server.rtvcm get_video_url partner_id="+partner_id)
    video_id = scrapertools.find_single_match(data,'<script src="http://cdnapi.kaltura.com/p/\d+/sp/\d+/embedIframeJs/uiconf_id/\d+/partner_id/\d+.autoembed=true&playerId=kaltura_player_\d+&entry_id=([^\&]+)\&')
    logger.info("tvalacarta.server.rtvcm get_video_url video_id="+video_id)

    media_url = "kaltura:"+partner_id+":"+video_id
    logger.info("tvalacarta.server.rtvcm get_video_url media_url="+media_url)

    ydl = youtube_dl.YoutubeDL({'outtmpl': u'%(id)s%(ext)s'})
    result = ydl.extract_info(media_url, download=False)
    logger.info("tvalacarta.server.rtvcm get_video_url result="+repr(result))

    video_urls = []

    if "ext" in result and "url" in result:
        video_urls.append(["[rtvcm]", scrapertools.safe_unicode(result['url']).encode('utf-8')+"|User-Agent=Mozilla/5.0"])
    else:

        if "entries" in result:
            for entry in result["entries"]:
                video_urls.append(["[rtvcm]", scrapertools.safe_unicode(entry['url']).encode('utf-8')+"|User-Agent=Mozilla/5.0"])

    return video_urls
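
youtube-dl resolves Kaltura "kaltura:<partner_id>:<entry_id>" pseudo-URLs, which is exactly what the code above builds; with the ids from the sample script tag quoted in the comment, the result would be:

# Built from the partner_id / entry_id in the sample <script> URL above.
media_url = "kaltura:2288691:0_3e1eijre"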
Example No. 30
def findvideos(item):
    logger.info("pelisalacarta.channels.reyanime findvideos")
    itemlist = []

    # Download the page
    data = scrapertools.cachePage(item.url)
    data = scrapertools.find_single_match(data,"<!--reproductor-->(.*?)<!--!reproductor-->")

    patron = '<iframe src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if (DEBUG): scrapertools.printMatches(matches)

    for page_url in matches:
        logger.info("page_url="+page_url)
        servercode = scrapertools.find_single_match(page_url,'http.//ozhe.larata.in/repro-rc/([^\?]+)')
        logger.info("servercode="+servercode)
        #videoid = scrapertools.find_single_match(page_url,'http://ypfserviclubs.org/repro-rc/[a-z0-9]+\?v\=(.*?)$')
        servername = servercode_to_name(servercode)
        logger.info("servername="+servername)
        #page_url = build_video_url(servername,videoid)

        title = "Ver en "+servername
        itemlist.append( Item(channel=item.channel, action="play", title=title , url=page_url , folder=False) )

    return itemlist
Example No. 31
def acciones_cuenta(item):
    logger.info()
    itemlist = []

    if "Tus fichas" in item.title:
        itemlist.append(
            item.clone(title="Capítulos",
                       url="tf_block_c a",
                       contentType="tvshow"))
        itemlist.append(
            item.clone(title="Series", url="tf_block_s", contentType="tvshow"))
        itemlist.append(item.clone(title="Películas", url="tf_block_p"))
        itemlist.append(item.clone(title="Documentales", url="tf_block_d"))
        return itemlist
    elif "Añadir a una lista" in item.title:
        data = httptools.downloadpage(host + "/c_listas.php?apikey=%s&sid=%s" %
                                      (apikey, sid)).data
        data = xml2dict(data)
        itemlist.append(item.clone(title="Crear nueva lista", folder=False))
        if data["Data"]["TusListas"] != "\t":
            import random
            data = data["Data"]["TusListas"]["Item"]
            if type(data) is not list:
                data = [data]
            for child in data:
                image = ""
                title = "%s (%s fichas)" % (child["Title"],
                                            child["FichasInList"])
                images = []
                for i in range(1, 5):
                    if "sinimagen.png" not in child["Poster%s" % i]:
                        images.append(child["Poster%s" % i].replace(
                            "/100/", "/400/"))
                if images:
                    image = images[random.randint(0, len(images) - 1)]
                url = host + "/data.php?mode=add_listas&apikey=%s&sid=%s&ficha_id=%s" % (
                    apikey, sid, item.ficha)
                post = "lista_id[]=%s" % child["Id"]
                itemlist.append(
                    item.clone(title=title,
                               url=url,
                               post=post,
                               thumbnail=image,
                               folder=False))

        return itemlist
    elif "Crear nueva lista" in item.title:
        from platformcode import platformtools
        nombre = platformtools.dialog_input(
            "", "Introduce un nombre para la lista")
        if nombre:
            dict_priv = {0: 'Pública', 1: 'Privada'}
            priv = platformtools.dialog_select("Privacidad de la lista",
                                               ['Pública', 'Privada'])
            if priv != -1:
                url = host + "/data.php?mode=create_list&apikey=%s&sid=%s" % (
                    apikey, sid)
                post = "name=%s&private=%s" % (nombre, priv)
                data = httptools.downloadpage(url, post)
                platformtools.dialog_notification(
                    "Lista creada correctamente",
                    "Nombre: %s - %s" % (nombre, dict_priv[priv]))
                platformtools.itemlist_refresh()
        return
    elif re.search(r"(?i)Seguir Lista", item.title):
        from platformcode import platformtools
        data = httptools.downloadpage(item.url)
        platformtools.dialog_notification("Operación realizada con éxito",
                                          "Lista: %s" % item.lista)
        return
    elif item.post:
        from platformcode import platformtools
        data = httptools.downloadpage(item.url, item.post).data
        platformtools.dialog_notification("Ficha añadida a la lista",
                                          "Lista: %s" % item.title)
        platformtools.itemlist_refresh()
        return

    data = httptools.downloadpage("https://playmax.mx/tusfichas.php").data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    bloque = scrapertools.find_single_match(
        data,
        item.url + '">(.*?)(?:<div class="tf_blocks|<div class="tf_o_move">)')
    matches = scrapertools.find_multiple_matches(
        bloque, '<div class="tf_menu_mini">([^<]+)<(.*?)<cb></cb></div>')
    for category, contenido in matches:
        itemlist.append(
            item.clone(action="", title=category, text_color=color3))

        patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src="([^"]+)".*?serie="([^"]*)".*?' \
                 '<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
        entradas = scrapertools.find_multiple_matches(contenido, patron)
        for scrapedurl, scrapedthumbnail, serie, episodio, scrapedtitle in entradas:
            tipo = "movie"
            scrapedurl = host + scrapedurl
            scrapedthumbnail = host + scrapedthumbnail
            action = "findvideos"
            if __menu_info__:
                action = "menu_info"
            if serie:
                tipo = "tvshow"
            if episodio:
                title = "      %s - %s" % (episodio.replace("X",
                                                            "x"), scrapedtitle)
            else:
                title = "      " + scrapedtitle

            new_item = Item(channel=item.channel,
                            action=action,
                            title=title,
                            url=scrapedurl,
                            thumbnail=scrapedthumbnail,
                            contentTitle=scrapedtitle,
                            contentType=tipo,
                            text_color=color2)
            if new_item.contentType == "tvshow":
                new_item.show = scrapedtitle
                if not __menu_info__:
                    new_item.action = "episodios"

            itemlist.append(new_item)

    return itemlist
Example No. 32
def video(item):
    logger.info('[filmsenzalimiticc.py] video')
    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data.replace('\n',
                                                         '').replace('\t', '')

    # Extract the contents
    patron = r'<div class="mediaWrap mediaWrapAlt">.*?<a href="([^"]+)".*?src="([^"]+)".*?<p>([^"]+) (\(.*?)streaming<\/p>.*?<p>\s*(\S+).*?<\/p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality in matches:
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedyear = scrapertools.decodeHtmlentities(scrapedyear)
        scrapedquality = scrapertools.decodeHtmlentities(scrapedquality)

        year = scrapedyear.replace('(', '').replace(')', '')
        infolabels = {}
        if year:
            infolabels['year'] = year

        title = scrapedtitle + ' ' + scrapedyear + ' [' + scrapedquality + ']'

        # Choose between TV series and films (default to movie so that
        # 'azione' is always defined)
        azione = 'findvideos'
        tipologia = 'movie'
        if item.contentType == 'tvshow':
            azione = 'episodios'
            tipologia = 'tv'

        itemlist.append(
            Item(channel=item.channel,
                 action=azione,
                 contentType=item.contentType,
                 title=title,
                 fulltitle=scrapedtitle,
                 text_color='azure',
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 infoLabels=infolabels,
                 show=scrapedtitle))

    # Next page
    next_page = scrapertools.find_single_match(
        data, '<a class="nextpostslink".*?href="([^"]+)">')

    if next_page != '':
        itemlist.append(
            Item(
                channel=item.channel,
                action='film',
                title='[COLOR lightgreen]' +
                config.get_localized_string(30992) + '[/COLOR]',
                url=next_page,
                contentType=item.contentType,
                thumbnail=
                'http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'
            ))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
Example No. 33
def lista(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data

    action = "play"
    if config.get_setting("menu_info", "freecambay"):
        action = "menu_info"

    # Extract the entries
    patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="duration">([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
        if duration:
            scrapedtitle = "%s - %s" % (duration, scrapedtitle)
        if '>HD<' in quality:
            scrapedtitle += "  [COLOR red][HD][/COLOR]"

        itemlist.append(
            item.clone(action=action,
                       title=scrapedtitle,
                       url=scrapedurl,
                       thumbnail=scrapedthumbnail,
                       fanart=scrapedthumbnail))

    # Extract the next-page marker
    if item.extra:
        next_page = scrapertools.find_single_match(
            data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
        if next_page:
            if "from_videos=" in item.url:
                next_page = re.sub(r'&from_videos=(\d+)',
                                   '&from_videos=%s' % next_page, item.url)
            else:
                next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
                            "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
            itemlist.append(
                item.clone(action="lista",
                           title=">> Página Siguiente",
                           url=next_page))
    else:
        next_page = scrapertools.find_single_match(
            data, '<li class="next">.*?href="([^"]*)"')
        if next_page and not next_page.startswith("#"):
            next_page = urlparse.urljoin(host, next_page)
            itemlist.append(
                item.clone(action="lista",
                           title=">> Página Siguiente",
                           url=next_page))
        else:
            next_page = scrapertools.find_single_match(
                data, '<li class="next">.*?from:(\d+)')
            if next_page:
                if "from=" in item.url:
                    next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page,
                                       item.url)
                else:
                    next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
                        item.url, next_page)
                itemlist.append(
                    item.clone(action="lista",
                               title=">> Página Siguiente",
                               url=next_page))

    return itemlist
Example No. 34
def calendario(item):
    logger.info()
    itemlist = []
    data = get_source(item.url).data
    patron = '<div class="specific-date">.*?datetime="\d+-(\d+)-(\d+).*?class="day-name">.*?>\s*([^<]+)</time>(.*?)</section>'
    bloques = scrapertools.find_multiple_matches(data, patron)
    for mes, dia, title, b in bloques:
        patron = 'class="available-time">([^<]+)<.*?<cite itemprop="name">(.*?)</cite>.*?href="([^"]+)"' \
                 '.*?>\s*(.*?)\s*</a>(.*?)</article>'
        matches = scrapertools.find_multiple_matches(b, patron)
        if matches:
            title = "%s/%s - %s" % (dia, mes, title.strip())
            itemlist.append(item.clone(action="", title=title))
        for hora, title, url, subt, datos in matches:
            subt = subt.replace("Available", "Disponible").replace(
                "Episode", "Episodio").replace("in ", "en ")
            subt = re.sub(r"\s{2,}", " ", subt)
            if "<time" in subt:
                subt = re.sub(r"<time.*?>", "", subt).replace("</time>", "")
            scrapedtitle = "   [%s] %s - %s" % (
                hora, scrapertools.htmlclean(title), subt)
            scrapedtitle = re.sub(
                r"\[email&#160;protected\]|\[email\xc2\xa0protected\]",
                "Idolm@ster", scrapedtitle)
            if "Disponible" in scrapedtitle:
                if item.proxy:
                    url = urllib.unquote(
                        url.replace("/browse.php?u=",
                                    "").replace("&amp;b=4", ""))
                action = "play"
                server = "crunchyroll"
            else:
                action = ""
                server = ""
            thumb = scrapertools.find_single_match(
                datos, '<img class="thumbnail" src="([^"]+)"')
            if not thumb:
                thumb = scrapertools.find_single_match(datos, 'src="([^"]+)"')
            if thumb:
                thumb = urllib.unquote(thumb.replace("/browse.php?u=", "").replace("_thumb", "_full") \
                                       .replace("&amp;b=4", "").replace("_large", "_full"))
            itemlist.append(
                item.clone(action=action,
                           url=url,
                           title=scrapedtitle,
                           contentTitle=title,
                           thumbnail=thumb,
                           text_color=color2,
                           contentSerieName=title,
                           server=server))
    next = scrapertools.find_single_match(
        data, 'js-pagination-next"\s*href="([^"]+)"')
    if next:
        if item.proxy:
            next = proxy_i + url.replace("&amp;b=4", "")
        else:
            next = host + next
        itemlist.append(
            item.clone(action="calendario",
                       url=next,
                       title=">> Siguiente Semana"))
    prev = scrapertools.find_single_match(
        data, 'js-pagination-last"\s*href="([^"]+)"')
    if prev:
        if item.proxy:
            prev = proxy_i + url.replace("&amp;b=4", "")
        else:
            prev = host + prev
        itemlist.append(
            item.clone(action="calendario",
                       url=prev,
                       title="<< Semana Anterior"))
    return itemlist
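
The proxy-unwrapping pattern above, stripping the "/browse.php?u=" prefix and the "&amp;b=4" suffix before percent-decoding, recurs throughout this channel. A minimal sketch of the same transformation in isolation (the helper name and sample URL are illustrative, not part of the channel):

import urllib

def unwrap_proxy(url):
    # Undo the web-proxy wrapping seen in the scraped pages:
    # "/browse.php?u=<percent-encoded-url>&amp;b=4"
    return urllib.unquote(
        url.replace("/browse.php?u=", "").replace("&amp;b=4", ""))

print(unwrap_proxy("/browse.php?u=https%3A%2F%2Fwww.crunchyroll.com%2Fseries&amp;b=4"))
# -> https://www.crunchyroll.com/series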
Ejemplo n.º 35
def episodios(item):
    logger.info()
    itemlist = []
    episodes_list = []
    _season = 1

    data = get_source(item.url).data
    data = re.sub(r'\n|\t|\s{2,}', '', data)
    patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)"' \
             'style="width:(.*?)%.*?<span class="series-title.*?>\s*(.*?)</span>.*?<p class="short-desc".*?>' \
             '\s*(.*?)</p>.*?description":"(.*?)"'
    if data.count('class="season-dropdown') > 1:
        bloques = scrapertools.find_multiple_matches(
            data, 'class="season-dropdown[^"]+".*?title="([^"]+)"(.*?)</ul>')
        bloques.reverse()
        for season, b in bloques:
            matches = scrapertools.find_multiple_matches(b, patron)
            matches.reverse()
            if matches:
                itemlist.append(
                    item.clone(action="", title=season, text_color=color3))
            for url, thumb, media_id, visto, scrapedtitle, subt, plot in matches:

                if item.proxy:
                    url = urllib.unquote(
                        url.replace("/browse.php?u=",
                                    "").replace("&amp;b=4", ""))
                else:
                    url = host + url
                url = url.replace(proxy, "")
                thumb = urllib.unquote(
                    thumb.replace("/browse.php?u=", "").replace(
                        "_wide.", "_full.").replace("&amp;b=4", ""))

                episode = scrapertools.find_single_match(scrapedtitle, '(\d+)')
                '''_season = scrapertools.find_single_match(season, '(\d+)$')
                if not _season:
                    _season = '1'
                '''
                title_s = scrapertools.find_single_match(season, '\((.*?)\)')

                # If this season/episode key was already seen, the season
                # dropdown repeats names, so bump the season counter
                count_title = '%sx%s %s' % (_season, episode, title_s)
                if count_title in episodes_list:
                    _season += 1

                title = '%sx%s' % (_season, episode)
                title = "     %s - %s" % (title, subt)

                if not episode:
                    title = "     %s" % (scrapedtitle)

                count_title = '%sx%s %s' % (_season, episode, title_s)
                episodes_list.append(count_title)

                if visto.strip() != "0":
                    title += " [COLOR %s][V][/COLOR]" % color5
                plot = unicode(plot, 'unicode-escape', "ignore")
                if not thumb.startswith('http'):
                    thumb = host + thumb

                if config.get_setting('unify'):
                    title += "[COLOR grey] [online][/COLOR]"

                itemlist.append(
                    Item(channel=item.channel,
                         action="play",
                         title=title,
                         url=url,
                         thumbnail=thumb,
                         media_id=media_id,
                         server="crunchyroll",
                         text_color=item.text_color,
                         contentTitle=item.contentTitle,
                         contentSerieName=item.contentSerieName,
                         plot=plot))
    else:
        matches = scrapertools.find_multiple_matches(data, patron)
        matches.reverse()
        for url, thumb, media_id, visto, title, subt, plot in matches:
            if item.proxy:
                url = urllib.unquote(
                    url.replace("/browse.php?u=", "").replace("&amp;b=4", ""))
            else:
                url = host + url
            url = url.replace(proxy, "")
            thumb = urllib.unquote(
                thumb.replace("/browse.php?u=",
                              "").replace("_wide.",
                                          "_full.").replace("&amp;b=4", ""))

            episode = scrapertools.find_single_match(title, '(\d+)')
            title = '1x%s' % episode
            title = "%s - %s" % (title, subt)

            if visto.strip() != "0":
                title += " [COLOR %s][V][/COLOR]" % color5

            plot = unicode(plot, 'unicode-escape', "ignore")

            if not thumb.startswith('http'):
                thumb = host + thumb

            if config.get_setting('unify'):
                title += "[COLOR grey] [online][/COLOR]"

            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     url=url,
                     thumbnail=thumb,
                     media_id=media_id,
                     server="crunchyroll",
                     text_color=item.text_color,
                     contentTitle=item.contentTitle,
                     contentSerieName=item.contentSerieName,
                     plot=plot))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la videoteca",
                 url=item.url,
                 text_color=color1,
                 action="add_serie_to_library",
                 extra="episodios",
                 contentSerieName=item.contentSerieName))
    return itemlist
Ejemplo n.º 36
def findvideos(item):
    logger.info()
    if item.contentSeason != '':
        return episode_links(item)

    itemlist = []
    item.text_color = color3

    data = get_data(item.url)

    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year:
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    old_format = False
    # Torrent pattern (old format)
    if "Enlaces de descarga</div>" in data:
        old_format = True
        matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
        for scrapedurl in matches:
            scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
            title = "[Torrent] "
            title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
            itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                       text_color="green"))

    # Online links pattern
    data_online = scrapertools.find_single_match(data, 'Ver online</div>(.*?)<div class="section-box related-posts">')
    if data_online:
        title = "Enlaces Online"
        if '"l-latino2"' in data_online:
            title += " [LAT]"
        elif '"l-esp2"' in data_online:
            title += " [ESP]"
        elif '"l-vose2"' in data_online:
            title += " [VOSE]"

        patron = 'make_links.*?,[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for i, code in enumerate(matches):
            enlace = show_links(code)
            links = servertools.findvideos(data=enlace[0])
            if links and "peliculas.nu" not in links:
                if i == 0:
                    extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>')
                    size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()

                    if size:
                        title += " [%s]" % size
                    new_item = item.clone(title=title, action="", text_color=color1)
                    if extra_info:
                        extra_info = scrapertools.htmlclean(extra_info)
                        new_item.infoLabels["plot"] = extra_info
                        new_item.title += " +INFO"
                    itemlist.append(new_item)

                title = "   Ver vídeo en " + links[0][2]
                itemlist.append(item.clone(action="play", server=links[0][2], title=title, url=links[0][1]))
    scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
    if scriptg:
        gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
        url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"')
        if url:
            itemlist.append(item.clone(action="play", server="directo", url=url, extra=item.url,
                                       title="   Ver vídeo en Googlevideo (Máxima calidad)"))

    # Download links pattern
    patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \
             '(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-' \
             'posts">)'
    bloques_descarga = scrapertools.find_multiple_matches(data, patron)
    for title_bloque, bloque in bloques_descarga:
        if title_bloque == "Ver online":
            continue
        if '"l-latino2"' in bloque:
            title_bloque += " [LAT]"
        elif '"l-esp2"' in bloque:
            title_bloque += " [ESP]"
        elif '"l-vose2"' in bloque:
            title_bloque += " [VOSE]"

        extra_info = scrapertools.find_single_match(bloque, '<span class="tooltiptext">(.*?)</span>')
        size = scrapertools.find_single_match(bloque, '(?i)TAMAÑO:\s*(.*?)<').strip()

        if size:
            title_bloque += " [%s]" % size
        new_item = item.clone(title=title_bloque, action="", text_color=color1)
        if extra_info:
            extra_info = scrapertools.htmlclean(extra_info)
            new_item.infoLabels["plot"] = extra_info
            new_item.title += " +INFO"
        itemlist.append(new_item)

        if '<div class="subiendo">' in bloque:
            itemlist.append(item.clone(title="   Los enlaces se están subiendo", action=""))
            continue
        patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedserver, scrapedurl in matches:
            if scrapedserver in ("ul", "uploaded"):
                scrapedserver = "uploadedto"
            titulo = unicode(scrapedserver, "utf-8").capitalize().encode("utf-8")
            if titulo == "Magnet" and old_format:
                continue
            elif titulo == "Magnet" and not old_format:
                title = "   Enlace Torrent"
                scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
                scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
                itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                           text_color="green"))
                continue
            if servertools.is_server_enabled(scrapedserver):
                try:
                    # servers_module = __import__("servers." + scrapedserver)
                    # Get the number of links
                    urls = show_links(scrapedurl)
                    numero = str(len(urls))
                    titulo = "   %s - Nº enlaces: %s" % (titulo, numero)
                    itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver))
                except:
                    pass

    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    if item.extra != "findvideos" and config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", action="add_pelicula_to_library",
                             extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle},
                             fulltitle=item.fulltitle, text_color="green"))

    return itemlist
Ejemplo n.º 37
def entradas(item):
    logger.info()
    itemlist = []
    item.text_color = color2

    data = get_data(item.url)
    bloque = scrapertools.find_single_match(data, '<div id="content" role="main">(.*?)<div id="sidebar" '
                                                  'role="complementary">')
    contenido = ["series", "deportes", "anime", "miniseries", "programas"]
    # The pattern depends on the content type
    if any(match in item.url for match in contenido):
        patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
                 '.*?<span class="overlay(|[^"]+)">'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedinfo in matches:
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedurl))
            if scrapedinfo != "":
                scrapedinfo = scrapedinfo.replace(" ", "").replace("-", " ")

                scrapedinfo = "  [%s]" % unicode(scrapedinfo, "utf-8").capitalize().encode("utf-8")
            titulo = scrapedtitle + scrapedinfo
            titulo = scrapertools.decodeHtmlentities(titulo)
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

            scrapedthumbnail = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedthumbnail))
            if not scrapedthumbnail.startswith("http"):
                scrapedthumbnail = "http:" + scrapedthumbnail
            scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
            scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \
                               urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])
            if "series" in item.url or "anime" in item.url:
                item.show = scrapedtitle
            itemlist.append(item.clone(action="episodios", title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail,
                                       fulltitle=scrapedtitle, contentTitle=scrapedtitle, contentType="tvshow"))
    else:
        patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
                 '.*?<span class="overlay.*?>(.*?)<.*?<p class="stats">(.*?)</p>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedurl, scrapedtitle, scrapedthumbnail, info, categoria in matches:
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedurl))
            titulo = scrapertools.decodeHtmlentities(scrapedtitle)
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.split("[")[0])
            action = "findvideos"
            show = ""
            if "Series" in categoria:
                action = "episodios"
                show = scrapedtitle
            elif categoria and categoria != "Películas" and categoria != "Documentales":
                try:
                    titulo += " [%s]" % categoria.rsplit(", ", 1)[1]
                except:
                    titulo += " [%s]" % categoria
                if 'l-espmini' in info:
                    titulo += " [ESP]"
                if 'l-latmini' in info:
                    titulo += " [LAT]"
                if 'l-vosemini' in info:
                    titulo += " [VOSE]"

            if info:
                titulo += " [%s]" % unicode(info, "utf-8").capitalize().encode("utf-8")
            year = scrapertools.find_single_match(titulo, '\[(\d{4})\]')
            scrapedthumbnail = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedthumbnail))
            if not scrapedthumbnail.startswith("http"):
                scrapedthumbnail = "http:" + scrapedthumbnail
            scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
            scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \
                               urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])

            itemlist.append(item.clone(action=action, title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail,
                                       fulltitle=scrapedtitle, contentTitle=scrapedtitle, viewmode="movie_with_plot",
                                       show=show, contentType="movie", infoLabels={'year':year}))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
    if next_page:
        next_page = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', next_page))
        itemlist.append(item.clone(title=">> Siguiente", url=next_page, text_color=color3))

    return itemlist
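
The thumbnail handling above percent-encodes only the filename after splitting on the last "/"; quoting the whole URL would also escape the "://" separator. A short illustration (the URL is made up):

import urllib

thumb = "http://images.example.com/covers/Una Pelicula.jpg"  # hypothetical URL
thumb = thumb.rsplit("/", 1)[0] + "/" + urllib.quote(thumb.rsplit("/", 1)[1])
print(thumb)  # http://images.example.com/covers/Una%20Pelicula.jpg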
Ejemplo n.º 38
def findvideos(item):
    logger.info()
    itemlist = []

    if item.contentType == "movie":
        # Download the page
        data = httptools.downloadpage(item.url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

        if not item.infoLabels["tmdb_id"]:
            item.infoLabels["tmdb_id"] = scrapertools.find_single_match(
                data, '<a href="https://www.themoviedb.org/'
                '[^/]+/(\d+)')
            item.infoLabels["year"] = scrapertools.find_single_match(
                data, 'class="e_new">(\d{4})')

        if __modo_grafico__:
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        if not item.infoLabels["plot"]:
            item.infoLabels["plot"] = scrapertools.find_single_match(
                data, 'itemprop="description">([^<]+)</div>')
        if not item.infoLabels["genre"]:
            item.infoLabels["genre"] = ", ".join(
                scrapertools.find_multiple_matches(
                    data, '<a itemprop="genre"[^>]+>'
                    '([^<]+)</a>'))

        ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
        if not ficha:
            ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')
        cid = "0"
    else:
        ficha, cid = scrapertools.find_single_match(item.url,
                                                    'ficha=(\d+)&c_id=(\d+)')

    url = "https://playmax.mx/c_enlaces_n.php?apikey=%s&sid=%s&ficha=%s&cid=%s" % (
        apikey, sid, ficha, cid)
    data = httptools.downloadpage(url).data
    data = xml2dict(data)

    for k, v in data["Data"].items():
        try:
            if type(v) is dict:
                if k == "Online":
                    order = 1
                elif k == "Download":
                    order = 0
                else:
                    order = 2

                itemlist.append(
                    item.clone(action="",
                               title=k,
                               text_color=color3,
                               order=order))
                if type(v["Item"]) is str:
                    continue
                elif type(v["Item"]) is dict:
                    v["Item"] = [v["Item"]]
                for it in v["Item"]:
                    try:
                        thumbnail = "%s/styles/prosilver/imageset/%s.png" % (
                            host, it['Host'])
                        title = "   %s - %s/%s" % (it['Host'].capitalize(),
                                                   it['Quality'], it['Lang'])
                        calidad = int(
                            scrapertools.find_single_match(
                                it['Quality'], '(\d+)p'))
                        calidadaudio = it['QualityA'].replace("...", "")
                        subtitulos = it['Subtitles'].replace(
                            "Sin subtítulos", "")
                        if subtitulos:
                            title += " (%s)" % subtitulos
                        if calidadaudio:
                            title += "  [Audio:%s]" % calidadaudio

                        likes = 0
                        if it["Likes"] != "0" or it["Dislikes"] != "0":
                            likes = int(it["Likes"]) - int(it["Dislikes"])
                            title += "  (%s ok, %s ko)" % (it["Likes"],
                                                           it["Dislikes"])
                        if type(it["Url"]) is dict:
                            for i, enlace in enumerate(it["Url"]["Item"]):
                                titulo = title + "  (Parte %s)" % (i + 1)
                                itemlist.append(
                                    item.clone(title=titulo,
                                               url=enlace,
                                               action="play",
                                               calidad=calidad,
                                               thumbnail=thumbnail,
                                               order=order,
                                               like=likes,
                                               ficha=ficha,
                                               cid=cid,
                                               folder=False))
                        else:
                            url = it["Url"]
                            itemlist.append(
                                item.clone(title=title,
                                           url=url,
                                           action="play",
                                           calidad=calidad,
                                           thumbnail=thumbnail,
                                           order=order,
                                           like=likes,
                                           ficha=ficha,
                                           cid=cid,
                                           folder=False))
                    except:
                        pass
        except:
            pass

    if not config.get_setting("order_web", "playmax"):
        itemlist.sort(key=lambda it: (it.order, it.calidad, it.like),
                      reverse=True)
    else:
        itemlist.sort(key=lambda it: it.order, reverse=True)
    if itemlist:
        itemlist.extend(acciones_fichas(item, sid, ficha))

    if not itemlist and item.contentType != "movie":
        url = url.replace("apikey=%s&" % apikey, "")
        data = httptools.downloadpage(url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

        patron = '<div id="f_fde_c"[^>]+>(.*?update_fecha\(\d+\)">)</div>'
        estrenos = scrapertools.find_multiple_matches(data, patron)
        for info in estrenos:
            info = "Estreno en " + scrapertools.htmlclean(info)
            itemlist.append(item.clone(action="", title=info))

    if not itemlist:
        itemlist.append(
            item.clone(action="", title="No hay enlaces disponibles"))

    return itemlist
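
The v["Item"] juggling above exists because XML-to-dict converters typically yield a plain value for a single <Item> child but a list for repeated ones, so both cases are coerced to a list before iterating. The channel's own xml2dict helper is not shown in this excerpt; the toy converter below (illustrative only) reproduces that quirk:

import xml.etree.ElementTree as ET

def to_dict(node):
    # Toy converter: leaves become text; a tag seen twice is promoted to a list
    if len(node) == 0:
        return node.text or ""
    out = {}
    for child in node:
        value = to_dict(child)
        if child.tag in out:
            if not isinstance(out[child.tag], list):
                out[child.tag] = [out[child.tag]]
            out[child.tag].append(value)
        else:
            out[child.tag] = value
    return out

one = to_dict(ET.fromstring("<Online><Item>a</Item></Online>"))
two = to_dict(ET.fromstring("<Online><Item>a</Item><Item>b</Item></Online>"))
print(type(one["Item"]), type(two["Item"]))  # str vs. list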
Ejemplo n.º 39
def fichas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)

    fichas_marca = {
        '1': 'Siguiendo',
        '2': 'Pendiente',
        '3': 'Favorita',
        '4': 'Vista',
        '5': 'Abandonada'
    }
    patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src-data="([^"]+)".*?' \
             '<div class="c_fichas_data".*?marked="([^"]*)".*?serie="([^"]*)".*?' \
             '<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, marca, serie, episodio, scrapedtitle in matches:
        tipo = "movie"
        # rsplit strips any "-dc=" marker, so it always needs re-appending
        scrapedurl = host + scrapedurl.rsplit("-dc=")[0] + "-dc="
        action = "findvideos"
        if __menu_info__:
            action = "menu_info"
        if serie:
            tipo = "tvshow"
        if episodio:
            title = "%s - %s" % (episodio.replace("X", "x"), scrapedtitle)
        else:
            title = scrapedtitle

        if marca:
            title += "  [COLOR %s][%s][/COLOR]" % (color4, fichas_marca[marca])

        new_item = Item(channel=item.channel,
                        action=action,
                        title=title,
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail,
                        contentTitle=scrapedtitle,
                        contentType=tipo,
                        text_color=color2)
        if new_item.contentType == "tvshow":
            new_item.show = scrapedtitle
            if not __menu_info__:
                new_item.action = "episodios"

        itemlist.append(new_item)

    if itemlist and (item.extra == "listas_plus" or item.extra == "sigo"):
        follow = scrapertools.find_single_match(
            data, '<div onclick="seguir_lista.*?>(.*?)<')
        title = "Seguir Lista"
        if follow == "Siguiendo":
            title = "Dejar de seguir lista"
        item.extra = ""
        url = host + "/data.php?mode=seguir_lista&apikey=%s&sid=%s&lista=%s" % (
            apikey, sid, item.url.rsplit("/l", 1)[1])
        itemlist.insert(
            0,
            item.clone(action="acciones_cuenta",
                       title=title,
                       url=url,
                       text_color=color4,
                       lista=item.title,
                       folder=False))

    next_page = scrapertools.find_single_match(data,
                                               'href="([^"]+)" class="next"')
    if next_page:
        next_page = host + next_page.replace("&amp;", "&")
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title=">> Página Siguiente",
                 url=next_page))

        try:
            total = int(
                scrapertools.find_single_match(
                    data, '<span class="page-dots">.*href.*?>(\d+)'))
        except:
            total = 0
        if not config.get_setting("last_page", item.channel) and config.is_xbmc() and total > 2 \
                and item.extra != "newest":
            itemlist.append(
                item.clone(action="select_page",
                           title="Ir a página... (Total:%s)" % total,
                           url=next_page,
                           text_color=color5))

    return itemlist
Ejemplo n.º 40
def menu_info(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    item.infoLabels["tmdb_id"] = scrapertools.find_single_match(
        data, '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
    item.infoLabels["year"] = scrapertools.find_single_match(
        data, 'class="e_new">(\d{4})')
    item.infoLabels["plot"] = scrapertools.find_single_match(
        data, 'itemprop="description">([^<]+)</div>')
    item.infoLabels["genre"] = ", ".join(
        scrapertools.find_multiple_matches(
            data, '<a itemprop="genre"[^>]+>([^<]+)</a>'))
    if __modo_grafico__:
        tmdb.set_infoLabels_item(item, __modo_grafico__)

    action = "findvideos"
    title = "Ver enlaces"
    if item.contentType == "tvshow":
        action = "episodios"
        title = "Ver capítulos"
    itemlist.append(item.clone(action=action, title=title))

    carpeta = "CINE"
    tipo = "película"
    action = "add_pelicula_to_library"
    extra = ""
    if item.contentType == "tvshow":
        carpeta = "SERIES"
        tipo = "serie"
        action = "add_serie_to_library"
        extra = "episodios###library"

    library_path = config.get_videolibrary_path()
    if config.get_videolibrary_support():
        title = "Añadir %s a la videoteca" % tipo
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith(
                "smb://"):
            # Walk the local video library for a folder whose path contains
            # this IMDb id, to warn when the title is already in the library
            try:
                from core import filetools
                path = filetools.join(library_path, carpeta)
                files = filetools.walk(path)
                for dirpath, dirname, filename in files:
                    if item.infoLabels["imdb_id"] in dirpath:
                        namedir = dirpath.replace(path, '')[1:]
                        for f in filename:
                            if f != namedir + ".nfo" and f != "tvshow.nfo":
                                continue
                            from core import videolibrarytools
                            head_nfo, it = videolibrarytools.read_nfo(
                                filetools.join(dirpath, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "playmax" in canales:
                                canales.pop(canales.index("playmax"))
                                canales.insert(0, "[COLOR red]playmax[/COLOR]")
                            title = "%s ya en tu videoteca. [%s] ¿Añadir?" % (
                                tipo.capitalize(), ",".join(canales))
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())
                pass

        itemlist.append(
            item.clone(action=action,
                       title=title,
                       text_color=color5,
                       extra=extra))

    token_auth = config.get_setting("token_trakt", "tvmoviedb")
    if token_auth and item.infoLabels["tmdb_id"]:
        extra = "movie"
        if item.contentType != "movie":
            extra = "tv"
        itemlist.append(
            item.clone(channel="tvmoviedb",
                       title="[Trakt] Gestionar con tu cuenta",
                       action="menu_trakt",
                       extra=extra))
    itemlist.append(
        item.clone(channel="trailertools",
                   action="buscartrailer",
                   title="Buscar Tráiler",
                   text_color="magenta",
                   context=""))

    itemlist.append(item.clone(action="", title=""))
    ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
    if not ficha:
        ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')

    itemlist.extend(acciones_fichas(item, sid, ficha, season=True))
    itemlist.append(
        item.clone(action="acciones_cuenta",
                   title="Añadir a una lista",
                   text_color=color3,
                   ficha=ficha))

    return itemlist
Ejemplo n.º 41
def listas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = xml2dict(data)
    if item.extra == "listas":
        itemlist.append(
            Item(channel=item.channel,
                 title="Listas más seguidas",
                 action="listas",
                 text_color=color1,
                 url=item.url + "&orden=1",
                 extra="listas_plus"))
        itemlist.append(
            Item(channel=item.channel,
                 title="Listas con más fichas",
                 action="listas",
                 text_color=color1,
                 url=item.url + "&orden=2",
                 extra="listas_plus"))
        itemlist.append(
            Item(channel=item.channel,
                 title="Listas aleatorias",
                 action="listas",
                 text_color=color1,
                 url=item.url + "&orden=3",
                 extra="listas_plus"))
        if data["Data"]["ListasSiguiendo"] != "\t":
            itemlist.append(
                Item(channel=item.channel,
                     title="Listas que sigo",
                     action="listas",
                     text_color=color1,
                     url=item.url,
                     extra="sigo"))
        if data["Data"]["TusListas"] != "\t":
            itemlist.append(
                Item(channel=item.channel,
                     title="Mis listas",
                     action="listas",
                     text_color=color1,
                     url=item.url,
                     extra="mislistas"))

        return itemlist

    elif item.extra == "sigo":
        data = data["Data"]["ListasSiguiendo"]["Item"]
    elif item.extra == "mislistas":
        data = data["Data"]["TusListas"]["Item"]
    else:
        data = data["Data"]["Listas"]["Item"]

    if type(data) is not list:
        data = [data]
    import random
    for child in data:
        image = ""
        title = "%s (%s fichas)" % (child["Title"], child["FichasInList"])
        images = []
        for i in range(1, 5):
            if "sinimagen.png" not in child["Poster%s" % i]:
                images.append(child["Poster%s" % i].replace("/100/", "/400/"))
        if images:
            image = images[random.randint(0, len(images) - 1)]
        url = host + "/l%s" % child["Id"]
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 url=url,
                 text_color=color3,
                 thumbnail=image,
                 title=title,
                 extra=item.extra))

    if len(itemlist) == 20:
        start = scrapertools.find_single_match(item.url, 'start=(\d+)')
        end = int(start) + 20
        url = re.sub(r'start=%s' % start, 'start=%s' % end, item.url)
        itemlist.append(item.clone(title=">> Página Siguiente", url=url))

    return itemlist
Ejemplo n.º 42
def episodios(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    if not item.infoLabels["tmdb_id"]:
        item.infoLabels["tmdb_id"] = scrapertools.find_single_match(
            data, '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
        item.infoLabels["year"] = scrapertools.find_single_match(
            data, 'class="e_new">(\d{4})')
    if not item.infoLabels["genre"]:
        item.infoLabels["genre"] = ", ".join(
            scrapertools.find_multiple_matches(
                data, '<a itemprop="genre"[^>]+>([^<]+)</a>'))
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(
            data, 'itemprop="description">([^<]+)</div>')

    dc = scrapertools.find_single_match(data, "var dc_ic = '\?dc=([^']+)'")
    patron = '<div class="f_cl_l_c f_cl_l_c_id[^"]+" c_id="([^"]+)" .*?c_num="([^"]+)" c_name="([^"]+)"' \
             '.*?load_f_links\(\d+\s*,\s*(\d+).*?<div class="([^"]+)" onclick="marcar_capitulo'
    matches = scrapertools.find_multiple_matches(data, patron)
    lista_epis = []
    for c_id, episodio, title, ficha, status in matches:
        episodio = episodio.replace("X", "x")
        if episodio in lista_epis:
            continue
        lista_epis.append(episodio)
        url = "https://playmax.mx/c_enlaces_n.php?ficha=%s&c_id=%s&dc=%s" % (
            ficha, c_id, dc)
        title = "%s - %s" % (episodio, title)
        if "_mc a" in status:
            title = "[COLOR %s]%s[/COLOR] %s" % (
                color5, u"\u0474".encode('utf-8'), title)

        new_item = Item(channel=item.channel,
                        action="findvideos",
                        title=title,
                        url=url,
                        thumbnail=item.thumbnail,
                        fanart=item.fanart,
                        show=item.show,
                        infoLabels=item.infoLabels,
                        text_color=color2,
                        referer=item.url,
                        contentType="episode")
        try:
            # Store season/episode as integers so the sort below is numeric
            season, episode = episodio.split('x', 1)
            new_item.infoLabels["season"] = int(season)
            new_item.infoLabels["episode"] = int(episode)
        except:
            pass
        itemlist.append(new_item)

    itemlist.sort(key=lambda it:
                  (it.infoLabels["season"], it.infoLabels["episode"]),
                  reverse=True)
    if __modo_grafico__:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    library_path = config.get_videolibrary_path()
    if config.get_videolibrary_support() and not item.extra:
        title = "Añadir serie a la videoteca"
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith(
                "smb://"):
            try:
                from core import filetools
                path = filetools.join(library_path, "SERIES")
                files = filetools.walk(path)
                for dirpath, dirname, filename in files:
                    if item.infoLabels["imdb_id"] in dirpath:
                        for f in filename:
                            if f != "tvshow.nfo":
                                continue
                            from core import videolibrarytools
                            head_nfo, it = videolibrarytools.read_nfo(
                                filetools.join(dirpath, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "playmax" in canales:
                                canales.pop(canales.index("playmax"))
                                canales.insert(0, "[COLOR red]playmax[/COLOR]")
                            title = "Serie ya en tu videoteca. [%s] ¿Añadir?" % ",".join(
                                canales)
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())
                pass

        itemlist.append(
            item.clone(action="add_serie_to_library",
                       title=title,
                       text_color=color5,
                       extra="episodios###library"))
    if itemlist and not __menu_info__:
        ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
        itemlist.extend(acciones_fichas(item, sid, ficha))

    return itemlist
Ejemplo n.º 43
def play(item):
    logger.info("url=%s" % item.url)
    itemlist = []

    if item.url.startswith("https://pelispedia.video/v.php"):

        headers = {'Referer': item.referer}
        resp = httptools.downloadpage(item.url, headers=headers, cookies=False)
        
        # Look for Cloudflare's __cfduid cookie in the response headers; it
        # must be sent back for the follow-up request to be accepted
        for h in resp.headers:
            ck = scrapertools.find_single_match(resp.headers[h], '__cfduid=([^;]*)')
            if ck:
                gsv = scrapertools.find_single_match(resp.data, '<meta name="google-site-verification" content="([^"]*)"')
                token = generar_token(gsv, 'b0a8c83650f18ccc7c87b16e3c460474'+'yt'+'b0a8c83650f18ccc7c87b16e3c460474'+'2653')
                playparms = scrapertools.find_single_match(resp.data, 'Play\("([^"]*)","([^"]*)","([^"]*)"')
                if playparms:
                    link = playparms[0]
                    subtitle = '' if playparms[1] == '' or playparms[2] == '' else playparms[2] + playparms[1] + '.srt'
                else:
                    link = scrapertools.find_single_match(item.url, 'id=([^;]*)')
                    subtitle = ''
                # ~ logger.info("gsv: %s token: %s ck: %s link: %s" % (gsv, token, ck, link))

                post = "link=%s&token=%s" % (link, token)
                headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': '__cfduid=' + ck}
                data = httptools.downloadpage("https://pelispedia.video/plugins/gkpedia.php", post=post, headers=headers, cookies=False).data
                
                mp4 = scrapertools.find_single_match(data, '"link":"([^"]*)')
                if mp4:
                    mp4 = mp4.replace('\/', '/')
                    if 'chomikuj.pl/' in mp4: mp4 += "|Referer=%s" % item.referer
                    itemlist.append(['.mp4', mp4, 0, subtitle])
                
                break


    elif item.url.startswith("https://load.pelispedia.vip/embed/"):
        
        headers = {'Referer': item.referer}
        resp = httptools.downloadpage(item.url, headers=headers, cookies=False)

        for h in resp.headers:
            ck = scrapertools.find_single_match(resp.headers[h], '__cfduid=([^;]*)')
            if ck:
                gsv = scrapertools.find_single_match(resp.data, '<meta name="google-site-verification" content="([^"]*)"')
                token = generar_token(gsv, '4fe554b59d760c9986c903b07af8b7a4'+'yt'+'4fe554b59d760c9986c903b07af8b7a4'+'785446346')
                url = item.url.replace('/embed/', '/stream/') + '/' + token
                # ~ logger.info("gsv: %s token: %s ck: %s" % (gsv, token, ck))

                headers = {'Referer': item.url, 'Cookie': '__cfduid=' + ck}
                data = httptools.downloadpage(url, headers=headers, cookies=False).data
                
                url = scrapertools.find_single_match(data, '<meta (?:name|property)="og:url" content="([^"]+)"')
                srv = scrapertools.find_single_match(data, '<meta (?:name|property)="og:sitename" content="([^"]+)"')
                if srv == '' and 'rapidvideo.com/' in url: srv = 'rapidvideo'

                if url != '' and srv != '':
                    itemlist.append(item.clone(url=url, server=srv.lower()))

                elif '<title>Vidoza</title>' in data or '|fastplay|' in data:
                    if '|fastplay|' in data:
                        packed = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval\(.*?)</script>")
                        from lib import jsunpack
                        data = jsunpack.unpack(packed)
                        data = data.replace("\\'", "'")

                    matches = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"\s*,\s*label\s*:\s*"([^"]+)"')
                    subtitle = ''
                    for fil, lbl in matches:
                        if fil.endswith('.srt') and not fil.endswith('empty.srt'):
                            subtitle = fil
                            if not subtitle.startswith('http'):
                                domi = scrapertools.find_single_match(data, 'aboutlink\s*:\s*"([^"]*)')
                                subtitle = domi + subtitle
                            break

                    for fil, lbl in matches:
                        if not fil.endswith('.srt'):
                            itemlist.append([lbl, fil, 0, subtitle])

                break


    else:
        itemlist = servertools.find_video_items(data=item.url)
        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.channel = __channel__

    logger.info("retorna itemlist: %s" % itemlist)
    return itemlist
Ejemplo n.º 44
def indices(item):
    logger.info()
    itemlist = []

    tipo = "2"
    if item.contentType == "tvshow":
        tipo = "1"
    if "Índices" in item.title:
        if item.contentType == "tvshow":
            itemlist.append(
                item.clone(title="Populares",
                           action="fichas",
                           url=host + "/catalogo.php?tipo[]=1&ad=2&"
                           "ordenar=pop&con_dis=on"))
        itemlist.append(
            item.clone(title="Más vistas",
                       action="fichas",
                       url=host + "/catalogo.php?tipo[]=%s&ad=2&"
                       "ordenar=siempre&con_dis=on" % tipo))
        itemlist.append(
            item.clone(title="Mejor valoradas",
                       action="fichas",
                       url=host + "/catalogo.php?tipo[]=%s&ad=2&"
                       "ordenar=valoracion&con_dis=on" % tipo))
        itemlist.append(item.clone(title="Géneros",
                                   url=host + "/catalogo.php"))
        itemlist.append(item.clone(title="Idiomas",
                                   url=host + "/catalogo.php"))
        if item.contentType == "movie":
            itemlist.append(
                item.clone(title="Por calidad", url=host + "/catalogo.php"))
        itemlist.append(item.clone(title="Por año"))
        itemlist.append(
            item.clone(title="Por país", url=host + "/catalogo.php"))

        return itemlist

    if "Géneros" in item.title:
        data = httptools.downloadpage(item.url).data
        patron = '<div class="sel gen" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(data, patron)
        for value, genero in matches:
            url = item.url + "?tipo[]=%s&generos[]=%s&ad=2&ordenar=novedades&con_dis=on" % (
                tipo, value)
            itemlist.append(item.clone(action="fichas", title=genero, url=url))
    elif "Idiomas" in item.title:
        data = httptools.downloadpage(item.url).data
        bloque = scrapertools.find_single_match(
            data, 'oname="Idioma">Cualquier(.*?)<input')
        patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for value, idioma in matches:
            url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&e_idioma=%s" % (
                tipo, value)
            itemlist.append(item.clone(action="fichas", title=idioma, url=url))
    elif "calidad" in item.title:
        data = httptools.downloadpage(item.url).data
        bloque = scrapertools.find_single_match(
            data, 'oname="Calidad">Cualquier(.*?)<input')
        patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for value, calidad in matches:
            url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&e_calidad=%s" % (
                tipo, value)
            itemlist.append(item.clone(action="fichas", title=calidad,
                                       url=url))
    elif "país" in item.title:
        data = httptools.downloadpage(item.url).data
        bloque = scrapertools.find_single_match(
            data, 'oname="País">Todos(.*?)<input')
        patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for value, pais in matches:
            url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&pais=%s" % (
                tipo, value)
            itemlist.append(item.clone(action="fichas", title=pais, url=url))
    else:
        from datetime import datetime
        year = datetime.now().year
        for i in range(year, 1899, -1):
            url = "%s/catalogo.php?tipo[]=%s&del=%s&al=%s&año=personal&ad=2&ordenar=novedades&con_dis=on" \
                  % (host, tipo, i, i)
            itemlist.append(item.clone(action="fichas", title=str(i), url=url))

    return itemlist
Ejemplo n.º 45
def lista(item):
    logger.info()
    itemlist = []
    if item.post:
        soup = create_soup(item.url, item.post)
        offset = item.offset
    else:
        soup = create_soup(item.url)
    matches = soup.find_all('li', class_='poptrigger')
    for elem in matches:
        url = elem.a['href']
        title = elem.img['alt']
        thumbnail = elem.img['src']
        if ".gif" in thumbnail:
            thumbnail = elem.img['data-src']
        time = elem.find('span', class_='duration').text.strip()
        quality = elem.find('h3', class_='video-thumb-title  hd')
        if quality:
            title = "[COLOR yellow]%s[/COLOR] [COLOR red]HD[/COLOR] %s" % (
                time, title)
        else:
            title = "[COLOR yellow]%s[/COLOR] %s" % (time, title)
        if not thumbnail.startswith("https"):
            thumbnail = "https:%s" % thumbnail
        plot = ""
        itemlist.append(
            item.clone(
                action="play",
                title=title,
                url=url,
                thumbnail=thumbnail,
                fanart=thumbnail,
                plot=plot,
            ))
    next_page = soup.find('link', rel='next')
    next = soup.find('button', id='show-more-videos-btn')
    if next:
        total = next.find('span', id='remaining-video-num').text
        offset = 41
    if next_page:
        next_page = next_page['href']
        next_page = urlparse.urljoin(host, next_page)
        itemlist.append(
            item.clone(action="lista",
                       title="[COLOR blue]Página Siguiente >>[/COLOR]",
                       url=next_page))
    if "/channels/" in item.url:
        if next:
            next_page = "https://www.gotporn.com/channels/%s/get-more-videos " % next[
                'data-id']
        else:
            id = scrapertools.find_single_match(
                item.url, 'https://www.gotporn.com/channels/(\d+)')
            next_page = "https://www.gotporn.com/channels/%s/get-more-videos " % id
        post = {"offset": "%s" % offset}
        offset += 15
        itemlist.append(
            item.clone(action="lista",
                       title="[COLOR blue]Página Siguiente >>[/COLOR]",
                       url=next_page,
                       post=post,
                       offset=offset))
    return itemlist
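
create_soup is called above but not defined in this excerpt. A plausible minimal sketch, assuming it wraps the module-level httptools import used elsewhere in these channels plus BeautifulSoup; the channel's real helper may differ:

from bs4 import BeautifulSoup

def create_soup(url, post=None):
    # Hypothetical sketch: fetch the page (as a POST when form data is given)
    # and hand the HTML to BeautifulSoup, which is what the calls above assume
    if isinstance(post, dict):
        import urllib
        post = urllib.urlencode(post)
    data = httptools.downloadpage(url, post=post).data
    return BeautifulSoup(data, "html.parser")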
Ejemplo n.º 46
def mainlist(item):
    logger.info("deportesalacarta.livesportsws lista")
    itemlist = []
    import xbmc
    check=xbmc.getInfoLabel('ListItem.Title')
    
    if item.channel != __channel__:
        item.channel = __channel__
    else:
        if not xbmc.Player().isPlaying():
            xbmc.executebuiltin('xbmc.PlayMedia(' + song + ')')
    
    
    
    """
        Lo que ocurre con
        url = http://translate.googleusercontent.com/translate_c?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/&usg=ALkJrhgzJfI1TDn3BxGgPbjgAHHS7J0i9g
        Redirecciones:
        1. http://translate.google.com/translate?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/
        2. http://translate.googleusercontent.com/translate_p?nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/&depth=2&usg=ALkJrhgAAAAAVupk4tLINTbmU7JrcQdl0G4V3LtnRM1n
        3. http://translate.googleusercontent.com/translate_c?depth=2&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/&usg=ALkJrhhhRDwHSDRDN4t27cX5CYZLFFQtmA
        Lo que significa que necesitamos una key nueva cada vez en el argumento "usg" y para llegar a la url 3 debemos hacer la petición 1 y 2 con 'follow_redirects=False' o con la convinación de 'follow_redirects=False' y 'header_to_get="location"'
        """
    
    #### Option 1: 'follow_redirects=False'
    ## Request 1
    url = "http://translate.google.com/translate?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://livesport.ws/football"
    data = dhe(httptools.downloadpage(url, follow_redirects=False).data)#.decode('cp1251').encode('utf8')
    ## Request 2
    url = scrapertools.get_match(data, ' src="([^"]+)" name=c ')
    data = dhe(httptools.downloadpage(url, follow_redirects=False).data)#.decode('cp1251').encode('utf8')
    ## Request 3
    url = scrapertools.get_match(data, 'URL=([^"]+)"')
    data = dhe(httptools.downloadpage(url).data)#.decode('cp1251').encode('utf8')
    """
        #### Opción 2: 'follow_redirects=False' y 'header_to_get="location"'
        ## Petición 1
        url = "http://translate.google.com/translate?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/"
        data = dhe( scrapertools.downloadpage(url,follow_redirects=False) )#.decode('cp1251').encode('utf8')
        ## Petición 2
        url = scrapertools.get_match(data, ' src="([^"]+)" name=c ')
        url = scrapertools.get_header_from_response(url, header_to_get="location")
        ## Petición 3
        data = dhe( scrapertools.cachePage(url ) )#.decode('cp1251').encode('utf8')
        """
    
    
    
    patrondata = '</h1></div>(.*?)</h2>'
    matchesdata = re.compile(patrondata,re.DOTALL).findall(data)
    for bloque_data in matchesdata:
            patrondaygame = '<span class=text>.*?<span class=text>(.*?)</span></a>(.*?)</span> --></li></ul></div>'
            matchesdaygame = re.compile(patrondaygame,re.DOTALL).findall(bloque_data)

            for day , bloque_games in matchesdaygame:
                day = re.sub(r"</span>|<i class=ico><span>de</span></i>|<span class=text>|de","",day)
                day = day.replace("actuales","Hoy")
                day = scrapertools.htmlclean(day)
                dia = scrapertools.get_match(day, '(\d+)')
                mes = re.sub(r"(?i)de |hoy |ayer |mañana |el |día ", "", day)
                mes_ = scrapertools.find_single_match(mes, '\d+\s*([A-z]+)')
                if not mes_:
                    mes_ = scrapertools.find_single_match(mes, '([A-z]+)\s*\d+,')
                mes = mes_.title()
                mes = month_convert(mes)
                mes = str(mes).zfill(2)
                
                if "hoy" in day or "Hoy" in day:
                    day = day.replace(day,"[COLOR yellow][B]"+day+"[/B][/COLOR]")
                elif "Ayer" in day or "ayer" in day:
                      day = day.replace(day,"[COLOR darkgoldenrod][B]"+day+"[/B][/COLOR]")
                else:
                     day = day.replace(day,"[COLOR greenyellow][B]"+day+"[/B][/COLOR]")
                itemlist.append( Item(channel=__channel__, title=day,action="mainlist",url="",fanart="http://www.easywallprints.com/upload/designs/background-with-soccer-balls-zoom-1.jpg",thumbnail="http://s6.postimg.org/3yl2y4adt/livesportagenda.png",folder=False) )
                
                patron = 'es&u=(.*?)&usg.*?id=event-(.*?)>(.*?)</i>.*?<span class=competition>.*?<span class=competition>(.*?)</span></a>.*?<i class="separator">.*?</span>(.*?)</span>.*?src=(.*?)>.*?src=(.*?)>.*?text-align: left">.*?</span>(.*?)</span>.*?<i class="live-broadcasting-status-(\d)"'#'<a class="link" href="([^"]+)" title="(.*?)".*?<span class="liga"><span>(.*?)</span></span>.*?<span class="date"><span>(.*?)</span></span>'
                matches = re.compile(patron,re.DOTALL).findall(bloque_games)
                for url_info,id_event, hora,competition,team1,thumbnail,fanart,team2 , status in matches:
                    team1 = re.sub(r"-"," ",team1)
                    team2=  re.sub(r"-"," ",team2)
                    competition = re.sub(r"\.","",competition)
                    
                    
                    if status == "4":
                        continue
                    
                    if "00:" in hora:
                        hora = hora.replace("00:","24:")
                    
                    if not "LIVE" in hora:
                       time= re.compile('(\d+):(\d+)',re.DOTALL).findall(hora)
                       for horas, minutos in time:
                           wrong_time =int(horas)
                           value = 1
                           correct_time = wrong_time - value
                           correct_time = str(correct_time)
                           hora = correct_time +":"+ minutos
                           
                           
                    
                
                    if "OFFLINE" in hora:
                        extra = hora
                        title = team1+"-"+team2+"____"
                        title = title.title()
                        fulltitle =title.replace(title,"[COLOR burlywood][B]"+title+"[/B][/COLOR]")
                        title= title.replace(title,"[COLOR burlywood]"+title+"[/COLOR]")
                        action = "mainlist"
                        folder = False
                        evento = ""
                        time = ""
                        fecha = ""
                    else:
                        if "hoy" in day or "Hoy" in day:
                            title = team1 + " - " + team2
                            title = title.title()
                            fulltitle = "[COLOR deepskyblue][B]" + title + "[/B][/COLOR]"
                            if "LIVE" in hora:
                                time = "live"
                                fecha = (dia + "/" + str(mes)).strip()
                                evento = team1 + " vs " + team2
                                extra = hora
                                # "live!!" rendered with Cyrillic lookalike characters
                                hora = u'\u006C\u0456\u0475\u04BC!!'.encode('utf-8')
                                hora = "[COLOR crimson][B]" + hora + "[/B][/COLOR]"
                            else:
                                evento = team1 + " vs " + team2
                                time = hora.strip()
                                fecha = (dia + "/" + str(mes)).strip()
                                extra = hora
                                hora = "[COLOR aquamarine][B]" + hora + "[/B][/COLOR]"

                            title = hora + "  " + "[COLOR deepskyblue]" + title + "[/COLOR]" + "[COLOR floralwhite] (" + competition + ")[/COLOR]"
                            action = "enlaces"
                            folder = True
                        else:
                            title = team1 + " - " + team2
                            evento = team1 + " vs " + team2
                            time = hora
                            fecha = dia + "/" + mes
                            title = title.title()
                            fulltitle = "[COLOR mediumaquamarine][B]" + title + "[/B][/COLOR]"
                            title = "[COLOR aquamarine][B]" + hora + "[/B][/COLOR]" + "  " + "[COLOR mediumaquamarine]" + title + "[/COLOR]" + "[COLOR paleturquoise] (" + competition + ")[/COLOR]"
                            action = "enlaces"
                            folder = True
                            extra = hora
                            
                    # build the AJAX URL the site itself uses to refresh this event's links
                    post_id = scrapertools.get_match(url_info, 'http.*?livesport.ws\/(.*?)-')
                    url = "http://livesport.ws/engine/modules/sports/sport_refresh.php?from=event&event_id=" + id_event + "&tab_id=0&post_id=" + post_id
                    
                    itemlist.append( Item(channel=__channel__, title="     "+title,action=action,url=url,thumbnail =urlparse.urljoin(host,thumbnail),fanart =urlparse.urljoin(host,fanart),fulltitle = fulltitle,extra =extra,date=fecha, time=time, evento=evento, context="info_partido",deporte="futbol",folder=folder) )

    return itemlist
Ejemplo n.º 47
def play(item):
    logger.info("[thegroove360.cineblog01] play")
    itemlist = []

    ### Handling new cb01 wrapper
    if host[9:] + "/film/" in item.url:
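        # resolve the wrapper by reading the redirect Location header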
        iurl = httptools.downloadpage(item.url,
                                      only_headers=True,
                                      follow_redirects=False).headers.get(
                                          "location", "")
        logger.info("/film/ wrapper: %s" % iurl)
        if iurl:
            item.url = iurl

    if '/goto/' in item.url:
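        # the real target URL is base64-encoded after the '/goto/' marker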
        item.url = item.url.split('/goto/')[-1].decode('base64')

    item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw')

    logger.debug(
        "##############################################################")
    if "go.php" in item.url:
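        # go.php interstitial: the real link sits in a JS redirect, a "clicca qui"
        # style anchor, or the Location header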
        data = httptools.downloadpage(item.url, headers=headers).data
        try:
            data = scrapertools.get_match(data,
                                          'window.location.href = "([^"]+)";')
        except IndexError:
            try:
                # data = scrapertools.get_match(data, r'<a href="([^"]+)">clicca qui</a>')
                # Alternatively, since "Clicca qui per proseguire" sometimes appears:
                data = scrapertools.get_match(
                    data,
                    r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
            except IndexError:
                data = httptools.downloadpage(
                    item.url, only_headers=True,
                    follow_redirects=False).headers.get("location", "")
        logger.debug("##### play go.php data ##\n%s\n##" % data)
    elif "/link/" in item.url:
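        # /link/ pages hide the URL inside packed JavaScript (var link = "...")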
        data = httptools.downloadpage(item.url, headers=headers).data
        from lib import jsunpack

        try:
            data = scrapertools.get_match(
                data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
            data = jsunpack.unpack(data)
            logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
        except IndexError:
            logger.debug("##### The content is not packed ##\n%s\n##" % data)

        data = scrapertools.find_single_match(
            data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')

        if data.startswith('/'):
            data = urlparse.urljoin("http://swzz.xyz", data)
            data = httptools.downloadpage(data, headers=headers).data
        logger.debug("##### play /link/ data ##\n%s\n##" % data)
    else:
        data = item.url
        logger.debug("##### play else data ##\n%s\n##" % data)
    logger.debug(
        "##############################################################")

    try:
        itemlist = servertools.find_video_items(data=data)

        for videoitem in itemlist:
            videoitem.title = item.show
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__
    except AttributeError:
        logger.error("vcrypt data doesn't contain expected URL")

    return itemlist
Ejemplo n.º 48
def listado(item):
    logger.info()
    itemlist = []

    action = "findvideos"
    content_type = "movie"

    if item.extra == 'serie':
        action = "temporadas"
        content_type = "tvshow"

    # ~ data = httptools.downloadpage(item.url).data
    data = obtener_data(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    # logger.info("data -- {}".format(data))

    patron = '<li[^>]+><a href="([^"]+)" alt="([^<|\(]+).*?<img src="([^"]+).*?>.*?<span>\(([^)]+).*?' \
             '<p class="font12">(.*?)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches[:28]:
        title = "%s (%s)" % (scrapertools.unescape(scrapedtitle.strip()), scrapedyear)
        plot = scrapertools.entityunescape(scrapedplot)

        new_item = Item(channel=__channel__, title=title, url=urlparse.urljoin(CHANNEL_HOST, scrapedurl), action=action,
                        thumbnail=scrapedthumbnail, plot=plot, context="", extra=item.extra,
                        contentType=content_type, fulltitle=title)

        if item.extra == 'serie':
            new_item.show = scrapertools.unescape(scrapedtitle.strip())
            # fix: in some cases the url is wrong
            new_item.url = new_item.url.replace(CHANNEL_HOST + "pelicula", CHANNEL_HOST + "serie")
        else:
            new_item.fulltitle = scrapertools.unescape(scrapedtitle.strip())
            new_item.infoLabels = {'year': scrapedyear}
            # logger.debug(new_item.tostring())

        itemlist.append(new_item)

    # Fetch the basic data for all the movies using multiple threads
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    # number of records shown per page; fixed at 28 per pagination
    if len(matches) >= 28 and '/buscar/?' not in item.url:
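        # query the site's paging API for the next block of 28 results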

        file_php = "666more"
        tipo_serie = ""

        if item.extra == "movies":
            anio = scrapertools.find_single_match(item.url, "(?:year=)(\w+)")
            letra = scrapertools.find_single_match(item.url, "(?:letra=)(\w+)")
            genero = scrapertools.find_single_match(item.url, "(?:gender=|genre=)(\w+)")
            params = "letra=%s&year=%s&genre=%s" % (letra, anio, genero)

        else:
            tipo2 = scrapertools.find_single_match(item.url, "(?:series/|tipo2=)(\w+)")
            tipo_serie = "&tipo=serie"

            if tipo2 != "all":
                file_php = "letra"
                tipo_serie += "&tipo2=" + tipo2

            genero = ""
            if tipo2 == "anio":
                genero = scrapertools.find_single_match(item.url, "(?:anio/|genre=)(\w+)")
            if tipo2 == "genero":
                genero = scrapertools.find_single_match(item.url, "(?:genero/|genre=)(\w+)")
            if tipo2 == "letra":
                genero = scrapertools.find_single_match(item.url, "(?:letra/|genre=)(\w+)")

            params = "genre=%s" % genero

        url = "http://www.pelispedia.tv/api/%s.php?rangeStart=28&rangeEnd=28%s&%s" % (file_php, tipo_serie, params)

        if "rangeStart" in item.url:
            ant_inicio = scrapertools.find_single_match(item.url, "rangeStart=(\d+)&")
            inicio = str(int(ant_inicio) + 28)
            url = item.url.replace("rangeStart=" + ant_inicio, "rangeStart=" + inicio)

        itemlist.append(Item(channel=__channel__, action="listado", title=">> Página siguiente", extra=item.extra,
                             url=url, thumbnail=thumbnail_host, fanart=fanart_host))

    return itemlist
Ejemplo n.º 49
def findvideos(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    selector_url = scrapertools.find_multiple_matches(
        data, 'class="metaframe rptss" src="([^"]+)"')

    for lang in selector_url:
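        # each player iframe is one language variant; its URL carries lang= and,
        # for subtitled versions, sub= parameters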
        data = get_source('https:' + lang)
        urls = scrapertools.find_multiple_matches(data,
                                                  'data-playerid="([^"]+)">')
        subs = ''
        lang = scrapertools.find_single_match(lang, 'lang=(.*)?')
        language = IDIOMAS[lang]

        if item.contentType == 'episode':
            quality = 'SD'
        else:
            quality = item.quality

        for url in urls:
            final_url = httptools.downloadpage('https:' + url).data
            if language == 'VOSE':
                sub = scrapertools.find_single_match(url, 'sub=(.*?)&')
                subs = 'https:%s' % sub
            if 'index' in url:
                try:
                    file_id = scrapertools.find_single_match(
                        url, 'file=(.*?)&')
                    post = {'link': file_id}
                    post = urllib.urlencode(post)
                    hidden_url = 'https://streamango.poseidonhd.co/repro/plugins/gkpluginsphp.php'
                    dict_vip_url = httptools.downloadpage(hidden_url,
                                                          post=post).json
                    url = dict_vip_url['link']
                except:
                    pass
            else:
                try:

                    if 'openload' in url:
                        file_id = scrapertools.find_single_match(
                            url, 'h=(\w+)')
                        post = {'h': file_id}
                        post = urllib.urlencode(post)
                        hidden_url = 'https://streamango.poseidonhd.co/repro/openload/api.php'
                        response = httptools.downloadpage(
                            hidden_url, post=post, follow_redirects=False)
                        # look for the link in the response body first,
                        # then fall back to the JSON payload
                        url = scrapertools.find_single_match(
                            response.data, "VALUES \('[^']+','([^']+)'")
                        if not url:
                            url = response.json['url']
                        if not url:
                            continue
                    else:
                        new_data = httptools.downloadpage('https:' + url).data
                        file_id = scrapertools.find_single_match(
                            new_data, 'value="([^"]+)"')
                        post = {'url': file_id}
                        post = urllib.urlencode(post)
                        hidden_url = 'https://streamango.poseidonhd.co/repro/r.php'
                        data_url = httptools.downloadpage(
                            hidden_url, post=post, follow_redirects=False)
                        url = data_url.headers['location']
                except:
                    pass
            url = url.replace(" ", "%20")
            itemlist.append(
                item.clone(title='[%s] [%s]',
                           url=url,
                           action='play',
                           subtitle=subs,
                           language=language,
                           quality=quality,
                           infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % (x.server.capitalize(), x.language))

    # Required for link filtering

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 50
def listado(item):
    logger.info()
    itemlist = []

    try:
        data_dict = jsontools.load_json(httptools.downloadpage(item.url).data)
    except:
        return itemlist  # return an empty list

    # Filtering and search
    if item.filtro:
        for i in data_dict["result"][:]:
            if (item.filtro[0] == "genero" and item.filtro[1] not in i['genre'].lower()) or \
                (item.filtro[0] == "search" and item.filtro[1] not in i['title'].lower()):
                    data_dict["result"].remove(i)


    if not item.page:
        item.page = 0

    # paginate the JSON result list locally, 60 entries per page
    offset = int(item.page) * 60
    limit = offset + 60

    for i in data_dict["result"][offset:limit]:
        infoLabels = InfoLabels()
        idioma = ''

        if item.extra == "movie":
            action = "get_movie"
            #viewcontent = 'movies'
            infoLabels["title"] = i["title"]
            title = '%s (%s)' % (i["title"], i['year'])
            url = urlparse.urljoin(__url_base__, "ver-pelicula-online/" + str(i["id"]))

        elif item.extra == "series":
            action = "get_temporadas"
            #viewcontent = 'seasons'
            title = i["title"]
            infoLabels['tvshowtitle'] = i["title"]
            url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"]))

        else:  # item.extra == "series_novedades"
            action = "get_only_episodio"
            #viewcontent = 'episodes'
            infoLabels['season'] = i['season']
            infoLabels['episode'] = i['episode'].zfill(2)
            item.extra = "%sx%s" % (infoLabels["season"], infoLabels["episode"])
            infoLabels['tvshowtitle'] = i["title"]
            flag = scrapertools.find_single_match(i["label"], '(\s*\<img src=.*\>)')
            idioma = i["label"].replace(flag, "")
            title = '%s %s (%s)' % (i["title"], item.extra, idioma)
            url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"]))
        
        if i.has_key("poster") and i["poster"]:
            thumbnail = re.compile("/w\d{3}/").sub("/w500/", i["poster"])
        else:
            thumbnail = item.thumbnail
        if i.has_key("background") and i["background"]:
            fanart = i["background"]
        else:
            fanart = item.fanart
        
        # Fill in the infoLabels dictionary
        infoLabels['title_id'] = i['id']  # title_id: identifier of the movie/show on pepecine.com
        infoLabels['titleraw'] = i["title"]  # titleraw: unformatted title of the movie/show
        if i['genre']: infoLabels['genre'] = i['genre']
        if i['year']: infoLabels['year'] = i['year']
        if i['tagline']: infoLabels['plotoutline'] = i['tagline']
        if i['plot']:
            infoLabels['plot'] = i['plot']
        else:
            infoLabels['plot'] = ""
        if i['runtime']: infoLabels['duration'] = int(i['runtime']) * 60
        if i['imdb_rating']:
            infoLabels['rating'] = i['imdb_rating']
        elif i['tmdb_rating']:
            infoLabels['rating'] = i['tmdb_rating']
        if i['tmdb_id']: infoLabels['tmdb_id'] = i['tmdb_id']
        if i['imdb_id']: infoLabels['imdb_id'] = i['imdb_id']

        newItem = Item(channel=item.channel, action=action, title=title, url=url, extra=item.extra,
                         fanart=fanart, thumbnail=thumbnail, viewmode="movie_with_plot", #viewcontent=viewcontent,
                         language=idioma, text_color="0xFFFFCE9C", infoLabels=infoLabels)
        newItem.year = i['year']
        newItem.contentTitle = i['title']
        if 'season' in infoLabels and infoLabels['season']:
            newItem.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            newItem.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(newItem)
    
    # Pagination
    if len(data_dict["result"]) > limit:
        itemlist.append(item.clone(text_color="0xFF994D00", title=">> Pagina siguiente >>", page=item.page + 1))

    return itemlist
Ejemplo n.º 51
def menu_info_episode(item):
    logger.info("pelisalacarta.channels.cinefox menu_info_episode")
    itemlist = []

    data = scrapertools.downloadpage(item.url, headers=headers.items())
    if item.show == "":
        item.show = scrapertools.find_single_match(
            data, 'class="h1-like media-title".*?>([^<]+)</a>')

    episode = scrapertools.find_single_match(
        data, '<span class="indicator">([^<]+)</span>')
    item.infoLabels["season"] = episode.split("x")[0]
    item.infoLabels["episode"] = episode.split("x")[1]

    try:
        from core import tmdb
        tmdb.set_infoLabels_item(item, __modo_grafico__)
    except:
        pass

    if item.infoLabels["plot"] == "":
        sinopsis = scrapertools.find_single_match(
            data, 'id="episode-plot">(.*?)</p>')
        if not "No hay sinopsis" in sinopsis:
            item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    title = "Ver enlaces %s - [" + item.show + " " + episode + "]"
    itemlist.append(
        item.clone(action="findvideos",
                   title=title % "Online",
                   extra="episode",
                   type="streaming"))
    itemlist.append(
        item.clone(action="findvideos",
                   title=title % "de Descarga",
                   extra="episode",
                   type="download"))

    siguiente = scrapertools.find_single_match(
        data,
        '<a class="episode-nav-arrow next" href="([^"]+)" title="([^"]+)"')
    if siguiente:
        titulo = ">> Siguiente Episodio - [" + siguiente[1] + "]"
        itemlist.append(
            item.clone(action="menu_info_episode",
                       title=titulo,
                       url=siguiente[0],
                       extra="",
                       text_color=color1))

    patron = '<a class="episode-nav-arrow previous" href="([^"]+)" title="([^"]+)"'
    anterior = scrapertools.find_single_match(data, patron)
    if anterior:
        titulo = "<< Episodio Anterior - [" + anterior[1] + "]"
        itemlist.append(
            item.clone(action="menu_info_episode",
                       title=titulo,
                       url=anterior[0],
                       extra="",
                       text_color=color3))

    url_serie = scrapertools.find_single_match(
        data, '<a href="([^"]+)" class="h1-like media-title"')
    url_serie += "/episodios"
    itemlist.append(
        item.clone(title="Ir a la lista de capítulos",
                   action="episodios",
                   url=url_serie,
                   extra="",
                   text_color=color4))

    itemlist.append(
        item.clone(channel="trailertools",
                   action="buscartrailer",
                   title="Buscar Tráiler",
                   text_color="magenta",
                   context=""))

    return itemlist
Ejemplo n.º 52
def anime(item):
    logger.info("[italiafilm.py] anime")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)
    patron = '<li class="cat_19(.*?)</li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for match in matches:
        title = scrapertools.find_single_match(
            match, '<span class="tvseries_name">(.*?)</span>')
        t = scrapertools.find_single_match(match, '</i>(.*?)</a>')
        t = scrapertools.decodeHtmlentities(t).strip()
        title = title.replace("Streaming", "")
        title = scrapertools.decodeHtmlentities(title).strip()
        title = title + " - " + t
        url = scrapertools.find_single_match(match, '<a href="([^"]+)"')
        plot = ""
        thumbnail = scrapertools.find_single_match(match,
                                                   'data-echo="([^"]+)"')

        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "]")

        itemlist.append(
            infoSod(Item(
                channel=__channel__,
                extra=item.extra,
                action='episodios' if item.extra == 'serie' else 'findvideos',
                fulltitle=title,
                show=title,
                title="[COLOR azure]" + title + "[/COLOR]",
                url=url,
                thumbnail=thumbnail,
                plot=plot,
                viewmode="movie_with_plot",
                folder=True),
                    tipo='tv'))

    # Next page
    try:
        pagina_siguiente = scrapertools.get_match(
            data, '<a class="next page-numbers" href="([^"]+)"')
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="anime",
                extra=item.extra,
                title="[COLOR orange]Successivo >> [/COLOR]",
                url=pagina_siguiente,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))
    except:
        pass

    return itemlist
Ejemplo n.º 53
def peliculas(item):
    logger.info("pelisalacarta.channels.cinefox peliculas")

    itemlist = []
    if "valores" in item and item.valores:
        itemlist.append(
            item.clone(action="", title=item.valores, text_color=color4))

    if __menu_info__:
        action = "menu_info"
    else:
        action = "findvideos"

    data = scrapertools.downloadpage(item.url)
    bloque = scrapertools.find_multiple_matches(
        data, '<div class="media-card "(.*?)<div class="hidden-info">')
    for match in bloque:
        if item.extra == "mapa":
            patron = '.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
            matches = scrapertools.find_multiple_matches(match, patron)
            for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
                url = urlparse.urljoin(host, scrapedurl)
                itemlist.append(
                    Item(channel=item.channel,
                         action=action,
                         title=scrapedtitle,
                         url=url,
                         extra="media",
                         thumbnail=scrapedthumbnail,
                         contentTitle=scrapedtitle,
                         fulltitle=scrapedtitle,
                         text_color=color2,
                         context="05"))
        else:
            patron = '<div class="audio-info">(.*?)<div class="quality-info".*?>([^<]+)</div>' \
                     '.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
            matches = scrapertools.find_multiple_matches(match, patron)

            for idiomas, calidad, scrapedthumbnail, scrapedurl, scrapedtitle in matches:
                calidad = calidad.capitalize().replace("Hd", "HD")
                audios = []
                if "medium-es" in idiomas: audios.append('CAST')
                if "medium-vs" in idiomas: audios.append('VOSE')
                if "medium-la" in idiomas: audios.append('LAT')
                if "medium-en" in idiomas: audios.append('V.O')
                title = scrapedtitle + "  [" + "/".join(
                    audios) + "] (" + calidad + ")"
                url = urlparse.urljoin(host, scrapedurl)

                itemlist.append(
                    Item(channel=item.channel,
                         action=action,
                         title=title,
                         url=url,
                         extra="media",
                         thumbnail=scrapedthumbnail,
                         contentTitle=scrapedtitle,
                         fulltitle=scrapedtitle,
                         text_color=color2,
                         context="05"))

    next_page = scrapertools.find_single_match(
        data, 'href="([^"]+)"[^>]+>Siguiente')
    if next_page != "" and item.title != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title=">> Siguiente",
                 url=next_page,
                 thumbnail=item.thumbnail,
                 extra=item.extra,
                 text_color=color3))

        if not config.get_setting("last_page",
                                  item.channel) and config.is_xbmc():
            itemlist.append(
                Item(channel=item.channel,
                     action="select_page",
                     title="Ir a página...",
                     url=next_page,
                     thumbnail=item.thumbnail,
                     text_color=color5))

    return itemlist
Ejemplo n.º 54
def findvideos(item):
    logger.info("pelisalacarta.channels.cinefox findvideos")
    itemlist = []

    if not "|" in item.extra and not __menu_info__:
        data = scrapertools.downloadpage(item.url, headers=headers.items())
        year = scrapertools.find_single_match(
            data, '<div class="media-summary">.*?release.*?>(\d+)<')
        if year != "" and not "tmdb_id" in item.infoLabels:
            try:
                from core import tmdb
                item.infoLabels["year"] = year
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass

        if item.infoLabels["plot"] == "":
            sinopsis = scrapertools.find_single_match(
                data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
            item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    id = scrapertools.find_single_match(item.url, '/(\d+)/')
    if "|" in item.extra or not __menu_info__:
        extra = item.extra
        if "|" in item.extra:
            extra = item.extra[:-1]
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (
            id, extra, "streaming")
        itemlist.extend(get_enlaces(item, url, "Online"))
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (
            id, extra, "download")
        itemlist.extend(get_enlaces(item, url, "de Descarga"))

        if extra == "media":
            data_trailer = scrapertools.downloadpage(
                "http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id,
                headers=headers.items())
            trailer_url = jsontools.load_json(data_trailer)["video"]["url"]
            if trailer_url != "":
                item.infoLabels["trailer"] = trailer_url

            title = "Ver enlaces %s - [" + item.contentTitle + "]"
            itemlist.append(
                item.clone(channel="trailertools",
                           action="buscartrailer",
                           title="Buscar Tráiler",
                           text_color="magenta",
                           context=""))

            if config.get_library_support() and not "|" in item.extra:
                itemlist.append(
                    Item(channel=item.channel,
                         action="add_pelicula_to_library",
                         text_color=color5,
                         title="Añadir película a la biblioteca",
                         url=item.url,
                         thumbnail=item.thumbnail,
                         fanart=item.fanart,
                         fulltitle=item.fulltitle,
                         extra="media|"))
    else:
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (
            id, item.extra, item.type)
        type = item.type.replace("streaming",
                                 "Online").replace("download", "de Descarga")
        itemlist.extend(get_enlaces(item, url, type))

    return itemlist
Ejemplo n.º 55
def findvideos(item):
    logger.info()

    itemlist = []
    itemlist2 = []
    headers = {'Referer': item.url}

    server_url = {
        'gamovideo': 'http://gamovideo.com/embed-%s.html',
        'gounlimited': 'https://gounlimited.to/embed-%s.html',
        'streamplay': 'https://streamp1ay.me/player-%s.html',
        'powvideo': 'https://powvldeo.net/iframe-%s-1536x701.html',
        'vidcloud': 'https://vidcloud.co/player?fid=%s&page=embed',
        'vidlox': 'https://vidlox.me/embed-%s.html',
        'clipwatching': 'https://clipwatching.com/embed-%s.html',
        'jetload': 'https://jetload.net/e/%s',
        'mixdrop': 'https://mixdrop.co/e/%s'
    }

    data = get_source(item.url)
    s_id = scrapertools.find_single_match(
        data, r'id="loadVideos".*?secid="(\w\d+)"')

    if s_id:
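        # when a secid is present, the server list is fetched with an AJAX POST
        # to the site's json/loadVIDEOS endpoint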
        import requests
        url = host + 'json/loadVIDEOS'
        header = {
            'User-Agent':
            'Mozilla/5.0 (Android 10; Mobile; rv:70.0) Gecko/70.0 Firefox/70.0'
        }
        session = requests.Session()
        page = session.post(url, data={'id': s_id}, headers=header).json()

        if page.get('status', '') == 200:
            data2 = page['result']
            patron = r"C_One\(this, (\d+), '([^']+)'.*?"
            patron += r'src=".*?/img/(\w+)'
            matches = re.compile(patron, re.DOTALL).findall(data2)
            for language, url, server in matches:

                req = httptools.downloadpage(url,
                                             headers=headers,
                                             follow_redirects=False)
                location = req.headers.get('location', None)

                if location:
                    url = location
                else:
                    new_data = req.data.replace("'", '"')
                    url = scrapertools.find_single_match(
                        new_data, 'file": "([^"]+)"')
                if not url:
                    continue
                try:
                    server = server.split(".")[0]
                except:
                    server = ""

                if 'betaserver' in server:
                    server = 'directo'

                lang = IDIOMAS.get(language, 'VO')

                quality = 'Oficial'

                title = '%s [%s] [%s]' % (server.capitalize(), lang, quality)

                itemlist.append(
                    Item(channel=item.channel,
                         title=title,
                         url=url,
                         action='play',
                         language=lang,
                         quality=quality,
                         server=server,
                         headers=headers,
                         infoLabels=item.infoLabels,
                         p_lang=language))

    patron = '<li><a href="([^"]+)".*?<img.*?>([^<]+)<b>([^<]+)<.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, server, quality, language in matches:
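        # normalize the server name: keep the part before the first dot and undo
        # the '1'-for-'l' obfuscation (e.g. streamp1ay -> streamplay)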
        if '/sc_' in url:
            continue
        if url != '':

            try:
                server = server.split(".")[0].replace('1', 'l')
            except:
                continue

            _id = scrapertools.find_single_match(url, r'link/\w+_(.*)')

            url = server_url.get(server, url)

            if not url.startswith(host):
                url = url % _id

            language = scrapertools.find_single_match(language, r'/(\d+)\.png')
            lang = IDIOMAS.get(language, 'VO')

            title = '%s [%s] [%s]' % (server.capitalize(), lang, quality)

            itemlist2.append(
                Item(channel=item.channel,
                     title=title,
                     url=url,
                     action='play',
                     language=lang,
                     quality=quality,
                     server=server,
                     headers=headers,
                     infoLabels=item.infoLabels,
                     p_lang=language))

    itemlist2.sort(key=lambda i: (i.p_lang, i.server))

    itemlist.extend(itemlist2)

    if not itemlist:
        itemlist.append(
            Item(channel=item.channel,
                 folder=False,
                 text_color='tomato',
                 title='[I] Aún no hay enlaces disponibles [/I]'))
        return itemlist

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
Ejemplo n.º 56
def episodios(item):
    logger.info("pelisalacarta.channels.cinefox episodios")
    itemlist = []

    if item.extra == "ultimos":
        data = scrapertools.downloadpage(item.url, headers=headers.items())
        item.url = scrapertools.find_single_match(
            data, '<a href="([^"]+)" class="h1-like media-title"')
        item.url += "/episodios"

    data = scrapertools.downloadpage(item.url, headers=headers.items())

    data_season = data[:]
    headers["Referer"] = item.url

    if item.extra == "episodios" or not __menu_info__:
        action = "findvideos"
    else:
        action = "menu_info_episode"

    seasons = scrapertools.find_multiple_matches(
        data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
    for i, url in enumerate(seasons):
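        # the first season's episode list is already in the page; fetch the rest per season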
        if i != 0:
            data_season = scrapertools.downloadpage(url,
                                                    headers=headers.items())
        patron = '<div class="ep-list-number">.*?href="([^"]+)">([^<]+)</a>.*?<span class="name">([^<]+)</span>'
        matches = scrapertools.find_multiple_matches(data_season, patron)
        for scrapedurl, episode, scrapedtitle in matches:
            item.contentSeason = episode.split("x")[0]
            item.contentEpisodeNumber = episode.split("x")[1]

            title = episode + " - " + scrapedtitle
            extra = "episode"
            if item.extra == "episodios":
                extra = "episode|"
            itemlist.append(
                item.clone(action=action,
                           title=title,
                           url=scrapedurl,
                           text_color=color2,
                           extra=extra))

    if item.extra != "episodios":
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    itemlist.reverse()
    if item.extra != "episodios":
        id = scrapertools.find_single_match(item.url, '/(\d+)/')
        data_trailer = scrapertools.downloadpage(
            "http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id,
            headers=headers.items())
        item.infoLabels["trailer"] = jsontools.load_json(
            data_trailer)["video"]["url"]
        itemlist.append(
            item.clone(channel="trailertools",
                       action="buscartrailer",
                       title="Buscar Tráiler",
                       text_color="magenta"))
        if config.get_library_support():
            itemlist.append(
                Item(channel=item.channel,
                     action="add_serie_to_library",
                     text_color=color5,
                     title="Añadir serie a la biblioteca",
                     show=item.show,
                     thumbnail=item.thumbnail,
                     url=item.url,
                     fulltitle=item.fulltitle,
                     fanart=item.fanart,
                     extra="episodios"))

    return itemlist
Ejemplo n.º 57
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("streamondemand.servers.flashx url=" + page_url)

    # Request the page once
    data = scrapertools.downloadpageWithoutCookies(page_url)
    # If a warning appears, load the verification page and then the initial one
    if "You try to access this video with Kodi" in data:
        url_reload = scrapertools.find_single_match(
            data, 'try to reload the page.*?href="([^"]+)"')
        url_reload = "http://www.flashx.tv" + url_reload[1:]
        try:
            data = scrapertools.downloadpageWithoutCookies(url_reload)
            data = scrapertools.downloadpageWithoutCookies(page_url)
        except:
            pass

    m = ""
    matches = scrapertools.find_multiple_matches(
        data, "<script type='text/javascript'>(.*?)</script>")
    # look for the packed script holding the real sources; decoys lack a long token
    for m in matches:
        if m.startswith("eval"):
            try:
                m = jsunpack.unpack(m)
                fake = (scrapertools.find_single_match(m, "(\w{40,})") == "")
                if fake:
                    m = ""
                else:
                    break
            except:
                m = ""
    match = m
    if not "sources:[{file:" in match:
        page_url = page_url.replace("playvid-", "")

        headers = {
            'Host': 'www.flashx.tv',
            'User-Agent':
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cookie': ''
        }
        data = scrapertools.downloadpage(page_url, headers=headers.items())
        flashx_id = scrapertools.find_single_match(
            data, 'name="id" value="([^"]+)"')
        fname = scrapertools.find_single_match(data,
                                               'name="fname" value="([^"]+)"')
        hash_f = scrapertools.find_single_match(data,
                                                'name="hash" value="([^"]+)"')
        post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=Proceed+to+video' % (
            flashx_id, urllib.quote(fname), hash_f)
        wait_time = scrapertools.find_single_match(data,
                                                   "<span id='xxc2'>(\d+)")

        file_id = scrapertools.find_single_match(data, "'file_id', '([^']+)'")
        coding_url = 'https://files.fx.fastcontentdelivery.com/jquery2.js?fx=%s' % base64.encodestring(
            file_id)
        headers['Host'] = "files.fx.fastcontentdelivery.com"
        headers['Referer'] = "https://www.flashx.tv/"
        headers['Accept'] = "*/*"
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        coding_url = 'https://www.flashx.tv/counter.cgi?fx=%s' % base64.encodestring(
            file_id)
        headers['Host'] = "www.flashx.tv"
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        coding_url = 'https://www.flashx.tv/flashx.php?fxfx=3'
        headers['X-Requested-With'] = 'XMLHttpRequest'
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        try:
            time.sleep(int(wait_time) + 1)
        except:
            time.sleep(6)

        headers.pop('X-Requested-With')
        headers['Content-Type'] = 'application/x-www-form-urlencoded'
        data = scrapertools.downloadpage('https://www.flashx.tv/dl?playthis',
                                         post=post,
                                         headers=headers.items())

        matches = scrapertools.find_multiple_matches(
            data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
        for match in matches:
            if match.startswith("eval"):
                try:
                    match = jsunpack.unpack(match)
                    fake = (scrapertools.find_single_match(match,
                                                           "(\w{40,})") == "")
                    if fake:
                        match = ""
                    else:
                        break
                except:
                    match = ""

        if not match:
            match = data

    # Extract the media URL
    # {file:"http://f11-play.flashx.tv/luq4gfc7gxixexzw6v4lhz4xqslgqmqku7gxjf4bk43u4qvwzsadrjsozxoa/video1.mp4"}
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(
        match, '\{file\:"([^"]+)",label:"([^"]+)"')
    subtitle = ""
    for media_url, label in media_urls:
        if media_url.endswith(".srt") and label == "Italian":
            try:
                from core import filetools
                data = scrapertools.downloadpage(media_url)
                subtitle = os.path.join(config.get_data_path(),
                                        'sub_flashx.srt')
                filetools.write(subtitle, data)
            except:
                import traceback
                logger.info(
                    "streamondemand.servers.flashx Error al descargar el subtítulo: "
                    + traceback.format_exc())

    for media_url, label in media_urls:
        if not media_url.endswith("png") and not media_url.endswith(".srt"):
            video_urls.append([
                "." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0,
                subtitle
            ])

    for video_url in video_urls:
        logger.info("streamondemand.servers.flashx %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
Ejemplo n.º 58
def busqueda(item):
    logger.info("pelisalacarta.channels.cinefox busqueda")
    itemlist = []

    data = scrapertools.downloadpage(item.url)
    patron = '<div class="poster-media-card">(.*?)(?:<li class="search-results-item media-item">|<footer>)'
    bloque = scrapertools.find_multiple_matches(data, patron)
    for match in bloque:
        patron = 'href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?' \
                 '<p class="search-results-main-info">.*?del año (\d+).*?' \
                 'p class.*?>(.*?)<'
        matches = scrapertools.find_multiple_matches(match, patron)
        for scrapedurl, scrapedtitle, scrapedthumbnail, year, plot in matches:
            scrapedtitle = scrapedtitle.capitalize()
            item.infoLabels["year"] = year
            plot = scrapertools.htmlclean(plot)
            if "/serie/" in scrapedurl:
                action = "episodios"
                context = "25"
                show = scrapedtitle
                scrapedurl += "/episodios"
                title = " [Serie]"
            elif "/pelicula/" in scrapedurl:
                action = "menu_info"
                context = "05"
                show = ""
                title = " [Película]"
            else:
                continue
            title = scrapedtitle + title + " (" + year + ")"
            itemlist.append(
                item.clone(action=action,
                           title=title,
                           url=scrapedurl,
                           thumbnail=scrapedthumbnail,
                           contentTitle=scrapedtitle,
                           fulltitle=scrapedtitle,
                           context=context,
                           plot=plot,
                           show=show,
                           text_color=color2))

    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    next_page = scrapertools.find_single_match(
        data, 'href="([^"]+)"[^>]+>Más resultados')
    if next_page != "":
        next_page = urlparse.urljoin(host, next_page)
        itemlist.append(
            Item(channel=item.channel,
                 action="busqueda",
                 title=">> Siguiente",
                 url=next_page,
                 thumbnail=item.thumbnail,
                 text_color=color3))

    return itemlist
Ejemplo n.º 59
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("url=" + page_url)
    pfxfx = ""
    data = flashx_data
    data = data.replace("\n", "")
    cgi_counter = scrapertools.find_single_match(
        data,
        """(?is)src=.(https://www.flashx.../counter.cgi.*?[^(?:'|")]+)""")
    cgi_counter = cgi_counter.replace("%0A", "").replace("%22", "")
    playnow = scrapertools.find_single_match(data,
                                             'https://www.flashx.../dl[^"]+')
    # To obtain the f and fxfx parameters
    js_fxfx = "https://www." + scrapertools.find_single_match(
        data.replace("//", "/"),
        """(?is)(flashx.../js\w+/c\w+.*?[^(?:'|")]+)""")
    if len(js_fxfx) > 15:
        data_fxfx = httptools.downloadpage(js_fxfx).data
        mfxfx = scrapertools.find_single_match(data_fxfx,
                                               'get.*?({.*?})').replace(
                                                   "'", "").replace(" ", "")
        matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
        for f, v in matches:
            pfxfx += f + "=" + v + "&"
    logger.info("mfxfxfx1= %s" % js_fxfx)
    logger.info("mfxfxfx2= %s" % pfxfx)
    if pfxfx == "":
        pfxfx = "f=fail&fxfx=6"
    coding_url = 'https://www.flashx.co/flashx.php?%s' % pfxfx

    # These two files must be downloaded, otherwise the site returns an error
    httptools.downloadpage(coding_url, cookies=False)
    httptools.downloadpage(cgi_counter, cookies=False)

    ts = int(time.time())
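    # the hash embeds a 10-digit unix timestamp; wait until that moment arrives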
    flash_ts = scrapertools.find_single_match(flashx_hash_f, '-(\d{10})-')
    wait_time = int(flash_ts) - ts
    platformtools.dialog_notification(
        'Cargando flashx', 'Espera de %s segundos requerida' % wait_time)

    try:
        time.sleep(wait_time)
    except:
        time.sleep(6)

    data = httptools.downloadpage(playnow, post=flashx_post).data
    # If a warning appears, the verification page is loaded and then the initial one
    # LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ THESE LINES MUST BE INCLUDED
    if "You try to access this video with Kodi" in data:
        url_reload = scrapertools.find_single_match(
            data, 'try to reload the page.*?href="([^"]+)"')
        try:
            data = httptools.downloadpage(url_reload).data
            data = httptools.downloadpage(playnow, post=flashx_post).data
        # LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ THESE LINES MUST BE INCLUDED
        except:
            pass

    matches = scrapertools.find_multiple_matches(
        data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
    video_urls = []
    for match in matches:
        try:
            match = jsunpack.unpack(match)
            match = match.replace("\\'", "'")
            media_urls = scrapertools.find_multiple_matches(
                match, "{src:'([^']+)'.*?,label:'([^']+)'")
            subtitle = ""
            for media_url, label in media_urls:
                if media_url.endswith(".srt") and label == "Spanish":
                    try:
                        from core import filetools
                        data = httptools.downloadpage(media_url)
                        subtitle = os.path.join(config.get_data_path(),
                                                'sub_flashx.srt')
                        filetools.write(subtitle, data)
                    except:
                        import traceback
                        logger.info("Error al descargar el subtítulo: " +
                                    traceback.format_exc())

            for media_url, label in media_urls:
                if not media_url.endswith("png") and not media_url.endswith(
                        ".srt"):
                    video_urls.append([
                        "." + media_url.rsplit('.', 1)[1] + " [flashx]",
                        media_url, 0, subtitle
                    ])

            for video_url in video_urls:
                logger.info("%s - %s" % (video_url[0], video_url[1]))
        except:
            pass

    return video_urls
Ejemplo n.º 60
def list_all(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    patron = '<article class="Cards.*?href="([^"]+)"(.*?)<img.*?'
    patron += 'data-echo="([^"]+)" alt="([^"]+)"'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedinfo, scrapedthumbnail, scrapedtitle in matches:

        title = scrapedtitle
        scrapedtitle = re.sub(r' \((.*?)\)$', '', scrapedtitle)
        thumbnail = scrapedthumbnail.strip()
        url = scrapedurl
        tmdb_id = scrapertools.find_single_match(url, r'/\w(\d+)-')

        # request the original-size poster instead of the scaled (p/w###) one
        thumbnail = re.sub(r'p/w\d+', 'p/original', thumbnail)

        if item.type == 'search':
            s_title = scrapertools.find_single_match(url, host + '(\w+)')
            if not unify:
                title += ' [COLOR grey][I](%s)[/I][/COLOR]' % s_title.capitalize()[:-1]

        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        infoLabels={'tmdb_id': tmdb_id})

        if item.type == 'peliculas' or 'peliculas' in url:
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
            new_item.type = 1

            calidad_baja = scrapertools.find_single_match(
                scrapedinfo, r'>(\w+\s\w+)</div>$')

            if calidad_baja:
                new_item.title += '[COLOR tomato] (Calidad Baja)[/COLOR]'
        else:
            new_item.action = 'seasons'
            new_item.contentSerieName = scrapedtitle
            new_item.type = 0

            sesxep = scrapertools.find_single_match(url, r'/(\d+x\d+)$')

            if sesxep:
                new_item.title += ' ' + sesxep
                new_item.action = 'findvideos'

        itemlist.append(new_item)

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    if item.type == 'search':
        itemlist.sort(key=lambda i: (i.type, i.title))
    # Pagination

    url_next_page = scrapertools.find_single_match(
        data, '<a href="([^"]+)" up-target="body">Pagina s')
    if url_next_page:
        itemlist.append(
            item.clone(title="Siguiente >>",
                       url=url_next_page,
                       action='list_all',
                       text_color='gold'))

    return itemlist