def cerca(item):
    logger.info("streamondemand.channels.guardaserie fichas")

    itemlist = []

    ## Download the page
    data = re.sub(
        r'\t|\n|\r',
        '',
        anti_cloudflare(item.url)
    )
	
    patron = '<div class="search_thumbnail">.*?<a class="search_link" href="([^"]+)" rel="bookmark" title="([^"]+)">.*?<img src="([^"]+)" />.*?</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        if scrapedtitle.startswith("Guarda "):
            scrapedtitle = scrapedtitle[7:]

        itemlist.append(
                Item(channel=__channel__,
                     action="episodios",
                     title=scrapedtitle,
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     show=scrapedtitle,
                     thumbnail=scrapedthumbnail))

    return itemlist
def categorias(item):
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)
    bloque = scrapertools.get_match(data, '<ul>(.*?)</ul>')

    # Extract the entries (folders)
    patron = '<a href="([^"]+)" >(.*?)</a>(.*?)\s*</li>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle, scrapedtot in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("Animazione", ""))
        scrapedurl = scrapertools.decodeHtmlentities(scrapedurl.replace("%s/category/animazione/" % host, ""))
        if (DEBUG): logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="pelicat",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR][COLOR gray]" + scrapedtot + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png",
                 folder=True))

    return itemlist
def mainlist(item):
    logger.info("[libreriaXBMC.py] mainlist")

    itemlist = []
    # List the entries and add them to "files"
    c = conn.cursor()

    c.execute(
        """select sh.idShow, sh.c00 as title,
               sh.c06 as thumb, max(ep.c12)
           from tvshow sh, episode ep, TVSHOWLINKEPISODE rel
           where sh.idShow = rel.idShow
             and ep.idEpisode = rel.idEpisode
           group by sh.idShow"""
    )

    for row in c:
        urlR = "idshow=" + str(row[0]) + "&temps=" + str(row[3])
        thumb = getThumb(row[2])
        titulo = row[1].encode("iso-8859-1")
        itemlist.append(
            Item(channel=CHANNELNAME, title=titulo, action="listseries", url=urlR, thumbnail=thumb, folder=True)
        )

    # conn.close()
    return itemlist
def mainlist(item):
    logger.info("streamondemand.channels.guardaserie mainlist")

    itemlist = [Item(channel=__channel__,
                     action="ultimi",
                     title="[COLOR azure]Ultimi Episodi Aggiunti[/COLOR]",
                     url=host + "/aggiornamenti-serie-tv/",
                     thumbnail="http://i58.tinypic.com/2zs64cz.jpg"),
                Item(channel=__channel__,
                     action="fichas",
                     title="[COLOR azure]Lista Serie TV[/COLOR]",
                     url=host + "/lista-serie-tv-guardaserie/",
                     thumbnail="http://i58.tinypic.com/2zs64cz.jpg"),
                Item(channel=__channel__,
                     action="anime",
                     title="[COLOR azure]Anime[/COLOR]",
                     url=host + "/lista-serie-tv-guardaserie/",
                     thumbnail="http://2.bp.blogspot.com/-4AeDx37c3uQ/VAxIHDhm-9I/AAAAAAAABRA/BUnctEGpVYM/s1600/528900971.gif"),
                Item(channel=__channel__,
                     action="cartoni",
                     title="[COLOR azure]Cartoni Animati[/COLOR]",
                     url=host + "/lista-serie-tv-guardaserie/",
                     thumbnail="http://i.imgur.com/d9GffYm.png"),
                Item(channel=__channel__,
                     action="progs",
                     title="[COLOR azure]Programmi TV[/COLOR]",
                     url=host + "/lista-serie-tv-guardaserie/",
                     thumbnail="http://mujakovic.weebly.com/uploads/1/4/7/9/14799472/3787546.png"),
                Item(channel=__channel__,
                     action="search",
                     title="[COLOR yellow]Cerca...[/COLOR]",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]

    return itemlist
def play(item):
    logger.info("streamondemand.streamingfilmit play")

    data = scrapertools.cache_page(item.url, headers=headers)
    data = scrapertools.decodeHtmlentities(data).replace('http://cineblog01.pw', 'http://k4pp4.pw')

    url = scrapertools.find_single_match(data, r'<a\s*href="([^"]+)"><h1')

    data = scrapertools.cache_page(url, headers=headers)

    if "go.php" in url:
        data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";')
    elif "/link/" in url:
        from lib.jsbeautifier.unpackers import packer
        try:
            data = scrapertools.get_match(data, "(eval.function.p,a,c,k,e,.*?)</script>")
            data = packer.unpack(data)
        except IndexError:
            pass

        data = scrapertools.get_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
    else:
        data = url

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
def categorias(item):
    logger.info("[documentalesatonline2.py] novedades")
    itemlist=[]

    # Download the page
    data = scrapertools.cache_page(item.url)
    #logger.info(data)

    # Extract the entries (folders)
    #<li class="jcl_category" style="display:none;" >
    #<a href="http://www.bizzentte.com/categoria/categorias-en-general-para-todo/arte-y-cultura/" >Arte y Cultura (80)</a>
    #<a class="jcl_link" href="#jcl" title="Ver Sub-Categor&iacute;as">
    #<span class="jcl_symbol" style="padding-left:5px">(+)</span></a>
    #<ul>
    #<li class="jcl_category" style="display:none;" ><a href="http://www.bizzentte.com/categoria/categorias-en-general-para-todo/arte-y-cultura/fotografia/" >Fotografia (2)</a></li><li class="jcl_category" style="display:none;" ><a href="http://www.bizzentte.com/categoria/categorias-en-general-para-todo/arte-y-cultura/grafiti/" >Grafiti (2)</a></li>
    patronvideos  = '<li class="jcl_category"[^>]+><a href="([^"]+)"[^>]*>([^<]+)</a></li>'
    # '\" url nombre cantidad_entradas
    matches = re.compile(patronvideos).findall(data)
    scrapertools.printMatches(matches)

    for match in matches:
        #xbmctools.addnewfolder( __channel__ , "novedades" , category , match[1] , match[0] + "feed?paged=1" , "" , "")
        itemlist.append( Item(channel=__channel__, action="novedades", title=match[1] , url=match[0] , folder=True) )

    return itemlist
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    video = True
    data = scrapertools.downloadpageWithoutCookies(page_url)

    if "videocontainer" not in data:
        video = False
        url = page_url.replace("/embed/","/f/")
        data = scrapertools.downloadpageWithoutCookies(url)
        text_encode = scrapertools.get_match(data,"Click to start Download.*?<script[^>]+>(.*?)</script")
        text_decode = decode(data)
    else:
        text_encode = scrapertools.get_match(data,"<video[^<]+<script[^>]+>(.*?)</script>")
        text_decode = decode(data)

    # Header for the download, appended to the video URL as a "|User-Agent=..." suffix for the player
    header_down = "|User-Agent="+headers['User-Agent']+"|"
    if video:
        videourl = scrapertools.get_match(text_decode, "(http.*?true)")
        videourl = scrapertools.get_header_from_response(videourl,header_to_get="location")
        videourl = videourl.replace("https://","http://").replace("?mime=true","")
        extension = videourl[-4:]
        video_urls.append([ extension + " [Openload]", videourl+header_down+extension])
    else:
        videourl = scrapertools.find_single_match(text_decode, '"href",(?:\s|)\'([^\']+)\'')
        videourl = videourl.replace("https://","http://")
        extension = videourl[-4:]
        video_urls.append([ extension + " [Openload]", videourl+header_down+extension])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
def search_movie_by_collection(item):
    logger.info("streamondemand.channels.database search_movie_by_collection '%s'" % (item.extra))

    collection = tmdb_get_data("collection/%s?" % item.extra)

    # Movie collection list is not paged
    return build_movie_list(item, collection['parts']) if 'parts' in collection else []
def searchresults(params,tecleado,category):
    logger.info("[documentalesatonline2.py] search")

    buscador.salvar_busquedas(params,tecleado,category)
    tecleado = tecleado.replace(" ", "+")
    searchUrl = "http://documentalesatonline.loquenosecuenta.com/search/"+tecleado+"?feed=rss2&paged=1"
    novedades(params,searchUrl,category)
def search_movie_by_title(item, search_terms):
    logger.info("streamondemand.channels.database search_movie_by_title '%s'" % (search_terms))

    return list_movie(
            Item(channel=item.channel,
                 url='search/movie?query=%s&' % url_quote_plus(search_terms),
                 plot="1"))
def search_person_by_name(item, search_terms):
    logger.info("streamondemand.channels.database search_person_by_name '%s'" % (search_terms))

    persons = tmdb_get_data("search/person?query=%s&" % url_quote_plus(search_terms))

    itemlist = []
    for person in persons:
        name = normalize_unicode(tmdb_tag(person, 'name'))
        poster = tmdb_image(person, 'profile_path')
        fanart = ''
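        # Use the backdrop of the first "known for" movie as the person's fanart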
        for movie in tmdb_tag(person, 'known_for', []):
            if tmdb_tag_exists(movie, 'backdrop_path'):
                fanart = tmdb_image(movie, 'backdrop_path', 'w1280')
                break

        extracmds = [
            (NLS_Info_Person, "RunScript(script.extendedinfo,info=extendedactorinfo,id=%s)" % str(tmdb_tag(person, 'id')))] \
            if xbmc.getCondVisibility('System.HasAddon(script.extendedinfo)') else []

        itemlist.append(Item(
                channel=item.channel,
                action='search_movie_by_person',
                extra=str(tmdb_tag(person, 'id')),
                title=name,
                thumbnail=poster,
                viewmode='list',
                fanart=fanart,
                extracmds=extracmds
        ))

    return itemlist
def convert_link(html, link_type):
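    # The page hides the real link: the anchor's class name embeds a hash of a cookie
    # value, and inline JS patches single characters of its href (e.g. x[12]="a").
    # Rebuild the final enlacespepito.com URL by reproducing both steps.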

    hash_seed = get_cookie(html)
    logger.info("[seriespepito.py] hash_seed="+hash_seed)

    HASH_PAT = 'CryptoJS\.(\w+)\('
    hash_func = scrapertools.find_single_match(html, HASH_PAT).lower()

    if hash_func == "md5":
        hash = hashlib.md5(hash_seed).hexdigest()
    else:
        hash = hashlib.sha256(hash_seed).hexdigest()

    if link_type == PELICULAS_PEPITO:
        hash += '0'
    logger.info("[seriespepito.py] hash="+hash)

    HREF_SEARCH_PAT = '<a class=".' + hash + '".*?href="http://www.enlacespepito.com\/([^\.]*).html"><i class="icon-(?:play|download)">'
    logger.info("[seriespepito.py] HREF_SEARCH_PAT="+HREF_SEARCH_PAT)

    href = list(scrapertools.find_single_match(html, HREF_SEARCH_PAT))
    logger.info("[seriespepito.py] href="+repr(href))
    CHAR_REPLACE_PAT = '[a-z]\[(\d+)\]="(.)";'

    matches = re.findall(CHAR_REPLACE_PAT , html, flags=re.DOTALL|re.IGNORECASE)
    logger.info("[seriespepito.py] matches="+repr(matches))

    for match in matches:
        href[int(match[0])] = match[1]

    href = ''.join(href)

    return 'http://www.enlacespepito.com/' + href + '.html'
def findvid_serie(item):
    logger.info("pelisalacarta.filmstream findvideos")

    itemlist = []

    ## Download the page
    data = scrapertools.cache_page(item.url)
    data = scrapertools.decodeHtmlentities(data)

    patron1 = '<p style="text-align: center;">(.*?)(<a href="[^"]+" target="_blank">([^<]+)</a>.+?)</p>'
    matches1 = re.compile(patron1).findall(data)
    for titulo1, data, titulo2 in matches1:
        ## Extract the entries
        titulo1 = re.sub(r'<[^>]*>', '', titulo1)
        scrapedtitle = titulo2 if titulo1 == "" else titulo1
        li = servertools.find_video_items(data=data)

        for videoitem in li:
            videoitem.title = scrapedtitle + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.show = item.show
            videoitem.plot = item.plot
            videoitem.channel = __channel__

        itemlist.extend(li)

    return itemlist
def novedades(item):
    logger.info("pelisalacarta.channels.animeflv novedades")

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries (folders)
    '''
    <div class="not">
    <a href="/ver/cyclops-shoujo-saipu-12.html" title="Cyclops Shoujo Saipu 12">
    <img class="imglstsr lazy" src="http://cdn.animeflv.net/img/mini/957.jpg" border="0">
    <span class="tit_ep"><span class="tit">Cyclops Shoujo Saipu 12</span></span>
    </a>
    '''
    patronvideos = '<div class="not"[^<]+<a href="([^"]+)" title="([^"]+)"[^<]+<img class="[^"]+" src="([^"]+)"[^<]+' \
                   '<span class="tit_ep"><span class="tit">([^<]+)<'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    itemlist = []
    
    for match in matches:
        scrapedtitle = scrapertools.entityunescape(match[3])
        fulltitle = scrapedtitle
        # directory = match[1]
        scrapedurl = urlparse.urljoin(item.url, match[0])
        scrapedthumbnail = urlparse.urljoin(item.url, match[2].replace("mini", "portada"))
        scrapedplot = ""
        if DEBUG:
            logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        itemlist.append(Item(channel=__channel__, action="findvideos", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot, fulltitle=fulltitle, viewmode="movie"))

    return itemlist
def play(item):
    logger.info("[somosmovies.py] play(item.url="+item.url+")")
    itemlist=[]

    if "bit.ly" in item.url:
        logger.info("Acortador bit.ly")
        location = scrapertools.get_header_from_response(item.url,header_to_get="location")
        logger.info("[somosmovies.py] location="+location)
        item.url = location
        return play(item)

    if "goo.gl" in item.url:
        logger.info("Acortador goo.gl")
        location = scrapertools.get_header_from_response(item.url,header_to_get="location")
        item.url = location
        return play(item)

    #adf.ly
    elif "j.gs" in item.url:
        logger.info("Acortador j.gs (adfly)")
        from servers import adfly
        location = adfly.get_long_url(item.url)
        item.url = location
        return play(item)

    else:
        from servers import servertools
        itemlist=servertools.find_video_items(data=item.url)
        for videoitem in itemlist:
            videoitem.channel=__channel__
            videoitem.folder=False

    return itemlist
def episodios(item):
    logger.info("[serieonline.py] episodios")

    # Download the page
    data = scrapertools.cachePage(item.url)

    # Extract the entries
    patronvideos  = '<div class="serie">(.*?)</div>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    if len(matches)>0:
        cuerpo = matches[0]
    else:
        cuerpo = ""

    patronvideos  = '<a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patronvideos,re.DOTALL).findall(cuerpo)

    itemlist = []
    for match in matches:
        scrapedtitle = match[1]
        scrapedplot = ""
        scrapedurl = urlparse.urljoin(item.url,match[0])
        scrapedthumbnail = item.thumbnail
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        # Add to the XBMC list
        itemlist.append( Item(channel=CHANNELNAME, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )

    return itemlist
def novedades(item):
    logger.info("[serieonline.py] novedades")

    # Download the page
    data = scrapertools.cachePage(item.url)

    # Extract the entries
    patronvideos  = '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)" alt="([^"]+)" class="captify" /></a>'

    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    itemlist = []
    for match in matches:
        scrapedtitle = match[1] + " " + match[3]
        scrapedplot = ""
        scrapedurl = urlparse.urljoin(item.url,match[0])
        scrapedthumbnail = urlparse.urljoin(item.url,match[2])
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        # Add to the XBMC list
        itemlist.append( Item(channel=CHANNELNAME, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )

    # Extract the pager
    patronvideos  = '<div class="paginacion-num"><a href="([^"]+)">'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches)>0:
        scrapedtitle = "Página siguiente"
        scrapedurl = urlparse.urljoin(item.url,matches[0])
        itemlist.append( Item(channel=CHANNELNAME, action="novedades", title=scrapedtitle , url=scrapedurl , folder=True) )

    return itemlist
def info(item):
    logger.info("pelisalacarta.torrentestrenos info")
    
    url=item.url
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
    title= scrapertools.get_match(data,'<h4>(.*?)</h4>')
    title = "[COLOR aqua][B]" + title + "[/B][/COLOR]"
    scrapedplot = scrapertools.get_match(data,'</p><p>([^<]+)</p><p>')
    scrapedplot = "[COLOR white]" + scrapedplot + "[/COLOR]"
    plot_tag="[COLOR green][B]Sinopsis[/B][/COLOR]" + "[CR]"
    scrapedplot= plot_tag + scrapedplot
    scrapedplot = scrapedplot.replace("&aacute;","a")
    scrapedplot = scrapedplot.replace("&iacute;","i")
    scrapedplot = scrapedplot.replace("&eacute;","e")
    scrapedplot = scrapedplot.replace("&oacute;","o")
    scrapedplot = scrapedplot.replace("&uacute;","u")
    scrapedplot = scrapedplot.replace("&ntilde;","–")
    scrapedplot = scrapedplot.replace("&Aacute;","A")
    scrapedplot = scrapedplot.replace("&Iacute;","I")
    scrapedplot = scrapedplot.replace("&Eacute;","E")
    scrapedplot = scrapedplot.replace("&Oacute;","O")
    scrapedplot = scrapedplot.replace("&Uacute;","U")
    scrapedplot = scrapedplot.replace("&Ntilde;","„")
    fanart="http://s11.postimg.org/qu66qpjz7/zentorrentsfanart.jpg"
    tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
    tbd.ask(title, scrapedplot,fanart)
    del tbd
    return
def buscador(item):
    logger.info("pelisalacarta.torrentstrenos buscador")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
    
    #<div class="torrent-container-2 clearfix"><img class="torrent-image" src="uploads/torrents/images/thumbnails2/4441_step--up--all--in----blurayrip.jpg" alt="Imagen de Presentaci&oacute;n" /><div class="torrent-info"><h4><a href ="/descargar_torrent_27233-id_step_up_all_in_microhd_1080p_ac3_5.1--castellano--ac3_5.1_ingles_subs.html">Step Up All In MicroHD 1080p AC3 5.1-Castellano-AC3 5.1 Ingles Subs</a> </h4><p>19-12-2014</p><p>Subido por: <strong>TorrentEstrenos</strong> en <a href="/ver_torrents_41-id_en_peliculas_microhd.html" title="Peliculas MICROHD">Peliculas MICROHD</a><br />Descargas <strong><a href="#" style="cursor:default">46</a></strong></p><a class="btn-download" href ="/descargar_torrent_27233-id_step_up_all_in_microhd_1080p_ac3_5.1--castellano--ac3_5.1_ingles_subs.html">Descargar</a></div></div>
    
    patron =  '<div class="torrent-container-2 clearfix">.*?'
    patron += 'src="([^"]+)".*? '
    patron += 'href ="([^"]+)".*?'
    patron += '>([^<]+)</a>.*?'
    patron += '<p>([^<]+)</p>'
    
    
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)==0 :
        itemlist.append( Item(channel=__channel__, title="[COLOR gold][B]No se encontraron coincidencias...[/B][/COLOR]", thumbnail ="http://s6.postimg.org/w7nc1wh8x/torrnoisethumb.png", fanart ="http://s6.postimg.org/jez81z5n5/torrnoisefan.jpg",folder=False) )
    
    for scrapedthumbnail, scrapedurl, scrapedtitulo, scrapedcreatedate in matches:
        scrapedtitulo = scrapedtitulo + "(Torrent:" + scrapedcreatedate + ")"
        scrapedthumbnail = "http://www.torrentestrenos.com/" + scrapedthumbnail
        scrapedurl = "http://www.torrentestrenos.com" + scrapedurl
        
        
        itemlist.append( Item(channel=__channel__, title=scrapedtitulo, url=scrapedurl, action="findvideos", thumbnail=scrapedthumbnail, fanart="http://s6.postimg.org/44tc7dtg1/tefanartgeneral.jpg", fulltitle=scrapedtitulo, folder=True) )

    return itemlist
def pendingFile(filepath, filesize=0):
    logger.info('pendingFile........')
    from os.path import getsize

    newsize = getsize(filepath)
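    # Treat the file as complete once two size checks PENDING_TIME apart agree on a non-zero size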

    logger.info('{filepath} size is {size}'.format(
        filepath=filepath, size=newsize))
    if newsize == filesize and newsize > 0:
        try:
            logger.info('Is this a watch file?')
            f = FileRoute(filepath)
            transtype, ftpname, dest_path, filename = f.routeInfo()
            return True
        except TypeError:
            logger.info('{filepath} is not watch file'.format(
                filepath=filepath))
            return False
        except:
            logger.exception('pendingFile')
            return False
    logger.info('File is not OK, pending {pendingtime} seconds'.format(
        pendingtime=conf.PENDING_TIME))
    time.sleep(conf.PENDING_TIME)
    return pendingFile(filepath, newsize)
def test_video_exists( page_url ):
    logger.info("pelisalacarta.servers.playwire test_video_exists(page_url='%s')" % page_url)
    
    data = scrapertools.cachePage( page_url )
    if ("File was deleted" or "Not Found") in data: return False, "[playwire] El archivo no existe o ha sido borrado"

    return True,""
def findvideos(item):
    logger.info("pelisalacarta.seriesblanco findvideos")

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s","",data)
    data = re.sub(r"<!--.*?-->","",data)
    data = re.sub(r"<td class='tam12'></td></tr>","<td class='tam12'>SD</td></tr>",data)
    data = re.sub(r"<center>|</center>","",data)

    #<tr><td class='tam12'><img src='/banderas/es.png' width='30' height='20' /></td><td class='tam12'>2014-10-04</td><td class='tam12'><center><a href='/enlace/534/1/01/1445121/' rel='nofollow' target='_blank' alt=''><img src='/servidores/allmyvideos.jpg' width='80' height='25' /></a></center></td><td class='tam12'><center>Darkgames</center></td><td class='tam12'></td></tr>
    

    #<tr><td class='tam12'><img src='/banderas/es.png' width='30' height='20' /></td><td class='tam12'>2014-10-04</td><td class='tam12'><a href='/enlace/534/1/01/1444719/' rel='nofollow' target='_blank' alt=''><img src='/servidores/uploaded.jpg' width='80' height='25' /></a></td><td class='tam12'><center>Darkgames</center></td><td class='tam12'>SD</td></tr>

    patron = "<td class='tam12'><img src='/banderas/([^\.]+)\.[^']+'[^>]+></td>"
    patron+= "<td class='tam12'>([^<]+)</td>"
    patron+= "<td class='tam12'><a href='([^']+)'[^>]+>"
    patron+= "<img src='/servidores/([^\.]+)\.[^']+'[^>]+></a></td>"
    patron+= "<td class='tam12'>[^<]+</td>"
    patron+= "<td class='tam12'>([^<]+)</td>"
    
    matches = re.compile(patron,re.DOTALL).findall(data)
    
    for scrapedidioma, scrapedfecha, scrapedurl, scrapedservidor, scrapedcalidad in matches:
        title = "Ver en " + scrapedservidor + " [" + idiomas[scrapedidioma] + "] [" + scrapedcalidad + "] (" + scrapedfecha + ")"
        itemlist.append( Item(channel=__channel__, title =title , url=urlparse.urljoin(host,scrapedurl), action="play", thumbnail=urlparse.urljoin(host,item.extra), fanart=item.fanart, show=item.show) )

    return itemlist
def quality(item):
    logger.info("[itastreaming.py] genere")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)
    patron = '<a>Qualità</a>(.+?)</ul>'
    data = scrapertools.find_single_match(data, patron)

    patron = '<li id=".*?'
    patron += 'href="([^"]+)".*?'
    patron += '>([^"]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.replace('&amp;', '-')
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title=scrapedtitle,
                 url=scrapedurl,
                 folder=True))

    return itemlist
def findvid_anime(item):
    logger.info("[cineblog01.py] findvideos")

    itemlist = []

    ## Download the page
    data = scrapertools.cache_page(item.url)
    data = scrapertools.decodeHtmlentities(data).replace('http://cineblog01.pw', 'http://k4pp4.pw')

    patron1 = '(?:<p>|<td bgcolor="#ECEAE1">)<span class="txt_dow">(.*?)(?:</p>)?(?:\s*</span>)?\s*</td>'
    patron2 = '<a.+?href="([^"]+)"[^>]*>([^<]+)</a>'
    matches1 = re.compile(patron1, re.DOTALL).findall(data)
    if len(matches1) > 0:
        for match1 in re.split('<br />|<p>', matches1[0]):
            if len(match1) > 0:
                ## Extract the entries
                titulo = None
                matches2 = re.compile(patron2, re.DOTALL).finditer(match1)
                for match2 in matches2:
                    if titulo is None:
                        titulo = match2.group(2)
                    scrapedurl = match2.group(1)
                    scrapedtitle = match2.group(2)
                    title = item.title + " " + titulo + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
                    itemlist.append(
                        Item(channel=__channel__,
                             action="play",
                             title=title,
                             url=scrapedurl,
                             fulltitle=item.title,
                             show=item.title,
                             folder=False))

    return itemlist
def play(item):
    logger.info("[cineblog01.py] play")

    data = scrapertools.cache_page(item.url)

    print "##############################################################"
    if "go.php" in item.url:
        data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";')
        print "##### play go.php data ##\n%s\n##" % data
    elif "/link/" in item.url:
        from lib.jsbeautifier.unpackers import packer

        try:
            data = scrapertools.get_match(data, "(eval.function.p,a,c,k,e,.*?)</script>")
            data = packer.unpack(data)
            print "##### play /link/ unpack ##\n%s\n##" % data
        except IndexError:
            print "##### The content is yet unpacked"

        data = scrapertools.get_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
        print "##### play /link/ data ##\n%s\n##" % data
    else:
        data = item.url
        print "##### play else data ##\n%s\n##" % data
    print "##############################################################"

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
def fichas(item):
    logger.info("streamondemand.channels.guardaserie fichas")

    itemlist = []

#    data = scrapertools.cache_page(item.url)

    ## Download the page
    data = re.sub(
        r'\t|\n|\r',
        '',
        anti_cloudflare(item.url)
    )

    data = scrapertools.find_single_match(data, '<a[^>]+>Serie Tv</a><ul>(.*?)</ul>')

    patron = '<li><a href="([^"]+)[^>]+>([^<]+)</a></li>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        itemlist.append(
                Item(channel=__channel__,
                     action="episodios",
                     title=scrapedtitle,
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     url=scrapedurl))

    return itemlist
def findvid_serie(item):
    logger.info("[cineblog01.py] findvideos")

    itemlist = []

    ## Download the page
    data = scrapertools.cache_page(item.url)
    data = scrapertools.decodeHtmlentities(data).replace('http://cineblog01.pw', 'http://k4pp4.pw')

    patron1 = '<p(?:\s*style="[^"]*")?>(?:<strong>)?([^<]+)(<a.*?)(?:</strong>)?</p>'
    patron2 = '<a\s*href="([^"]+)"\s*target="_blank">([^<]+)</a>'
    matches1 = re.compile(patron1, re.DOTALL).finditer(data)
    for match1 in matches1:
        titulo = match1.group(1)
        links = match1.group(2)
        ## Extract the entries
        matches2 = re.compile(patron2, re.DOTALL).finditer(links)
        for match2 in matches2:
            scrapedurl = match2.group(1)
            scrapedtitle = match2.group(2)
            title = item.title + " " + titulo + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=title,
                     url=scrapedurl,
                     fulltitle=item.title,
                     show=item.title,
                     folder=False))

    return itemlist
def idiomas(item):
    logger.info("[oranline.py] idiomas")
    itemlist = []

    '''
    div class="widget"><h3>&Uacute;ltimos estrenos</h3>

    <ul>
    <li class="cat-item cat-item-84"><a href="http://www.oranline.com/Películas/castellano/" title="Ver todas las entradas archivadas en Castellano">Castellano</a> (585)
    </li>
    <li class="cat-item cat-item-85"><a href="http://www.oranline.com/Películas/latino/" title="Ver todas las entradas archivadas en Latino">Latino</a> (623)
    </li>
    <li class="cat-item cat-item-86"><a href="http://www.oranline.com/Películas/version-original/" title="Ver todas las entradas archivadas en Versión Original">Versión Original</a> (27)
    </li>
    <li class="cat-item cat-item-87"><a href="http://www.oranline.com/Películas/vos/" title="Ver todas las entradas archivadas en VOS">VOS</a> (1471)
    </li>
    '''
    # Download the page
    data = get_main_page(item.url)
    data = scrapertools.get_match(data,'<div class="widget"><h3>&Uacute;ltimos estrenos</h3>(.*?)</ul>')

    # Extract the entries
    patron  = '<li class="cat-item cat-item-\d+"><a href="([^"]+)"[^>]+>([^<]+)</a>\s+\((\d+)\)'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl,scrapedtitle,cuantas in matches:
        title=scrapedtitle.strip()+" ("+cuantas+")"
        url=urlparse.urljoin(item.url,scrapedurl)
        thumbnail=""
        plot=""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="novedades", title=title , url=url , thumbnail=thumbnail , plot=plot , folder=True) )

    return itemlist
def findvideos(item):
    logger.info("streamondemand.channels.guardaserie findvideos")

    itemlist = []

    url = item.url.split('?')[0]
    post = item.url.split('?')[1]
    referer = item.url.split('?')[2]
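    # item.url carried these three parts packed as "url?post?referer"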

    headers.append(['Referer', referer])

    data = scrapertools.cache_page(url, post=post, headers=headers)

    url = scrapertools.get_match(data.lower(), 'src="([^"]+)"')
    url = re.sub(r'embed\-|\-607x360\.html', '', url)

    server = url.split('/')[2].split('.')
    server = server[1] if len(server) == 3 else server[0]
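    # ("www.host.tld" and "host.tld" both reduce to "host")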

    title = "[" + server + "] " + item.title

    itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 url=url,
                 server=server,
                 fulltitle=item.fulltitle,
                 show=item.show,
                 folder=False))

    return itemlist
def search(item,texto):
    logger.info("[pelisalacarta.seriesblanco search texto="+texto+"]")

    itemlist = []

    try:
        item.url = urlparse.urljoin(host,"/search.php?q1=%s" % (texto))
        data = scrapertools.cache_page(item.url)
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s","",data)
        data = re.sub(r"<!--.*?-->","",data)

        #<div style='float:left;width: 620px;'><div style='float:left;width: 33%;text-align:center;'><a href='/serie/20/against-the-wall.html' '><img class='ict' src='http://4.bp.blogspot.com/-LBERI18Cq-g/UTendDO7iNI/AAAAAAAAPrk/QGqjmfdDreQ/s320/Against_the_Wall_Seriesdanko.jpg' alt='Capitulos de: Against The Wall' height='184' width='120'></a><br><div style='text-align:center;line-height:20px;height:20px;'><a href='/serie/20/against-the-wall.html' style='font-size: 11px;'> Against The Wall</a></div><br><br>

        patron = "<img class='ict' src='([^']+)'.*?<div style='text-align:center;line-height:20px;height:20px;'><a href='([^']+)' style='font-size: 11px;'>([^<]+)</a>"

        matches = re.compile(patron,re.DOTALL).findall(data)

        for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
            itemlist.append( Item(channel=__channel__, title=scrapedtitle, url=urlparse.urljoin(host,scrapedurl), action="episodios", thumbnail=scrapedthumbnail, fanart="http://portfolio.vernier.se/files/2014/03/light-grey-wood-photography-hd-wallpaper-1920x1200-46471.jpg", show=scrapedtitle) )

        return itemlist
    # Catch the exception so one failing channel does not interrupt the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error( "%s" % line )
        return []
def find_videos(data):
    encontrados = set()
    devuelve = []

    # http://vkontakte.ru/video_ext.php?oid=95855298&id=162902512&hash=4f0d023887f3648e
    # http://vk.com/video_ext.php?oid=70712020&amp;id=159787030&amp;hash=88899d94685174af&amp;hd=3"
    # http://vk.com/video_ext.php?oid=161288347&#038;id=162474656&#038;hash=3b4e73a2c282f9b4&#038;sd
    # http://vk.com/video_ext.php?oid=146263567&id=163818182&hash=2dafe3b87a4da653&sd
    # http://vk.com/video_ext.php?oid=146263567&id=163818182&hash=2dafe3b87a4da653
    # http://vk.com/video_ext.php?oid=-34450039&id=161977144&hash=0305047ffe3c55a8&hd=3
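    # Normalize the HTML-encoded ampersand variants seen above before matching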
    data = data.replace("&amp;", "&")
    data = data.replace("&#038;", "&")
    patronvideos = '(/video_ext.php\?oid=[^&]+&id=[^&]+&hash=[a-z0-9]+)'
    logger.info("#" + patronvideos + "#")
    matches = re.compile(patronvideos).findall(data)

    for match in matches:
        titulo = "[vk]"
        url = "http://vk.com" + match

        if url not in encontrados:
            logger.info("  url=" + url)
            devuelve.append([titulo, url, 'vk'])
            encontrados.add(url)
        else:
            logger.info("  url duplicada=" + url)

    # http://vk.com/video97482389_161509127?section=all
    patronvideos = '(vk\.[a-z]+\/video[0-9]+_[0-9]+)'
    logger.info("#" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for match in matches:
        titulo = "[vk]"
        url = "http://" + match

        if url not in encontrados:
            logger.info("  url=" + url)
            devuelve.append([titulo, url, 'vk'])
            encontrados.add(url)
        else:
            logger.info("  url duplicada=" + url)

    return devuelve
def peliculas(item):
    logger.info("[somosmovies.py] peliculas")
    itemlist=[]

    # Download the page
    data = scrapertools.cachePage(item.url)
    logger.info("data="+data)

    # Extract the entries
    '''
    <article CLASS='post crp'>
    <header><h3 CLASS='post-title entry-title item_name'>
    <a href='http://www.somosmovies.com/2013/11/elysium-2013_24.html' title='Elysium (2013)'>Elysium (2013)</a>
    </h3>
    </header>
    <section CLASS='post-body entry-content clearfix'>
    <a href='http://www.somosmovies.com/2013/11/elysium-2013_24.html' title='Elysium (2013)'><center>
    <img border="0" src="http://1.bp.blogspot.com/-J15zDm0KXVA/UoOmwu563kI/AAAAAAAALqw/zBww3WoCyEw/s1600/Poster.Elysium.2013.jpg" style="display: block; height: 400px; width: 312px;">
    </center>
    </a>
    <div CLASS='es-LAT'></div>
    <div CLASS='pie-post'>
    <div style='float:left'>
    <div class='fb-like' data-href='http://www.somosmovies.com/2013/11/elysium-2013_24.html' data-layout='button_count' data-send='false' data-show-faces='false' data-width='120'></div>
    </div>
    </div>
    <div STYLE='clear: both;'></div>
    </section>
    </article>
    '''
    patron = "<article(.*?)</article>"
    matches = re.compile(patron,re.DOTALL).findall(data)

    for match in matches:
        logger.info("match="+match)
        scrapedtitle = scrapertools.get_match(match,"<a href='[^']+' title='([^']+)'")
        scrapedurl = urlparse.urljoin(item.url, scrapertools.get_match(match,"<a href='([^']+)' title='[^']+'") )
        scrapedplot = ""
        try:
            scrapedthumbnail = urlparse.urljoin(item.url, scrapertools.get_match(match,'<img border="0" src="([^"]+)"') )
        except:
            scrapedthumbnail = ""
        try:
            idioma = scrapertools.get_match(match,"</center[^<]+</a[^<]+<div CLASS='([^']+)'></div>")
            scrapedtitle = scrapedtitle + " ("+idioma.upper()+")"
        except:
            pass
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        # Add to XBMC
        itemlist.append( Item(channel=__channel__, action="enlaces", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )

    # Extract the pager
    #<a CLASS='blog-pager-older-link' href='http://www.somosmovies.com/search?updated-max=2012-08-22T23:10:00-05:00&amp;max-results=16' id='Blog1_blog-pager-older-link' title='Siguiente Película'>Siguiente &#187;</a>
    patronvideos  = "<a CLASS='blog-pager-older-link' href='([^']+)' id='Blog1_blog-pager-older-link' title='Siguiente"
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches)>0:
        #http://www.somosmovies.com/search/label/Peliculas?updated-max=2010-12-20T08%3A27%3A00-06%3A00&max-results=12
        scrapedurl = urlparse.urljoin(item.url,matches[0])
        scrapedurl = scrapedurl.replace("%3A",":")
        itemlist.append( Item(channel=__channel__, action="peliculas", title=">> Página siguiente" , url=scrapedurl , folder=True) )

    return itemlist
def enlaces(item):
    logger.info("[somosmovies.py] enlaces")
    itemlist = []
    
    data = scrapertools.cachePage(item.url)
    
    '''
    <fieldset id="enlaces">
    <legend>Enlaces</legend><br />
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 1</b>: <small>30 Days Without an Accident</small></div><div class="tres"><a href="http://bit.ly/1aIiGdq" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/GY8PWg" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/15CGs8G" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/17RTYZl" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/ognvK7" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 2</b>: Infected</div><div class="tres"><a href="http://bit.ly/1fyubIg" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1a9voBA" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/19pmMpo" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1aYd0be" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/rI9OL7" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 3</b>: Isolation</div><div class="tres"><a href="http://bit.ly/1fyucfd" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/17UzXLX" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/17tmo9Y" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1eqtMEL" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/2f3Jj5" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 4</b>: Indifference</div><div class="tres"><a href="http://bit.ly/1aPKmwf" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/185vLcB" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/1iJ5mGm" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1hadtPR" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/lYoQoo" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 5</b>: Internment</div><div class="tres"><a href="http://bit.ly/1aYcERL" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/HSRa1F" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/1dilJZe" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1iG6sWi" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/0tHIKr" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 6</b>: Live Bait</div><div class="tres"><a href="http://bit.ly/17Z1EUf" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1ddc0Ym" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/I0GBKK" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1jx50TF" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/mgXyof" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 7</b>: Dead Weight</div><div class="tres"><a href="http://bit.ly/17UwbIi" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/17NZj1D" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/1aTE4vw" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/IhQa8C" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/ZiSH47" target="_blank">TurboBit</a> <b style="font-style:italic;color:red;">Nuevo!</b></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 8</b>: Too Far Gone</div><div class="tres"><i style="font-style:italic">Disponible el 02 de Diciembre.</i></div>
    </div>
    </fieldset>
    '''
    '''
    <fieldset id="enlaces">
    <h5 class='h5'>Season 1</h5>
    <div class="clearfix uno">
    <div class="dos"><b> Capítulo 1</b>: Yesterday's Jam</div><div class="tres"><a href="http://bit.ly/14OorEU" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/Z2uWNc" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/11nIqHi" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/XYo0jN" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 2</b>: Calamity Jen</div><div class="tres"><a href="http://bit.ly/XecqUq" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10algD1" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/YTsGe4" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/16xaKYZ" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 3</b>: Fifty-Fifty</div><div class="tres"><a href="http://bit.ly/12i5mq8" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10aljyA" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/12gnyo1" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/10xM8LC" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 4</b>: The Red Door</div><div class="tres"><a href="http://bit.ly/10al5Yg" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10wyHMz" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/10rHP5P" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/10xM9PW" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 5</b>: The Haunting of Bill Crouse</div><div class="tres"><a href="http://bit.ly/10wyAjT" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/XecCmO" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/XYoPt0" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/14OpPXW" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 6</b>: Aunt Irma Visits</div><div class="tres"><a href="http://bit.ly/17dCeEj" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/12i5JRM" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/10amVIA" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/17dDdUU" target="_blank">FreakShare</a></div>
    </div>
    <h5 class='h5'>Season 2</h5>
    <div class="clearfix uno">
    <div class="dos"><b> Capítulo 1</b>: The Work Outing</div><div class="tres"><a href="http://bit.ly/XOrCcl" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10wDjCe" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/12ibnDi" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/17dEXgU" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 2</b>: Return of the Golden Child</div><div class="tres"><a href="http://bit.ly/16p6Tvh" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/13SeTJq" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/10zwtuf" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/XqnsZ7" target="_blank">FreakShare</a></div>
    '''
    '''
    <fieldset id="enlaces">
    <legend>Enlaces</legend><br />
    <div class="clearfix uno">
    <div class="dos">
    <b>AVI</b> <small>480p</small></div>
    <div class="tres">
    <a href="http://bit.ly/1dQbvlS" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/Nd96Hh" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1d3a534" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://goo.gl/TOipXB" target="_blank">TurboBit</a> <b class="sep">|</b> <a href="http://bit.ly/1oUWtPP" target="_blank">FreakShare</a>
    </div>
    </div>
    <div class="clearfix uno">
    <div class="dos">
    <b>MP4</b> <small>1080p</small></div>
    <div class="tres">
    <a href="http://bit.ly/1c40BEG" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/OcZDki" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1gjElZY" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://goo.gl/fc43B2" target="_blank">TurboBit</a> <b class="sep">|</b> <a href="http://bit.ly/1e9GxAq" target="_blank">FreakShare</a>
    </div>
    </div>
    </fieldset>
    '''
    # Keep only the links box
    data = scrapertools.get_match(data,'<fieldset id="enlaces"[^<]+<legend>Enlaces</legend>(.*?)</fieldset>')
    patron = '<div class="dos"[^<]+<b>([^<]+)</b>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for title in matches:
        itemlist.append( Item(channel=__channel__, action="findvideos" , title="Enlaces "+title.strip() , url=item.url, extra=title, thumbnail=item.thumbnail, plot=item.plot, folder=True))

    return itemlist
def episodios(item):
    def load_episodios(html, item, itemlist, lang_title):
        patron = '(?:<strong>.*?<a href="[^"]+"[^_]+_blank[^>]+>[^<]+<\/a>[^>]+>.*?<\/strong>)'
        matches = re.compile(patron).findall(html)
        for data in matches:
            # Extract the entries
            scrapedtitle = data.split('<a ')[0]
            scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
            if scrapedtitle != 'Categorie':
                scrapedtitle = scrapedtitle.replace('×', 'x')
                itemlist.append(
                    Item(channel=__channel__,
                         action="findvideos",
                         contentType="episode",
                         title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
                         url=data,
                         thumbnail=item.thumbnail,
                         extra=item.extra,
                         fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
                         show=item.show))

    logger.info("[filmsenzalimiti.py] episodios")

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    data = scrapertools.decodeHtmlentities(data)
    data = scrapertools.get_match(data, '<div class="postcontent">(.*?)<div id="sidebar">')

    lang_titles = []
    starts = []
    patron = r"STAGIONE.*?ITA"
    matches = re.compile(patron, re.IGNORECASE).finditer(data)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
            starts.append(match.end())

    i = 1
    len_lang_titles = len(lang_titles)
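    # Slice the page between consecutive season headers; each slice holds the
    # episodes of one season, tagged with that season's language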

    while i <= len_lang_titles:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_lang_titles else -1

        html = data[inizio:fine]
        lang_title = lang_titles[i - 1]

        load_episodios(html, item, itemlist, lang_title)

        i += 1

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=__channel__,
                 title="Aggiungi alla libreria",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios" + "###" + item.extra,
                 show=item.show))
        itemlist.append(
            Item(channel=__channel__,
                 title="Scarica tutti gli episodi della serie",
                 url=item.url,
                 action="download_all_episodes",
                 extra="episodios" + "###" + item.extra,
                 show=item.show))

    return itemlist
def peliculas_tv(item):
    logger.info("streamondemand.filmstream peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries (folders)
    patron = '<div class="galleryitem".*?>\s*'
    patron += '<a href="?([^>"]+)"?.*?title="?([^>"]+)"?.*?<img.*?src="([^>"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        html = scrapertools.cache_page(scrapedurl)
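        # The plot sits between "</strong></p>" and the following "<p>&nbsp;</p>" on the detail page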
        start = html.find("</strong></p>")
        end = html.find("<p>&nbsp;</p>", start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedtitle = scrapedtitle.replace("Streaming", "")
        scrapedtitle = scrapedtitle.replace("(Serie Tv)", "{Serie Tv}")
        scrapedtitle = scrapedtitle.replace("(Serie TV)", "{Serie Tv}")
        scrapedtitle = scrapedtitle.replace("(Tv)", "{Tv}")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("(Miniserie Tv)", "{Miniserie Tv}"))
        if scrapedtitle.startswith("Permanent Link to "):
            scrapedtitle = scrapedtitle[18:]
        if (DEBUG): logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 extra=item.extra,
                 folder=True), tipo='tv'))

    # Extract the pager
    patronvideos = '<li><a href="([^"]+)">&gt;</a></li>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas_tv",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 extra=item.extra,
                 folder=True))

    return itemlist
def find_videos(data):
    encontrados = set()
    devuelve = []

    # veevr http://veevr.com/videos/kgDAMC4Btp"
    patronvideos = 'http://veevr.[\w]+/videos/([\w]+)'
    logger.info("[veevr.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for match in matches:
        titulo = "[veevr]"
        url = "http://veevr.com/videos/" + match
        if url not in encontrados:
            logger.info("  url=" + url)
            devuelve.append([titulo, url, 'veevr'])
            encontrados.add(url)
        else:
            logger.info("  url duplicada=" + url)

    # veevr http://veevr.com/embed/kgDAMC4Btp"
    patronvideos = 'http://veevr.[\w]+/embed/([\w]+)'
    logger.info("[veevr.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for match in matches:
        titulo = "[veevr]"
        url = "http://veevr.com/videos/" + match
        if url not in encontrados:
            logger.info("  url=" + url)
            devuelve.append([titulo, url, 'veevr'])
            encontrados.add(url)
        else:
            logger.info("  url duplicada=" + url)

    return devuelve
def test_video_exists(page_url):
    logger.info("[veevr.py] test_video_exists(page_url='%s')" % page_url)

    return True, ""
def lista(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data

    action = "play"
    if config.get_setting("menu_info", "freecambay"):
        action = "menu_info"

    # Extract the entries
    patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="duration">([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
        if duration:
            scrapedtitle = "%s - %s" % (duration, scrapedtitle)
        if '>HD<' in quality:
            scrapedtitle += "  [COLOR red][HD][/COLOR]"

        itemlist.append(
            item.clone(action=action,
                       title=scrapedtitle,
                       url=scrapedurl,
                       thumbnail=scrapedthumbnail,
                       fanart=scrapedthumbnail))

    # Extract the next-page marker (three site layouts: search "from_videos" counter, plain href pager, "from" counter)
    if item.extra:
        next_page = scrapertools.find_single_match(
            data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
        if next_page:
            if "from_videos=" in item.url:
                next_page = re.sub(r'&from_videos=(\d+)',
                                   '&from_videos=%s' % next_page, item.url)
            else:
                next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
                            "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
            itemlist.append(
                item.clone(action="lista",
                           title=">> Página Siguiente",
                           url=next_page))
    else:
        next_page = scrapertools.find_single_match(
            data, '<li class="next">.*?href="([^"]*)"')
        if next_page and not next_page.startswith("#"):
            next_page = urlparse.urljoin(host, next_page)
            itemlist.append(
                item.clone(action="lista",
                           title=">> Página Siguiente",
                           url=next_page))
        else:
            next_page = scrapertools.find_single_match(
                data, '<li class="next">.*?from:(\d+)')
            if next_page:
                if "from=" in item.url:
                    next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page,
                                       item.url)
                else:
                    next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
                        item.url, next_page)
                itemlist.append(
                    item.clone(action="lista",
                               title=">> Página Siguiente",
                               url=next_page))

    return itemlist
Example #39
def getmainlist(preferred_thumb=""):
    logger.info("channelselector.getmainlist")
    itemlist = []

    if config.get_setting("programs_enable_section") == "true":

        if config.get_setting("programs_enable_subsections") != "true":
            itemlist.append(
                Item(title="Programas",
                     channel="api_programas",
                     action="mainlist",
                     thumbnail=urlparse.urljoin(
                         get_thumbnail_path(preferred_thumb),
                         "menu/updated.png")))
        else:
            itemlist.append(
                Item(title="Programas",
                     channel="api_programas",
                     action="programas",
                     thumbnail=api.get_section_thumbnail("programas")))
            itemlist.append(
                Item(title="Informativos",
                     channel="api_programas",
                     action="informativos",
                     thumbnail=api.get_section_thumbnail("informativos")))
            itemlist.append(
                Item(title="Deportes",
                     channel="api_programas",
                     action="deportes",
                     thumbnail=api.get_section_thumbnail("deportes")))
            itemlist.append(
                Item(title="Series",
                     channel="api_programas",
                     action="series",
                     thumbnail=api.get_section_thumbnail("series")))
            itemlist.append(
                Item(title="Infantil",
                     channel="api_programas",
                     action="infantil",
                     thumbnail=api.get_section_thumbnail("infantil")))
            itemlist.append(
                Item(title="Cine",
                     channel="api_programas",
                     action="cine",
                     thumbnail=api.get_section_thumbnail("cine")))

    itemlist.append(
        Item(title="Canales",
             channel="channelselector",
             action="channeltypes",
             thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb),
                                        "menu/channels.png")))
    itemlist.append(
        Item(title="Directos",
             channel="directos",
             action="mainlist",
             thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb),
                                        "menu/live.png")))
    #itemlist.append( Item(title="Buscador" , channel="buscador" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"menu/search.png")) )
    itemlist.append(
        Item(title="Descargas",
             channel="descargas",
             action="mainlist",
             thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb),
                                        "menu/downloads.png")))
    itemlist.append(
        Item(title="Favoritos",
             channel="favoritos",
             action="mainlist",
             thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb),
                                        "menu/favorites.png")))
    itemlist.append(
        Item(title="Configuración",
             channel="configuracion",
             action="mainlist",
             thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb),
                                        "menu/settings.png")))
    itemlist.append(
        Item(title="Ayuda",
             channel="ayuda",
             action="mainlist",
             thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb),
                                        "menu/help.png")))

    return itemlist
Example #40
def mainlist(item):
    logger.info("pelisalacarta.channels.xo mainlist")
    item.url = "http://xo.ro/"
    return novedades(item)
Example #41
def findvideos(item):
    show = item.title.replace("Añadir esta serie a la biblioteca de XBMC", "")
    logger.info("[megaforo.py] findvideos show " + show)
    itemlist = []
    data = scrapertools.cache_page(item.url)

    if 'mega-foro' in data:
        patronimage = '<div class="inner" id="msg_\d{1,9}".*?<img src="([^"]+)".*?mega.co.nz/\#\![A-Za-z0-9\-\_]+\![A-Za-z0-9\-\_]+'
        matches = re.compile(patronimage, re.DOTALL).findall(data)
        if len(matches) > 0:
            thumbnail = matches[0]
            thumbnail = scrapertools.htmlclean(thumbnail)
            thumbnail = unicode(thumbnail, "iso-8859-1",
                                errors="replace").encode("utf-8")
            item.thumbnail = thumbnail

        patronplot = '<div class="inner" id="msg_\d{1,9}".*?<img src="[^"]+"[^/]+/>(.*?)lgf_facebook_share'
        matches = re.compile(patronplot, re.DOTALL).findall(data)
        if len(matches) > 0:
            plot = matches[0]
            plot = re.sub('&nbsp;', '', plot)
            plot = re.sub('\s\s', '', plot)
            plot = scrapertools.htmlclean(plot)
            # keep the cleaned plot (the original assigned "" here, discarding it)
            item.plot = plot

        from servers import servertools
        itemlist.extend(servertools.find_video_items(data=data))
        for videoitem in itemlist:
            videoitem.channel = __channel__
            videoitem.action = "play"
            videoitem.folder = False
            videoitem.thumbnail = item.thumbnail
            videoitem.plot = item.plot

            videoitem.title = "[" + videoitem.server + "] " + videoitem.title + " " + item.title

            videoitem.show = show
        if config.get_platform().startswith(
                "xbmc") or config.get_platform().startswith("boxee"):
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir esta serie a la biblioteca de XBMC",
                     url=item.url,
                     action="add_serie_to_library",
                     extra="findvideos"))
        return itemlist

    else:
        item.thumbnail = ""
        item.plot = ""
        from servers import servertools
        itemlist.extend(servertools.find_video_items(data=data))
        for videoitem in itemlist:
            videoitem.channel = __channel__
            videoitem.action = "play"
            videoitem.folder = False
            videoitem.thumbnail = item.thumbnail
            videoitem.plot = item.plot
            videoitem.title = "[" + videoitem.server + "] " + videoitem.title + " " + item.title
        return itemlist
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
import sys
import xbmc,time

from core import scrapertools
from core import config
from core import logger
from core.item import Item
from servers import servertools

logger.info("[library_service.py] Actualizando series...")
from platformcode.xbmc import library
from platformcode.xbmc import launcher
import xbmcgui

# Delete the folder before updating
directorio = os.path.join(config.get_library_path(),"SERIES")
logger.info("directorio=" + directorio)
import shutil

#if os.path.exists(directorio):
#    shutil.rmtree(directorio)

if not os.path.exists(directorio):
    os.mkdir(directorio)
Example #43
def mainlist(item):
    logger.info("[guardogratis.py] mainlist")
    itemlist = [
        Item(
            channel=__channel__,
            title="Film[COLOR orange]   - Novita'[/COLOR]",
            action="film",
            url="%s/movies/" % host,
            extra="movie",
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/popcorn_new.png"
        ),
        Item(
            channel=__channel__,
            title="Film[COLOR orange]   - Top IMDB[/COLOR]",
            action="film",
            url="%s/top-imdb/" % host,
            extra="movie",
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/hd_movies_P.png"
        ),
        Item(
            channel=__channel__,
            title="Film[COLOR orange]   - Animazione[/COLOR]",
            action="film",
            url="%s/genre/animazione/" % host,
            extra="movie",
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/animated_movie_P.png"
        ),
        Item(
            channel=__channel__,
            title="Film[COLOR orange]   - Categorie[/COLOR]",
            action="genere",
            url=host,
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/genres_P.png"
        ),
        Item(
            channel=__channel__,
            title="Serie TV",
            action="film",
            url="%s/series/" % host,
            extra="serie",
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/tv_series_P.png"
        ),
        Item(
            channel=__channel__,
            title="[COLOR orange]Cerca.....[/COLOR]",
            action="search",
            url=host,
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png"
        ),
        Item(
            channel=__channel__,
            title="[COLOR orange]Cerca Serie...[/COLOR]",
            action="search",
            url=host,
            extra="serie",
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png"
        )
    ]
    return itemlist
Example #44
def mainlist(item):
    logger.info("[megaforo.py] mainlist")
    itemlist = []
    if config.get_setting("megaforoaccount") != "true":
        itemlist.append(
            Item(channel=__channel__,
                 title="Habilita tu cuenta en la configuración...",
                 action="",
                 url="",
                 folder=False))
    else:
        if login():
            itemlist.append(
                Item(channel=__channel__,
                     title="Series",
                     action="foro",
                     url="http://mega-foro.com/series-de-tv/",
                     folder=True))
            itemlist.append(
                Item(channel=__channel__,
                     title="Películas",
                     action="foro",
                     url="http://mega-foro.com/peliculas/",
                     folder=True))
            itemlist.append(
                Item(channel=__channel__,
                     title="Infantil",
                     action="foro",
                     url="http://mega-foro.com/para-los-peques!/",
                     folder=True))
            itemlist.append(
                Item(channel=__channel__,
                     title="Cortos y Documentales",
                     action="foro",
                     url="http://mega-foro.com/cortos-y-documentales/",
                     folder=True))
            itemlist.append(
                Item(channel=__channel__,
                     title="Contenido Online",
                     action="foro",
                     url="http://mega-foro.com/online/",
                     folder=True))
            itemlist.append(
                Item(channel=__channel__,
                     title="Anime & Manga",
                     action="foro",
                     url="http://mega-foro.com/anime-manga/",
                     folder=True))
            itemlist.append(
                Item(channel=__channel__,
                     title="Música",
                     action="foro",
                     url="http://mega-foro.com/musica/",
                     folder=True))
        else:
            itemlist.append(
                Item(channel=__channel__,
                     title="Cuenta incorrecta, revisa la configuración...",
                     action="",
                     url="",
                     folder=False))
    return itemlist
Example #45
def get_temporadas(item):
    logger.info()

    itemlist = []
    infoLabels = {}


    # strip newlines, tabs, double spaces and HTML comments so the regex below can match
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    # the page embeds a JS object "vars.title = {...};": capture it without the
    # closing brace, then re-append "}" so it parses as JSON
    patron = 'vars.title =(.*?)};'
    try:
        data_dict = jsontools.load_json(scrapertools.get_match(data, patron) + '}')
    except:
        return itemlist  # return an empty list
    
    if item.extra == "serie_add":
        item.extra=str(data_dict['tmdb_id'])
        item.url=str(data_dict["link"])
        infoLabels['titleraw'] = data_dict["title"]
        infoLabels['tvshowtitle'] = data_dict["title"]
        infoLabels['title_id'] = data_dict['id']
        item.infoLabels = infoLabels
        itemlist= get_episodios(item)
    else:
        infoLabels = item.infoLabels
        if data_dict.has_key("actor"):
            cast=[]
            rol=[]
            for actor in data_dict["actor"]:
                cast.append(actor['name'])
                rol.append(actor['pivot']['char_name'])
            infoLabels['cast'] = cast
            infoLabels['castandrole'] = zip(cast,rol)
            
        if data_dict.has_key("writer"):    
            writers_list=[]
            for writer in data_dict["writer"]:
                writers_list.append(writer['name'])
            infoLabels['writer'] = ", ".join(writers_list )
        
        if data_dict.has_key("director"):  
            director_list=[]
            for director in data_dict["director"]:
                director_list.append(director['name'])    
            infoLabels['director'] = ", ".join(director_list )
    
        if len(data_dict["season"]) == 1:
            # If there is only one season ...
            item.extra=str(data_dict['tmdb_id'])
            item.url=str(data_dict["link"])
            item.infoLabels = infoLabels
            itemlist= get_episodios(item)
        else:  # ... or, with more than one season, list season by season ...
            item.extra=str(data_dict['tmdb_id'])
            item.viewcontent = "seasons"
            data_dict["season"].sort(key=lambda x: (x['number']))  # sort by season number
            for season in data_dict["season"]:
                url = filter(lambda l: l["season"] == season['number'], data_dict["link"])  # filter links by season
                if url:
                    if season['overview']: infoLabels['plot']=season['overview']
                    if season['number']: infoLabels['season']=season['number']
                    if season["poster"]: item.thumbnail=re.compile("/w\d{3}/").sub("/w500/",season["poster"])
                    if season["release_date"]: infoLabels['premiered']= season['release_date']

                    item.infoLabels = infoLabels
                    title=item.title + ' ' + season["title"].lower().replace('season','temporada').capitalize()
                    
                    itemlist.append( Item( channel=item.channel, action="get_episodios", title=title, url=str(url),
                                           extra=item.extra, fanart=item.fanart, text_color="0xFFFFCE9C",
                                           thumbnail=item.thumbnail, viewmode="movie_with_plot",
                                           infoLabels=item.infoLabels) )
            
            if config.get_library_support() and itemlist:
                url= urlparse.urljoin(__url_base__,"episodio-online/" + str(data_dict['id']))
                itemlist.append( Item(channel=item.channel,
                                      title="Añadir esta serie a la biblioteca", url=url,
                                      action="add_serie_to_library", extra='episodios###serie_add',
                                      show= data_dict["title"], text_color="0xFFe5ffcc",
                                      thumbnail = 'https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png'))

    return itemlist      
Example #46
def printMatches(matches):
    i = 0
    for match in matches:
        logger.info("pelisalacarta.core.scrapertools %d %s" % (i, match))
        i = i + 1
def episodios(item):
    logger.info("streamondemand.channels.guardaserie episodios")

    item.title = item.fulltitle

    itemlist = []

    ## Download the page
    data = re.sub(
        r'\t|\n|\r',
        '',
        scrapertools.anti_cloudflare(item.url, headers)
    )

    serie_id = scrapertools.get_match(data, 'id=([^"]+)" rel="nofollow" target="_blank"')

    data = scrapertools.get_match(data, '<div id="episode">(.*?)</div>')

    seasons_episodes = re.compile('<select name="episode" id="(\d+)">(.*?)</select>', re.DOTALL).findall(data)

    for scrapedseason, scrapedepisodes in seasons_episodes:

        episodes = re.compile('<option value="(\d+)"', re.DOTALL).findall(scrapedepisodes)
        for scrapedepisode in episodes:

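            # the <select>/<option> values are 0-based; convert them to 1-based
            # season/episode numbers and zero-pad the episode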
            season = str(int(scrapedseason) + 1)
            episode = str(int(scrapedepisode) + 1)
            if len(episode) == 1: episode = "0" + episode

            title = season + "x" + episode + " - " + item.show

            # Pass 'findvideos' a url made of three parts separated by the "?"
            # character (see the sketch after this function):
            # [host+path]?[arguments]?[Referer]
            url = host + "/wp-admin/admin-ajax.php?action=get_episode&id=" + serie_id + "&season=" + scrapedseason + "&episode=" + scrapedepisode + "?" + item.url

            itemlist.append(
                Item(channel=__channel__,
                     action="findvideos",
                     title=title,
                     url=url,
                     fulltitle=title,
                     show=item.show,
                     thumbnail=item.thumbnail))

    if config.get_library_support():
        itemlist.append(
            Item(channel=__channel__,
                 title="[COLOR azure]Aggiungi [/COLOR]" + item.title + "[COLOR azure] alla libreria di Kodi[/COLOR]",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))
        itemlist.append(
            Item(channel=__channel__,
                 title="[COLOR azure]Scarica tutti gli episodi della serie[/COLOR]",
                 url=item.url,
                 action="download_all_episodes",
                 extra="episodios",
                 show=item.show))

    return itemlist
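
The url assembled above packs three fields into a single string. A minimal sketch (hypothetical helper, not part of the source) of how the receiving "findvideos" side can unpack it:

def split_episode_url(url):
    # [host+path]?[arguments]?[Referer] -> ajax endpoint plus its query
    # string, and the referer page it was built from
    endpoint, args, referer = url.split("?", 2)
    return endpoint + "?" + args, referer
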
Example #48
def get_only_episodio(item):
    logger.info()
    itemlist = []
    plot={}
    
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    # same trick as in get_temporadas: re-append the closing brace so the
    # captured JS object parses as JSON
    patron = 'vars.title =(.*?)};'
    try:
        logger.debug(scrapertools.get_match(data, patron) + '}')
        data_dict = jsontools.load_json(scrapertools.get_match(data, patron) + '}')
    except:
        return itemlist  # return an empty list

    try:
        from core.tmdb import Tmdb
        oTmdb= Tmdb(id_Tmdb= data_dict['tmdb_id'],tipo="tv")
    except:
        pass

    infoLabels = item.infoLabels
    if data_dict.has_key("actor"):
        cast=[]
        rol=[]
        for actor in data_dict["actor"]:
            cast.append(actor['name'])
            rol.append(actor['pivot']['char_name'])
        infoLabels['cast'] = cast
        infoLabels['castandrole'] = zip(cast, rol)

    if data_dict.has_key("writer"):
        writers_list=[]
        for writer in data_dict["writer"]:
            writers_list.append(writer['name'])
        infoLabels['writer'] = ", ".join(writers_list)

    if data_dict.has_key("director"):
        director_list=[]
        for director in data_dict["director"]:
            director_list.append(director['name'])
        infoLabels['director'] = ", ".join(director_list)


    infoLabels['season'], infoLabels['episode']= item.extra.split('x')
    try:
        # add the episode synopsis and images
        datos_tmdb = oTmdb.get_episodio(temporada=infoLabels['season'], capitulo=infoLabels['episode'])
        if datos_tmdb["episodio_sinopsis"] != "": infoLabels['plot'] = datos_tmdb["episodio_sinopsis"]
        if datos_tmdb["episodio_imagen"] != "": item.thumbnail = datos_tmdb["episodio_imagen"]
        #if datos_tmdb["episodio_titulo"] !="": title = title + " [COLOR 0xFFFFE6CC]" + datos_tmdb["episodio_titulo"].replace('\t','') + "[/COLOR]"
    except:
        pass
    
    def cap(l): 
        try:
            temporada_link = int(l["season"])
            capitulo_link = int(l['episode'])
        except:
            return False
        return temporada_link == int(infoLabels['season']) and capitulo_link == int(infoLabels['episode'])

    item.url = str(filter(cap, data_dict["link"]))  # filter links by episode

    item.infoLabels = infoLabels
    item.extra=str(data_dict['tmdb_id'])
    
    return findvideos(item)
def findvideos(item):
    logger.info("[divxonline.py] findvideos(%s)" % item.tostring())
    itemlist = []

    # Download the page
    data = scrapertools.cachePage(item.url.replace("pelicula",
                                                   "pelicula-divx"))
    patron = '<table class="parrillaDescargas">(.*?)</table>'
    data = scrapertools.get_match(data, patron)
    '''
    <td class="numMirror"><img src="http://webs.ono.com/divx/img/filmes1.png" align="middle" alt="Ver online" title="Ver online" /> <a target="_blank" href="/video/40-putlocker/82381-007-Al-servicio-secreto-de-su-Majestad-1969.html"> <b>1</ b> <img src="http://webs.ono.com/divx/img/flecha.png" align="middle" /></a></td>
    <td class="hostParrilla"><a target="_blank" href="/video/40-putlocker/82381-007-Al-servicio-secreto-de-su-Majestad-1969.html"><img src="http://imagenes.divxonline.info/logos_servers/40.jpg" height="23" alt="Host" title="Host" /></a></td>
    <td class="idiomaParrilla"><a target="_blank" href="/video/40-putlocker/82381-007-Al-servicio-secreto-de-su-Majestad-1969.html"><img src="http://imagenes.divxonline.info/idiomas/1.png" alt="Audio" title="Audio" /></a></td>
    <td class="partesParrilla"><a target="_blank" href="/video/40-putlocker/82381-007-Al-servicio-secreto-de-su-Majestad-1969.html">1</a></td>
    <td class="uploaderParrilla"><a target="_blank" href="/video/40-putlocker/82381-007-Al-servicio-secreto-de-su-Majestad-1969.html">anonimo</a></td>
    '''
    patron = '<td class="numMirror">.*?</td>[^<]+'
    patron += '<td class="hostParrilla"><a target="_blank" href="([^"]+)"><img src="([^"]+)"'

    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, thumbnail in matches:
        scrapedurl = urlparse.urljoin(item.url, url)
        scrapedtitle = url
        try:
            scrapedtitle = scrapedtitle.split("/")[2]
        except:
            pass

        scrapedtitle = "Ver online " + scrapedtitle
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=scrapedtitle,
                 fulltitle=item.title,
                 url=scrapedurl,
                 thumbnail=thumbnail,
                 plot=item.plot,
                 folder=False))

    # Download the page
    data = scrapertools.cachePage(
        item.url.replace("pelicula", "descarga-directa"))
    patron = '<table class="parrillaDescargas">(.*?)</table>'
    data = scrapertools.get_match(data, patron)

    patron = '<td class="numMirror">.*?</td>[^<]+'
    patron += '<td class="hostParrilla"><a target="_blank" href="([^"]+)"><img src="([^"]+)"'

    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, thumbnail in matches:
        scrapedurl = urlparse.urljoin(item.url, url)
        scrapedtitle = url
        try:
            scrapedtitle = scrapedtitle.split("/")[2]
        except:
            pass

        scrapedtitle = "Descarga directa " + scrapedtitle
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=scrapedtitle,
                 fulltitle=item.title,
                 url=scrapedurl,
                 thumbnail=thumbnail,
                 plot=item.plot,
                 folder=False))

    return itemlist
Example #50
def listado(item):
    logger.info()
    itemlist = []

    try:
        data_dict = jsontools.load_json(httptools.downloadpage(item.url).data)
    except:
        return itemlist  # return an empty list

    # Filtering and search
    if item.filtro:
        for i in data_dict["result"][:]:
            if (item.filtro[0] == "genero" and item.filtro[1] not in i['genre'].lower()) or \
                (item.filtro[0] == "search" and item.filtro[1] not in i['title'].lower()):
                    data_dict["result"].remove(i)


    if not item.page:
        item.page = 0

    offset = int(item.page) * 60
    limit = offset + 60
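    # e.g. page=0 lists results 0-59, page=1 lists 60-119, and so on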
       
    for i in data_dict["result"][offset:limit]:
        infoLabels = InfoLabels()
        idioma = ''

        if item.extra == "movie":
            action= "get_movie"
            #viewcontent = 'movies'
            infoLabels["title"]= i["title"]
            title= '%s (%s)' % (i["title"], i['year'] )
            url= urlparse.urljoin(__url_base__,"ver-pelicula-online/" + str(i["id"]))

        elif item.extra=="series": 
            action="get_temporadas"
            #viewcontent = 'seasons'
            title= i["title"]
            infoLabels['tvshowtitle']= i["title"]
            url= urlparse.urljoin(__url_base__,"episodio-online/" + str(i["id"]))

        else: #item.extra=="series_novedades": 
            action="get_only_episodio"
            #viewcontent = 'episodes'
            infoLabels['season']=i['season']
            infoLabels['episode']=i['episode'].zfill(2)
            item.extra= "%sx%s" %(infoLabels["season"], infoLabels["episode"])
            infoLabels['tvshowtitle']= i["title"]
            flag= scrapertools.find_single_match(i["label"],'(\s*\<img src=.*\>)')
            idioma=i["label"].replace(flag,"")
            title = '%s %s (%s)' %(i["title"], item.extra, idioma)
            url= urlparse.urljoin(__url_base__,"episodio-online/" + str(i["id"]))
        
        if i.has_key("poster") and i["poster"]: 
            thumbnail=re.compile("/w\d{3}/").sub("/w500/",i["poster"])
        else:
            thumbnail= item.thumbnail
        if i.has_key("background") and i["background"]: 
            fanart= i["background"]
        else:
            fanart= item.fanart
        
        # Fill in the infoLabels dictionary
        infoLabels['title_id']=i['id'] # title_id: id of the movie/series on pepecine.com
        infoLabels['titleraw']= i["title"] # titleraw: raw, unformatted title
        if i['genre']: infoLabels['genre']=i['genre']
        if i['year']: infoLabels['year']=i['year']
        if i['tagline']: infoLabels['plotoutline']=i['tagline']
        if i['plot']: 
            infoLabels['plot']=i['plot']
        else:
            infoLabels['plot']=""
        if i['runtime']: infoLabels['duration']=int(i['runtime'])*60
        if i['imdb_rating']:
            infoLabels['rating']=i['imdb_rating']
        elif i['tmdb_rating']:
            infoLabels['rating']=i['tmdb_rating']
        if i['tmdb_id']: infoLabels['tmdb_id'] = i['tmdb_id']
        if i['imdb_id']: infoLabels['imdb_id'] = i['imdb_id']



        newItem = Item(channel=item.channel, action=action, title=title, url=url, extra=item.extra,
                         fanart=fanart, thumbnail=thumbnail, viewmode="movie_with_plot", #viewcontent=viewcontent,
                         language=idioma, text_color="0xFFFFCE9C", infoLabels=infoLabels)
        newItem.year=i['year']
        newItem.contentTitle=i['title']
        if 'season' in infoLabels and infoLabels['season']:
            newItem.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            newItem.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(newItem)
    
    # Pagination
    if len(data_dict["result"]) > limit:
        itemlist.append(item.clone(text_color="0xFF994D00", title=">> Pagina siguiente >>", page=item.page + 1) )
    
    return itemlist      
Example #51
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("streamondemand.servers.flashx url=" + page_url)

    # Request it once
    data = scrapertools.downloadpageWithoutCookies(page_url)
    # If a warning appears, load the check page and then the original one
    if "You try to access this video with Kodi" in data:
        url_reload = scrapertools.find_single_match(
            data, 'try to reload the page.*?href="([^"]+)"')
        url_reload = "http://www.flashx.tv" + url_reload[1:]
        try:
            data = scrapertools.downloadpageWithoutCookies(url_reload)
            data = scrapertools.downloadpageWithoutCookies(page_url)
        except:
            pass

    matches = scrapertools.find_multiple_matches(
        data, "<script type='text/javascript'>(.*?)</script>")
    # keep the first unpacked script containing a long token; an unpacked
    # block without one is a decoy
    m = ""
    for n, m in enumerate(matches):
        if m.startswith("eval"):
            try:
                m = jsunpack.unpack(m)
                fake = (scrapertools.find_single_match(m, "(\w{40,})") == "")
                if fake:
                    m = ""
                else:
                    break
            except:
                m = ""
    match = m
    if "sources:[{file:" not in match:
        page_url = page_url.replace("playvid-", "")

        headers = {
            'Host': 'www.flashx.tv',
            'User-Agent':
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cookie': ''
        }
        data = scrapertools.downloadpage(page_url, headers=headers.items())
        flashx_id = scrapertools.find_single_match(
            data, 'name="id" value="([^"]+)"')
        fname = scrapertools.find_single_match(data,
                                               'name="fname" value="([^"]+)"')
        hash_f = scrapertools.find_single_match(data,
                                                'name="hash" value="([^"]+)"')
        post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=Proceed+to+video' % (
            flashx_id, urllib.quote(fname), hash_f)
        wait_time = scrapertools.find_single_match(data,
                                                   "<span id='xxc2'>(\d+)")

        file_id = scrapertools.find_single_match(data, "'file_id', '([^']+)'")
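        # the page enforces a countdown (the 'xxc2' span captured above); it is
        # honoured below with time.sleep before posting the form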
        coding_url = 'https://files.fx.fastcontentdelivery.com/jquery2.js?fx=%s' % base64.encodestring(
            file_id)
        headers['Host'] = "files.fx.fastcontentdelivery.com"
        headers['Referer'] = "https://www.flashx.tv/"
        headers['Accept'] = "*/*"
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        coding_url = 'https://www.flashx.tv/counter.cgi?fx=%s' % base64.encodestring(
            file_id)
        headers['Host'] = "www.flashx.tv"
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        coding_url = 'https://www.flashx.tv/flashx.php?fxfx=3'
        headers['X-Requested-With'] = 'XMLHttpRequest'
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        try:
            time.sleep(int(wait_time) + 1)
        except:
            time.sleep(6)

        headers.pop('X-Requested-With')
        headers['Content-Type'] = 'application/x-www-form-urlencoded'
        data = scrapertools.downloadpage('https://www.flashx.tv/dl?playthis',
                                         post=post,
                                         headers=headers.items())

        matches = scrapertools.find_multiple_matches(
            data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
        for match in matches:
            if match.startswith("eval"):
                try:
                    match = jsunpack.unpack(match)
                    fake = (scrapertools.find_single_match(match,
                                                           "(\w{40,})") == "")
                    if fake:
                        match = ""
                    else:
                        break
                except:
                    match = ""

        if not match:
            match = data

    # Extract the URL
    # {file:"http://f11-play.flashx.tv/luq4gfc7gxixexzw6v4lhz4xqslgqmqku7gxjf4bk43u4qvwzsadrjsozxoa/video1.mp4"}
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(
        match, '\{file\:"([^"]+)",label:"([^"]+)"')
    subtitle = ""
    for media_url, label in media_urls:
        if media_url.endswith(".srt") and label == "Italian":
            try:
                from core import filetools
                data = scrapertools.downloadpage(media_url)
                subtitle = os.path.join(config.get_data_path(),
                                        'sub_flashx.srt')
                filetools.write(subtitle, data)
            except:
                import traceback
                logger.info(
                    "streamondemand.servers.flashx Error al descargar el subtítulo: "
                    + traceback.format_exc())

    for media_url, label in media_urls:
        if not media_url.endswith("png") and not media_url.endswith(".srt"):
            video_urls.append([
                "." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0,
                subtitle
            ])

    for video_url in video_urls:
        logger.info("streamondemand.servers.flashx %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
def play(item):
    logger.info("[divxonline.py] play")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    logger.info("data=" + data)

    logger.info(
        "***********************************************************************************************************************"
    )
    patron = "decodeBase64\('(.+?)'\)"
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) > 0:
        cadena = matches[0]
        validchars = " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!#$%&'()-@[]^_`{}~.<>"
        cadena = ''.join(c for c in cadena if c in validchars)
        logger.info(cadena)

    data = decryptinks(data)
    logger.info(
        "***********************************************************************************************************************"
    )
    logger.info(data)
    logger.info(
        "***********************************************************************************************************************"
    )
    itemlist = servertools.find_video_items(data=data)
    i = 1
    for videoitem in itemlist:
        videoitem.title = "Mirror %d%s" % (i, videoitem.title)
        videoitem.fulltitle = item.fulltitle
        videoitem.channel = __channel__
        i = i + 1

    return itemlist
Example #53
def mainlist(item):
    logger.info("streamondemand-pureita altadefinizione01_wiki mainlist")
    itemlist = [
        Item(
            channel=__channel__,
            title="[COLOR azure]Film[COLOR orange] - Al Cinema[/COLOR]",
            action="peliculas_new",
            url=host,
            extra="movie",
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/popcorn_cinema_P.png"
        ),
        Item(
            channel=__channel__,
            title="[COLOR azure]Film[COLOR orange] - Raccomandati[/COLOR]",
            action="peliculas_top",
            url=host,
            extra="movie",
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movies_P.png"
        ),
        Item(
            channel=__channel__,
            title="[COLOR azure]Film[COLOR orange] - Aggiornati[/COLOR]",
            action="peliculas_topnew",
            url=host,
            extra="movie",
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movies_P.png"
        ),
        Item(
            channel=__channel__,
            title="[COLOR azure]Film[COLOR orange] - Novita'[/COLOR]",
            action="peliculas",
            url=host + "page/2/",
            extra="movie",
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movie_new_P.png"
        ),
        Item(
            channel=__channel__,
            title="[COLOR azure]Film[COLOR orange] - Categorie[/COLOR]",
            action="categorias",
            url=host,
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/genres_P.png"
        ),
        #Item(channel=__channel__,
        #title="[COLOR azure]Film[COLOR orange] - Anno[/COLOR]",
        #action="categorias_years",
        #url=host,
        #thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movie_year_P.png"),
        #Item(channel=__channel__,
        #title="[COLOR azure]Film[COLOR orange] - Paese[/COLOR]",
        #action="categorias_country",
        #url=host,
        #thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movie_country_P.png"),
        Item(
            channel=__channel__,
            title="[COLOR yellow]Cerca ...[/COLOR]",
            action="search",
            thumbnail=
            "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png"
        )
    ]

    return itemlist
def movielist(item):  # movies without a detail page (genre listings)
    logger.info("[divxonline.py] movielist")
    itemlist = []

    # Download the page
    data = scrapertools.cachePage(item.url)
    #logger.info(data)

    data = stepinto(item.url, data, 'Ver página:(.*?)</p>')

    # Extract the entries (folders)
    patronvideos = '<li><h2><a href="([^"]+?)">(.*?)</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)

    if (Generate):
        f = open(config.DATA_PATH + '/films.tab',
                 'w')  # file used to obtain the scores

    for match in matches:
        # Title
        scrapedtitle = remove_html_tags(match[1])
        if (not Generate and Notas):
            score = anotador.getscore(remove_html_tags(match[1]))
            if (score != ""):
                scrapedtitle += " " + score

        # URL
        scrapedurl = urlparse.urljoin(item.url,
                                      match[0])  # url of the divxonline detail page
        scrapedurl = scrapedurl.replace(
            "pelicula", "pelicula-divx")  # url of the playback page

        # Thumbnail
        #scrapedthumbnail = urlparse.urljoin(url,match[1])
        scrapedthumbnail = ""

        # process the rest
        scrapedplot = ""

        # Debugging
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        if (Generate):
            sanio = re.search('(.*?)\((.*?)\)', scrapedtitle)
            if (sanio):  # if there is a year
                fareg = sanio.group(1) + "\t" + sanio.group(
                    2) + "\t" + scrapedtitle
            else:
                fareg = scrapedtitle + "\t\t" + scrapedtitle
            f.write(fareg + "\n")
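            # each line written is "<title>\t<year>\t<full title>" ("\t\t" when no year)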

        # Add to the XBMC listing
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    if (Generate):
        f.close()

    return itemlist
def episodios(item):
    def load_episodios(html, item, itemlist, lang_title):
        for data in scrapertools.decodeHtmlentities(html).splitlines():
            # Extract the entries
            end = data.find('<a ')
            if end > 0:
                scrapedtitle = re.sub(r'<[^>]*>', '', data[:end]).strip()
            else:
                scrapedtitle = ''
            if scrapedtitle == '':
                patron = '<a\s*href="[^"]+"\s*target="_blank">(.*?)</a>'
                scrapedtitle = scrapertools.find_single_match(data, patron)
                scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
            title = scrapertools.find_single_match(scrapedtitle,
                                                   '\d+[^\d]+\d+')
            if title == '':
                title = scrapedtitle
            if title != '':
                itemlist.append(
                    Item(channel=__channel__,
                         action="findvid_serie",
                         title=title + " (" + lang_title + ")",
                         url=item.url,
                         thumbnail=item.thumbnail,
                         extra=data,
                         fulltitle=item.fulltitle,
                         show=item.show))

    logger.info("streamondemand.liberoita episodios")

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    lang_titles = []
    starts = []
    patron = r"STAGIONE.*?ITA"
    matches = re.compile(patron, re.IGNORECASE | re.DOTALL).finditer(data)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            lang_titles.append('SUB ITA' if 'SUB' in
                               season_title.upper() else 'ITA')
            starts.append(match.end())

    i = 1
    len_lang_titles = len(lang_titles)
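    # each slice data[starts[i-1]:starts[i]] holds the episode list of one
    # season; the last season runs to the end of the page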

    while i <= len_lang_titles:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_lang_titles else -1

        html = data[inizio:fine]
        lang_title = lang_titles[i - 1]

        load_episodios(html, item, itemlist, lang_title)

        i += 1

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=__channel__,
                 title=item.title,
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))
        itemlist.append(
            Item(channel=item.channel,
                 title="Scarica tutti gli episodi della serie",
                 url=item.url,
                 action="download_all_episodes",
                 extra="episodios",
                 show=item.show))

    return itemlist
Example #56
def find_videos(data):
    encontrados = set()
    devuelve = []

    # http://netu.tv/player/embed_player.php?vid=82U4BRSOB4UU&autoplay=no
    patronvideos = 'netu.tv/player/embed_player.php\?vid\=([A-Z0-9]+)'
    logger.info("[netutv.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for match in matches:
        titulo = "[netu.tv]"
        url = "http://netu.tv/watch_video.php?v=" + match
        if url not in encontrados:
            logger.info("  url=" + url)
            devuelve.append([titulo, url, 'netutv'])
            encontrados.add(url)
        else:
            logger.info("  url duplicada=" + url)

    # http://netu.tv/watch_video.php?v=96WDAAA71A8K
    patronvideos = 'netu.tv/watch_video.php\?v\=([A-Z0-9]+)'
    logger.info("[netutv.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for match in matches:
        titulo = "[netu.tv]"
        url = "http://netu.tv/watch_video.php?v=" + match
        if url not in encontrados:
            logger.info("  url=" + url)
            devuelve.append([titulo, url, 'netutv'])
            encontrados.add(url)
        else:
            logger.info("  url duplicada=" + url)

    return devuelve
def episodios(item):
    logger.info("[seriesdanko.py] episodios")

    if config.get_platform() == "xbmc" or config.get_platform() == "xbmcdharma":
        import xbmc
        if config.get_setting("forceview") == "true":
            xbmc.executebuiltin("Container.SetViewMode(53)")  #53=icons
            #xbmc.executebuiltin("Container.Content(Movies)")

    if "|" in item.url:
        url = item.url.split("|")[0]
        sw = True
    else:
        url = item.url
        sw = False
    # Download the page
    if item.extra:

        contenidos = item.extra
        #print contenidos
    else:
        data = scrapertools.downloadpageWithoutCookies(url)

        # Extract the entries
        if sw:
            try:
                datadict = eval("(" + data + ")")
                data = urllib.unquote_plus(
                    datadict["entry"]["content"]["$t"].replace("\\u00", "%"))
                matches = []
                matches.append(data)
            except:
                matches = []
        else:
            patronvideos = "entry-content(.*?)<div class='blog-pager' id='blog-pager'>"
            matches = re.compile(patronvideos, re.DOTALL).findall(data)

        if len(matches) > 0:
            contenidos = matches[0].replace('"', "'").replace("\n", "")
        else:
            contenidos = item.url
            if sw:
                url = item.url.split("|")[1]
                if not url.startswith("http://"):
                    url = urlparse.urljoin("http://seriesdanko.com", url)
                # Download the page
                data = scrapertools.downloadpageGzip(url)
                patronvideos = "entry-content(.*?)<div class='post-footer'>"
                matches = re.compile(patronvideos, re.DOTALL).findall(data)
                if len(matches) > 0:
                    contenidos = matches[0]

    patronvideos = "<a href='([^']+)'>([^<]+)</a> <img(.+?)/>"
    matches = re.compile(patronvideos,
                         re.DOTALL).findall(contenidos.replace('"', "'"))
    #print contenidos
    try:
        plot = re.compile(r'(Informac.*?/>)</div>').findall(contenidos)[0]
        if len(plot) == 0:
            plot = re.compile(r"(Informac.*?both;'>)</div>").findall(
                contenidos)[0]
        plot = re.sub('<[^>]+>', " ", plot)
    except:
        plot = ""

    itemlist = []
    for match in matches:
        scrapedtitle = match[1].replace("\n", "").replace("\r", "")
        scrapedtitle = scrapertools.remove_show_from_title(
            scrapedtitle, item.show)

        #[1x01 - Capitulo 01]
        #patron = "(\d+x\d+) - Capitulo \d+"
        #matches = re.compile(patron,re.DOTALL).findall(scrapedtitle)
        #print matches
        #if len(matches)>0 and len(matches[0])>0:
        #    scrapedtitle = matches[0]

        if "es.png" in match[2]:
            subtitle = " (Español)"
        elif "la.png" in match[2]:
            subtitle = " (Latino)"
        elif "vo.png" in match[2]:
            subtitle = " (Version Original)"
        elif "vos.png" in match[2]:
            subtitle = " (Subtitulado)"
        elif "ca.png" in match[2]:
            subtitle = " (Catalan)"
        elif "ga.jpg" in match[2]:
            subtitle = " (Gallego)"
        elif "eu.jpg" in match[2]:
            subtitle = " (Euskera)"
        elif "ba.png" in match[2]:
            subtitle = " (Bable)"
        else:
            subtitle = ""
        scrapedplot = plot
        scrapedurl = urlparse.urljoin(item.url,
                                      match[0]).replace("\n",
                                                        "").replace("\r", "")
        if not item.thumbnail:
            try:
                scrapedthumbnail = re.compile(r"src=([^']+)'").findall(
                    contenidos)[0]
            except:
                scrapedthumbnail = ""
        else:
            scrapedthumbnail = item.thumbnail
        scrapedthumbnail = scrapedthumbnail.replace("\n", "").replace("\r", "")
        if item.fulltitle == '':
            item.fulltitle = scrapedtitle + subtitle
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        # Add to the XBMC listing
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title=scrapedtitle + subtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 fulltitle=item.fulltitle,
                 show=item.show,
                 context="4",
                 folder=True))

    #xbmc.executebuiltin("Container.Content(Movies)")

    if len(itemlist) == 0:
        listvideos = servertools.findvideos(contenidos)

        for title, url, server in listvideos:

            if server == "youtube":
                scrapedthumbnail = "http://i.ytimg.com/vi/" + url + "/0.jpg"
            else:
                scrapedthumbnail = item.thumbnail
            scrapedtitle = title
            scrapedplot = ""
            scrapedurl = url

            if (DEBUG):
                logger.info("title=[" + scrapedtitle + "], url=[" +
                            scrapedurl + "], thumbnail=[" + scrapedthumbnail +
                            "]")

            # Add to the XBMC listing
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     server=server,
                     title=item.title + " " + scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     plot=scrapedplot,
                     fulltitle=scrapedtitle,
                     folder=False))

    return itemlist
def episodios(item):
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    # get the total number of episodes
    total_episode = 0

    patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    # data_info = scrapertools.find_single_match(data, '<div class="info">.+?<\/div><\/div>')
    patron_info = '<img src="([^"]+)">.+?<\/span>([^"]+)<\/p><p><span>I.+?Reseña: <\/span>(.+?)<\/p><\/div>'
    scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(
        data, patron_info)
    scrapedthumbnail = host + scrapedthumbnail

    for cap, link, name in matches:

        title = ""
        pat = "as/sd"
        # several episodes in one link (the site joins their names with "as/sd")
        if len(name.split(pat)) > 1:
            i = 0
            for pos in name.split(pat):
                i = i + 1
                total_episode += 1
                season, episode = renumbertools.numbered_for_tratk(
                    item.channel, item.show, 1, total_episode)
                if len(name.split(pat)) == i:
                    title += "{0}x{1:02d} ".format(season, episode)
                else:
                    title += "{0}x{1:02d}_".format(season, episode)
        else:
            total_episode += 1
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, total_episode)

            title += "{0}x{1:02d} ".format(season, episode)

        url = host + "/" + link
        if "disponible" in link:
            title += "No Disponible aún"
        else:
            title += name
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title,
                     url=url,
                     show=show,
                     plot=scrapedplot,
                     thumbnail=scrapedthumbnail))

    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la biblioteca de Kodi",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=show))

    return itemlist
Example #59
def find_videos(text):
    encontrados = set()
    devuelve = []

    # Every supported URL shape funnels into the same firedrive embed URL,
    # except firedrive.ch links, which keep their own host. Each entry is
    # (regex, URL template); the example above each pattern shows the kind
    # of link it matches.
    patrones = [
        # http://www.peliculasaudiolatino.com/show/firedrive.php?url=CEE0B3A7DDFED758
        (r'(?:firedrive|putlocker)\.php\?url=([A-Z0-9]+)',
         "http://www.firedrive.com/embed/%s"),
        # http://www.firedrive.com/embed/CEE0B3A7DDFED758 | http://www.firedrive.com/file/CEE0B3A7DDFED758
        (r'(?:firedrive|putlocker)\.com/(?:file|embed)/([A-Z0-9]+)',
         "http://www.firedrive.com/embed/%s"),
        # //www.cinezer.com/firedrive/CD6003D971725774
        (r'/(?:firedrive|putlocker)/([A-Z0-9]+)',
         "http://www.firedrive.com/embed/%s"),
        # http://www.firedrive.ch/file/0e6f1eeb473e0d87b390a71cd50c24a2/
        (r'((?:firedrive|putlocker)\.ch/file/[a-z0-9]+)',
         "http://www.%s/"),
        # http://www.player3k.info/firedrive/?id=92FA671A11CA7A05
        (r'/(?:firedrive|putlocker)/\?id=([A-Z0-9]+)',
         "http://www.firedrive.com/embed/%s"),
        # http://www.yaske.net/archivos/firedrive/play.php?v=D68E78CBA144AE59
        (r'(?:firedrive|putlocker)/play\.php\?v=([A-Z0-9]+)',
         "http://www.firedrive.com/embed/%s"),
        # http://www.cinetux.org/video/firedrive.php?id=31A2C1B48C5F8969
        (r'(?:firedrive|putlocker)\.php\?id=([A-Z0-9]+)',
         "http://www.firedrive.com/embed/%s"),
    ]

    for patronvideos, plantilla in patrones:
        logger.info("[firedrive.py] find_videos #" + patronvideos + "#")
        matches = re.compile(patronvideos, re.DOTALL).findall(text)

        for match in matches:
            titulo = "[firedrive]"
            url = plantilla % match
            if url not in encontrados:
                logger.info("  url=" + url)
                devuelve.append([titulo, url, 'firedrive'])
                encontrados.add(url)
            else:
                logger.info("  duplicate url=" + url)

    return devuelve
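# A minimal usage sketch for find_videos (the sample HTML below is
# hypothetical; the URL shapes come from the examples in the pattern table
# above). Both links normalize to the same embed URL, so the dedup set in
# find_videos keeps only the first one:
#
#   sample = (
#       "<a href='http://www.peliculasaudiolatino.com/show/firedrive.php?url=CEE0B3A7DDFED758'>ver</a>"
#       "<iframe src='http://www.firedrive.com/embed/CEE0B3A7DDFED758'></iframe>"
#   )
#   for titulo, url, server in find_videos(sample):
#       logger.info(titulo + " " + url + " " + server)
#   # -> [firedrive] http://www.firedrive.com/embed/CEE0B3A7DDFED758 firedrive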
def findvideos(item):
    logger.info("[seriesdanko.py] findvideos")

    # Download the page
    if config.get_platform() == "xbmceden":
        from core.subtitletools import saveSubtitleName
        saveSubtitleName(item)

    if "seriesdanko.com" in item.url:
        data = scrapertools.downloadpageGzip(item.url).replace("\n", "")
        patronvideos = "<tr><td class=('tam12'>.*?)</td></tr>"
        matches = re.compile(patronvideos, re.DOTALL).findall(data)
        itemlist = []
        for match in matches:
            try:
                scrapedurl = urlparse.urljoin(
                    item.url,
                    re.compile(r"href='(.+?)'").findall(match)[0])
            except:
                continue

            try:
                scrapedthumbnail = re.compile(r"src='(.+?)'").findall(match)[1]
                if "megavideo" in scrapedthumbnail:
                    mega = " [Megavideo]"
                elif "megaupload" in scrapedthumbnail:
                    mega = " [Megaupload]"
                else:
                    mega = ""
                if not scrapedthumbnail.startswith("http"):
                    scrapedthumbnail = urlparse.urljoin(
                        item.url, scrapedthumbnail)
            except:
                continue
            try:
                # The flag image on the row encodes the language track
                subtitle = re.compile(r"src='(.+?)'").findall(match)[0]
                if "es.png" in subtitle:
                    subtitle = " (Español)"
                elif "la.png" in subtitle:
                    subtitle = " (Latino)"
                elif "vo.png" in subtitle:
                    subtitle = " (Version Original)"
                elif "vos.png" in subtitle:
                    subtitle = " (Subtitulado)"
                elif "ca.png" in subtitle:
                    subtitle = " (Catalan)"
                elif "ga.jpg" in subtitle:
                    subtitle = " (Gallego)"
                elif "eu.jpg" in subtitle:
                    subtitle = " (Euskera)"
                elif "ba.png" in subtitle:
                    subtitle = " (Bable)"
                else:
                    subtitle = " (desconocido)"

                try:
                    opcion = re.compile(r"(Ver|Descargar)").findall(match)[0]
                except:
                    opcion = "Ver"

                scrapedtitle = opcion + " video" + subtitle + mega
            except:
                scrapedtitle = item.title
            scrapedplot = ""

            # Add the entry to the XBMC listing
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     plot=scrapedplot,
                     fulltitle=item.fulltitle,
                     extra=item.thumbnail,
                     fanart=item.thumbnail,
                     folder=False))

    else:
        from core import servertools
        itemlist = servertools.find_video_items(item)

    return itemlist
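# Hypothetical usage sketch for findvideos (the Item fields mirror the ones
# this channel sets elsewhere; the episode URL is a made-up placeholder):
#
#   item = Item(channel=__channel__, action="findvideos",
#               title="Ver video (Español)", fulltitle="Serie 1x01",
#               url="http://seriesdanko.com/...")
#   for enlace in findvideos(item):
#       logger.info(enlace.title + " -> " + enlace.url)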