コード例 #1
0
def categorias(item):
    """Build the list of category folders scraped from the page at item.url."""
    results = []

    # Fetch the page and narrow it to the first <ul> block.
    page = scrapertools.cache_page(item.url, headers=headers)
    section = scrapertools.get_match(page, '<ul>(.*?)</ul>')

    # Each match yields (url, title, trailing count text).
    entry_re = re.compile('<a href="([^"]+)" >(.*?)</a>(.*?)\s*</li>', re.DOTALL)
    found = entry_re.findall(section)
    scrapertools.printMatches(found)

    for link, label, total in found:
        plot = ""   # unused, kept for parity with the original trace
        thumb = ""  # unused, kept for parity with the original trace
        label = scrapertools.decodeHtmlentities(label.replace("Animazione", ""))
        link = scrapertools.decodeHtmlentities(link.replace("%s/category/animazione/" % host, ""))
        if DEBUG:
            logger.info("title=[" + label + "], url=[" + link + "]")
        results.append(
            Item(channel=__channel__,
                 action="pelicat",
                 title="[COLOR azure]" + label + "[/COLOR][COLOR gray]" + total + "[/COLOR]",
                 url=link,
                 thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png",
                 folder=True))

    return results
コード例 #2
0
def episodi(item):
    """List the episodes of a series page (GuardaSerieOnline).

    Builds one Item per episode scraped from the page, plus an
    "Aggiungi alla libreria" entry when library support is enabled.
    """
    logger.info("[GuardaSerieOnline.py]==> episodi")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers=headers)

    # Fix: the original pattern used [meta-src|data-original]*, which is a
    # character CLASS — it matches any run of those letters (including a
    # literal '|'), not the two attribute names. (?:meta-src|data-original)*
    # is the intended alternation; combined with the lazy .*? before it, the
    # set of strings matched is unchanged, but the intent is now explicit.
    patron = r'<img\s*.*?(?:meta-src|data-original)*="([^"]+)"\s*/>[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>'
    patron += r'[^>]+>[^>]+>([^<]+)*<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'
    patron += r'[^>]+>[^>]+>[^>]+>\s*<span\s*.*?(meta-embed="[^"]+">)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedep, scrapedeptitle, scrapedextra in matches:
        scrapedeptitle = scrapertools.decodeHtmlentities(scrapedeptitle).strip()
        scrapedep = scrapertools.decodeHtmlentities(scrapedep).strip()
        # "number - title" when a title exists, otherwise just the number.
        scrapedtitle = "%s - %s" % (scrapedep, scrapedeptitle) if scrapedeptitle != "" else scrapedep
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url="",
                 contentType="episode",
                 extra=scrapedextra,   # embed marker consumed by findvideos
                 thumbnail=scrapedthumbnail,
                 folder=True))

    # Offer library integration only when at least one episode was scraped.
    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=__channel__,
                 title="Aggiungi alla libreria",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodi",
                 show=item.show))

    return itemlist
コード例 #3
0
def peliculas(item):
    """Movie listing for bibliotrailer; titles are derived from URL slugs."""
    logger.info("streamondemand.bibliotrailer peliculas")
    itemlist = []

    # Fetch the listing page.
    data = scrapertools.cache_page(item.url)

    # Entries: (link, thumbnail) pairs from the post thumbs.
    entry_pattern = ('<div class=\'post-thumb\'>\s*'
                     '<a href=\'(.*?)\' style=[^:]+:url\((.*?)\)[^>]+>[^>]+>')
    entries = re.compile(entry_pattern, re.DOTALL).findall(data)
    scrapertools.printMatches(entries)

    for link, thumb in entries:
        # Turn the URL slug into a readable title.
        label = link
        label = scrapertools.decodeHtmlentities(label.replace("http://www.bibliotrailer.it/", ""))
        label = scrapertools.decodeHtmlentities(label.replace("/", ": "))
        label = scrapertools.decodeHtmlentities(label.replace("-", " "))
        label = scrapertools.decodeHtmlentities(label.replace(".html", ""))
        summary = label
        if DEBUG:
            logger.info("title=[" + label + "], url=[" + link + "], thumbnail=[" + thumb + "]")
        itemlist.append(Item(channel=__channel__, action="findvideos", title=label, url=link, thumbnail=thumb, plot=summary, folder=True))

    # Pager link (embedded in the pageNavi javascript).
    pager_pattern = '<a href="\'\+f(.*?)\+\'">\'\+pageNaviConf.nextText\+'
    pager = re.compile(pager_pattern, re.DOTALL).findall(data)
    scrapertools.printMatches(pager)

    if pager:
        next_url = urlparse.urljoin(item.url, pager[0])
        # NOTE(review): prefixing the urljoin result with `sito` looks odd —
        # confirm against the channel's other pagers.
        itemlist.append(Item(channel=__channel__, action="peliculas", title="[COLOR orange]Avanti >>[/COLOR]", url=sito + next_url, folder=True))

    return itemlist
コード例 #4
0
def peliculas(item):
    """Movie listing for liberostreaming: entries plus 'Avanti' pager."""
    logger.info("streamondemand.liberostreaming peliculas")
    itemlist = []

    # Download the page.
    data = scrapertools.cache_page(item.url, headers=headers)

    # Entry tuples: (url, thumbnail, plot, title).
    row_re = re.compile('<div class="entry-thumbnails"><a class=[^=]+="(.*?)"><img[^s]+src="(.*?)" class=[^=]+="(.*?)" title="(.*?)"/>', re.DOTALL)
    rows = row_re.findall(data)
    scrapertools.printMatches(rows)

    for link, thumb, raw_plot, raw_title in rows:
        name = scrapertools.decodeHtmlentities(raw_title)
        name = scrapertools.decodeHtmlentities(name.replace("Streaming", ""))
        summary = scrapertools.decodeHtmlentities(raw_plot)
        if DEBUG:
            logger.info("title=[" + name + "], url=[" + link + "], thumbnail=[" + thumb + "]")
        itemlist.append(Item(channel=__channel__, action="findvideos", fulltitle=name, show=name, title=name, url=link, thumbnail=thumb, plot=summary, folder=True))

    # Pager.
    pager = re.compile('<a class="nextpostslink" rel="next" href="(.*?)">»</a>', re.DOTALL).findall(data)
    scrapertools.printMatches(pager)

    if pager:
        next_url = urlparse.urljoin(item.url, pager[0])
        itemlist.append(Item(channel=__channel__, action="peliculas", title="[COLOR orange]Avanti >>[/COLOR]", url=next_url, folder=True))

    return itemlist
コード例 #5
0
def episodios(item):
    """Episode list for 'anime sub ita': one Item per hoster link."""
    logger.info("anime sub ita - episodianime")

    itemlist = []

    # Download the page (behind cloudflare).
    page = anti_cloudflare(item.url)
    # Tuples: (leading text, link, hoster name).
    ep_re = re.compile('(.*?)<a href="(.*?)" target="_blank" rel="nofollow".*?>(.*?)</a>')

    for label, link, server in ep_re.findall(page):
        label = scrapertools.decodeHtmlentities(label)
        link = scrapertools.decodeHtmlentities(link)
        # Strip a leading paragraph tag left over from the lazy match.
        if label.startswith("<p>"):
            label = label[3:]

        itemlist.append(
            Item(channel=__channel__,
                 action="find_video_items",
                 title="[COLOR red]" + label + " [/COLOR]" + "[COLOR azure]" + item.fulltitle + " [/COLOR]" + "[COLOR orange] [" + server + "][/COLOR]",
                 url=link,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show))

    return itemlist
コード例 #6
0
ファイル: boxingclub.py プロジェクト: Jmlaguna89/miNuevoRepo
def novedades_pokeryour(item):
    """List new pokeryour entries, fetched through a three-step chain of
    Google Translate redirects so the (Russian) page text arrives
    translated to Spanish. Returns a list of playable Items plus an
    optional ">> Siguiente" pager entry.
    """
    logger.info("pelisalacarta.channels.boxingclub novedades_pokeryour")
    itemlist = []
    ## Request 1: Google Translate wrapper page for the channel URL.
    url = "http://translate.google.com/translate?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u="+item.url
    data = scrapertools.decodeHtmlentities( scrapertools.downloadpage(url,follow_redirects=False) )
    ## Request 2: the frame URL embedded in the wrapper page.
    url = scrapertools.get_match(data, ' src="([^"]+)" name=c ')
    data = scrapertools.decodeHtmlentities( scrapertools.downloadpage(url,follow_redirects=False) )
    ## Request 3: follow the meta-refresh URL to the translated content.
    url = scrapertools.get_match(data, 'URL=([^"]+)"')
    data = scrapertools.decodeHtmlentities( scrapertools.cachePage(url) )
    # Flatten whitespace and drop a fixed translated footer fragment.
    data = re.sub(r"\n|\r|\t|</span> comentario de Rusia.</span>", '', data)

    # One block per media item; the pattern below pulls (title, url,
    # thumbnail path, plot) out of each block.
    bloque_entradas = scrapertools.find_multiple_matches(data, '<div class="item column(.*?)<div class=item-separator>')
    for bloque in bloque_entradas:
        patron = 'title="([^>]+)>.*?<a href=([^>]+)>.*?' \
                 '<img src=(/sport/media/com_hwdmediashare/files/[^\s]+).*?' \
                 '<dd class=media-info-description>.*?</span>(.*?)</span>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedplot  in matches:
            # Thumbnail path is site-relative.
            scrapedthumbnail = host + scrapedthumbnail

            # Strip machine-translation boilerplate from the title.
            scrapedtitle = scrapedtitle.replace("vídeo de alta definición","HD").replace('::"','')
            scrapedtitle = re.sub(r'(?i)- tarjeta principal|tarjeta de|tarjeta|en línea de|el vídeo|el video|vídeo|video|en línea|en ruso|::','',scrapedtitle)
            # Guarantee a '/' so the two-color split below always works.
            if not "/" in scrapedtitle: scrapedtitle += "/"
            scrapedtitle = "[COLOR darkorange]"+scrapedtitle.split("/",1)[0]+"/[/COLOR][COLOR red]"+scrapedtitle.split("/",1)[1]+"[/COLOR]"
            # Unwrap the Google-Translate proxy URL back to the real one.
            scrapedurl = scrapedurl.replace("http://translate.googleusercontent.com/translate_c?depth=2&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=","")
            scrapedurl = urllib.unquote(scrapedurl)
            itemlist.append(Item(channel=__channel__, title=scrapedtitle, action="play", url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    
    next_page = scrapertools.find_single_match(data, '<li class=pagination-next>.*?href=([^\s]+)')
    if next_page != "":
        itemlist.append(Item(channel=__channel__, title=">> Siguiente", action="novedades_pokeryour", url=next_page, thumbnail=item.thumbnail, folder=True))
    return itemlist
コード例 #7
0
ファイル: tantifilm.py プロジェクト: umbvitt/pelisalacartait
def peliculas(item):
    """Movie listing for tantifilm: media3 grid entries plus pager."""
    logger.info("pelisalacarta.tantifilm peliculas")
    itemlist = []

    # Download the listing page.
    data = scrapertools.cache_page(item.url, headers=headers)

    # (url, thumbnail, title) per entry.
    entry_re = re.compile('<div class="media3">[^>]+><a href="(.*?)"><img[^s]+src="(.*?)"[^>]+></a><[^>]+><a[^>]+><p>(.*?)</p></a></div>', re.DOTALL)
    entries = entry_re.findall(data)
    scrapertools.printMatches(entries)

    for link, thumb, name in entries:
        # Pull the plot from the entry's detail page.
        detail = scrapertools.cache_page(link, headers=headers)
        begin = detail.find("<div class=\"content-left-film\">")
        finish = detail.find("</div>", begin)
        summary = detail[begin:finish]
        summary = re.sub(r'<[^>]*>', '', summary)
        summary = scrapertools.decodeHtmlentities(summary)
        name = scrapertools.decodeHtmlentities(name)
        name = name.replace("streaming", "")
        if DEBUG:
            logger.info("title=[" + name + "], url=[" + link + "], thumbnail=[" + thumb + "]")
        itemlist.append(Item(channel=__channel__, action="findvideos", title="[COLOR azure]" + name + "[/COLOR]", url=link, thumbnail=thumb, plot=summary, folder=True))

    # Pager.
    pager = re.compile('<a class="nextpostslink" rel="next" href="(.*?)">»</a>', re.DOTALL).findall(data)
    scrapertools.printMatches(pager)

    if pager:
        next_url = urlparse.urljoin(item.url, pager[0])
        itemlist.append(Item(channel=__channel__, action="peliculas", title="[COLOR orange]Successivo>>[/COLOR]", url=next_url, thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png", folder=True))

    return itemlist
コード例 #8
0
def peliculas(item):
    """Movie listing for italiafilm: one Item per <article>, plus pager.

    The plot is scraped from each movie's own detail page.
    """
    logger.info("[italiafilm.py] peliculas")
    itemlist = []

    data = scrapertools.cachePage(item.url)
    patron = '<article(.*?)</article>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for match in matches:
        title = scrapertools.find_single_match(match, '<h3[^<]+<a href="[^"]+"[^<]+>([^<]+)</a>')
        title = scrapertools.htmlclean(title).strip()
        title = scrapertools.decodeHtmlentities(title.replace("Streaming", ""))
        url = scrapertools.find_single_match(match, '<h3[^<]+<a href="([^"]+)"')
        # Fetch the detail page to scrape the plot text between the
        # intro paragraph and the next heading.
        html = scrapertools.cache_page(url)
        start = html.find("<p><br/>")
        end = html.find("</h2>", start)
        plot = html[start:end]
        plot = re.sub(r'<[^>]*>', '', plot)
        plot = scrapertools.decodeHtmlentities(plot)
        thumbnail = scrapertools.find_single_match(match, 'data-echo="([^"]+)"')

        if DEBUG:
            logger.info("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

        itemlist.append(Item(channel=__channel__, action='findvideos', title="[COLOR azure]" + title + "[/COLOR]", url=url, thumbnail=thumbnail, fanart=thumbnail, plot=plot, viewmode="movie_with_plot", folder=True))

    # Next page. Fix: the original wrapped get_match in a bare `except:`,
    # which silently swallowed every exception (including KeyboardInterrupt
    # and SystemExit). find_single_match returns "" when there is no match,
    # matching the pagination style of the other channels in this file.
    pagina_siguiente = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
    if pagina_siguiente != "":
        itemlist.append(Item(channel=__channel__, action="peliculas", title="[COLOR orange]Successivo >> [/COLOR]", url=pagina_siguiente, thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png", folder=True))

    return itemlist
コード例 #9
0
ファイル: casacinema.py プロジェクト: umbvitt/pelisalacartait
def peliculas( item ):
    """Movie listing for casacinema; plot scraped from each detail page."""
    logger.info( "pelisalacarta.casacinema peliculas" )

    itemlist = []

    # Download the listing page.
    data = scrapertools.cache_page( item.url )

    # (url, title, thumbnail) per movie box.
    box_pattern = ('<div class="box-single-movies">\s*'
                   '<a href="([^>"]+)".*?title="([^>"]+)" >.*?<img class.*?<img.*?src="([^>"]+)"')
    boxes = re.compile(box_pattern, re.DOTALL).findall(data)

    for link, raw_title, thumb in boxes:
        name = scrapertools.decodeHtmlentities(raw_title)
        # Scrape the plot from the detail page (between the content row
        # and the facebook-like widget).
        detail = scrapertools.cache_page(link)
        begin = detail.find("<div class=\"row content-post\" >")
        finish = detail.find("<a class=\"addthis_button_facebook_like\" fb:like:layout=\"button_count\"></a>", begin)
        summary = detail[begin:finish]
        summary = re.sub(r'<[^>]*>', '', summary)
        summary = scrapertools.decodeHtmlentities(summary)
        itemlist.append(Item(channel=__channel__, action="findvideos", title="[COLOR azure]" + name + "[/COLOR]", url=link, thumbnail=thumb, fulltitle=name, show=name, plot=summary, viewmode="movie_with_plot"))

    # Pagination.
    next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)"')

    if next_page != "":
        itemlist.append(Item(channel=__channel__, action="peliculas", title="[COLOR orange]Successivo >>[/COLOR]", url=next_page, thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
コード例 #10
0
ファイル: streamingfilmit.py プロジェクト: orione7/Italorione
def categorias(item):
    """Category list scraped from the categories-module menu."""
    itemlist = []

    # Download and isolate the categories <ul>.
    data = scrapertools.cache_page(item.url)
    menu = scrapertools.get_match(data, '<ul class="categories-module">(.*?)</ul>')

    for link, label in re.compile('href="([^"]+)">(.*?)</a>', re.DOTALL).findall(menu):
        # Blank out the adult category's title and link.
        label = scrapertools.decodeHtmlentities(label.replace("PORNO", ""))
        link = scrapertools.decodeHtmlentities(link.replace("/index.php/film-in-streaming/porno.html", ""))
        if DEBUG:
            logger.info("title=[" + label + "], url=[" + link + "]")
        itemlist.append(
            Item(
                channel=__channel__,
                action="elenco",
                title="[COLOR azure]" + label + "[/COLOR]",
                url=host + link,
                thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png",
                folder=True,
            )
        )

    return itemlist
コード例 #11
0
def search(item, texto):
    """Search the site for `texto`; returns result Items plus navigation."""
    log("search", "init texto=[" + texto + "]")
    itemlist = []
    url = host + "/?s=" + texto + "&search=Cerca+un+film"

    # (thumbnail, url, title) per search hit inside the results block.
    patron = 'class="bottom_line"></div>[^<]+<[^<]+<img.*?src="(.*?)"[^<]+</a>[^>]+<[^<]+<[^<]+<[^<]+<.*?class="movie_title"><a href="(.*?)">(.*?)</a>'
    rows = scrapedSingle(url, 'div id="movie_post_content">(.*?)</ul>', patron)
    for thumb, link, name in rows:
        name = scrapertools.decodeHtmlentities(name)
        log("novita", "title=[" + name + "] url=[" + link + "] thumbnail=[" + thumb + "]")

        itemlist.append(infoSod(
            Item(channel=__channel__, action="findvideos", title=name, fulltitle=name, url=link,
                 thumbnail=thumb), tipo="movie"))

    # Pagination
    # ===========================================================================================================================
    pager = scrapedSingle(url, 'class="vh-pages-wrapper span12 body-bg">(.*?)</div>',
                          'class="current">.*?</span><.*?href="(.*?)"')
    if len(pager) > 0:
        next_url = scrapertools.decodeHtmlentities(pager[0])
        itemlist.append(Item(channel=__channel__, action="elenco", title=AvantiTxt, url=next_url, thumbnail=AvantiImg))
        itemlist.append(Item(channel=__channel__, action="HomePage", title=HomeTxt, folder=True))
    else:
        itemlist.append(Item(channel=__channel__, action="mainlist", title=ListTxt, folder=True))
    # ============================================================================================================================
    return itemlist
コード例 #12
0
def categorias(item):
    """Category list for darkstream scraped from the site menu."""
    logger.info("streamondemand.darkstream categorias")
    itemlist = []

    # Download the page.
    data = scrapertools.cache_page(item.url)

    # (url, label) per menu entry.
    menu_re = re.compile('<li class="menu-item-3[^>]+><a[^=]+=[^=]+="(.*?)">(.*?)</a>', re.DOTALL)
    entries = menu_re.findall(data)
    scrapertools.printMatches(entries)

    for link, label in entries:
        plot = ""   # unused, kept for parity with the original trace
        thumb = ""  # unused, kept for parity with the original trace
        label = scrapertools.decodeHtmlentities(label.replace("Home", ""))
        label = scrapertools.decodeHtmlentities(label.replace("http://www.darkstream.tv/", ""))
        if DEBUG:
            logger.info("title=[" + label + "], url=[" + link + "]")
        itemlist.append(
            Item(
                channel=__channel__,
                action="cat_elenco",
                title="[COLOR azure]" + label + "[/COLOR]",
                url=link,
                thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png",
                folder=True,
            )
        )

    return itemlist
コード例 #13
0
ファイル: tantifilm.py プロジェクト: orione7/Italorione
def search_peliculas(item):
    """Search results for tantifilm; the plot is scraped per result page."""
    logger.info("streamondemand.tantifilm search_peliculas")
    itemlist = []

    # Download the results page.
    data = scrapertools.cache_page(item.url, headers=headers)

    # (url, title, thumbnail) per result.
    result_re = re.compile('<a href="([^"]+)" title="([^"]+)" rel="[^"]+">\s*<img width="[^"]+" height="[^"]+" src="([^"]+)" class="[^"]+" alt="[^"]+" />', re.DOTALL)

    for link, name, thumb in result_re.findall(data):
        # Pull the plot from the result's detail page.
        detail = scrapertools.cache_page(link, headers=headers)
        begin = detail.find("<div class=\"content-left-film\">")
        finish = detail.find("</div>", begin)
        summary = detail[begin:finish]
        summary = re.sub(r'<[^>]*>', '', summary)
        summary = scrapertools.decodeHtmlentities(summary)
        name = scrapertools.decodeHtmlentities(name)
        name = name.replace("streaming", "").replace("Permalink to ", "")
        if DEBUG:
            logger.info("title=[" + name + "], url=[" + link + "], thumbnail=[" + thumb + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="episodios" if item.extra == "serie" else "findvideos",
                 fulltitle=name,
                 show=name,
                 title="[COLOR azure]" + name + "[/COLOR]",
                 url=link,
                 thumbnail=thumb,
                 plot=summary,
                 folder=True))

    return itemlist
コード例 #14
0
def novedades(item):
    """Build the "new matches" (or "most voted") list for footballia.

    Two layouts share this handler: when item.title is "Novedades" the page
    has a featured first match plus a block of recent ones; otherwise only
    the "most voted" block is scraped.
    """
    logger.info("pelisalacarta.channels.footballia novedades")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    # Collapse the markup onto one line so the regexes below can span tags.
    data = data.replace("\n","").replace("\t","")

    if item.title == "Novedades":
        bloque = scrapertools.find_single_match(data, '<div class="last_matches section-title">(.*?)<h2>')
        # The featured match sits outside `bloque` and is parsed separately;
        # groups are (href, title-with-<br />, preview image).
        first_match = scrapertools.find_single_match(data, '<h2>Últimos partidos añadidos</h2>.*?<a href="(.*?)">(.*?)</a>.*?image: "([^"]+)"')
        if first_match != "":
            # The title arrives as "teams<br />competition": turn the break
            # into an opening paren, close it, then recolor the two halves.
            scrapedtitle = scrapertools.decodeHtmlentities(first_match[1]).replace("<br />", " (")+")"
            scrapedtitle = "[COLOR gold] "+scrapedtitle.rsplit("(",1)[0]+"[/COLOR][COLOR brown]("+scrapedtitle.rsplit("(",1)[1]+"[/COLOR]"
            itemlist.append(Item(channel=__channel__, title=scrapedtitle, action="play", url=host_footballia+first_match[0], thumbnail=host_footballia+first_match[2], folder=True))
    else:
        bloque = scrapertools.find_single_match(data, '<h2>Partidos más votados(.*?)<h2>')

    # (link, title, thumbnail, competition) per match row in `bloque`.
    patron = '<a href="([^"]+)".*?title="([^"]+)".*?src="([^"]+)".*?'
    patron += '<div class="(?:competition text-center">|m-t-xs competition">)(.*?)</div>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, competition in matches:
        scrapedtitle = " [COLOR gold]"+scrapertools.decodeHtmlentities(scrapedtitle)+"[/COLOR]"
        scrapedtitle += " [COLOR brown]("+scrapertools.decodeHtmlentities(competition)+")[/COLOR]"
        # Drop the "mini_" prefix to get the full-size image.
        scrapedthumbnail = scrapedthumbnail.replace("mini_","")
        itemlist.append(Item(channel=__channel__, title=bbcode_kodi2html(scrapedtitle), action="play", url=host_footballia+scrapedurl, thumbnail=host_footballia+scrapedthumbnail, folder=True))

    return itemlist
コード例 #15
0
def partidos(item):
    """Match list for footballia: player/coach tables or search results."""
    logger.info("pelisalacarta.channels.footballia partidos")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    # Flatten the markup so the regexes below can span lines.
    data = data.replace("\n", "").replace("\t", "")

    # Pick the table matching the menu entry the user came from.
    if "Jugador" in item.title:
        bloque = scrapertools.find_single_match(data, 'id="matches_as_player">(.*?)</table>')
    elif "Entrenador" in item.title:
        bloque = scrapertools.find_single_match(data, 'id="matches_as_coach">(.*?)</table>')
    else:
        bloque = scrapertools.find_single_match(data, '<div class="search-results">(.*?)<footer')

    # (href, team1, team2, competition, season, language) per row.
    row_pattern = ('<td class="match">.*?href="([^"]+)".*?<span itemprop="name">(.*?)</span>'
                   '.*?<span itemprop="name">(.*?)</span>.*?'
                   '<td class="competition hidden-xs">(.*?)</td>'
                   '<td class="season">(.*?)</td>'
                   '<td class="language hidden-xs">(.*?)</td>')
    for link, home, away, comp, season, lang in scrapertools.find_multiple_matches(bloque, row_pattern):
        home = scrapertools.decodeHtmlentities(home)
        away = scrapertools.decodeHtmlentities(away)
        comp = " [COLOR brown](" + scrapertools.decodeHtmlentities(comp) + "/" + season + ")[/COLOR] "
        label = "[COLOR orange]" + home + "-" + away + comp + "[COLOR green][" + lang + "][/COLOR]"
        thumb = host_footballia + link + "/preview_image"
        itemlist.append(Item(channel=__channel__, title=bbcode_kodi2html(label), action="play", url=host_footballia + link, thumbnail=thumb, folder=False))

    next_page = scrapertools.find_single_match(data, '<a rel="next" href="([^"]+)"')
    if next_page != "":
        itemlist.append(Item(channel=__channel__, title=">> Siguiente", action="partidos", url=host_footballia + next_page, thumbnail=item.thumbnail, folder=True))

    return itemlist
コード例 #16
0
def peliculas( item ):
    """Movie listing for asiansubita: posts plus the 'older posts' pager."""
    logger.info( "streamondemand.asiansubita peliculas" )

    itemlist = []

    # Download the page.
    data = scrapertools.cache_page( item.url )

    # (url, title, thumbnail, plot) per post.
    post_pattern = ('<!-- Post Type 3 -->\s*'
                    '<a.*?href="(.*?)" title="(.*?)" rel="bookmark">.*?<img src="(.*?)".*?<div class="entry-summary">\s*'
                    '(.*?)<a class="more-link"')
    posts = re.compile(post_pattern, re.DOTALL).findall(data)

    for link, raw_title, thumb, raw_plot in posts:
        summary = scrapertools.decodeHtmlentities(raw_plot)
        name = scrapertools.decodeHtmlentities(raw_title)

        itemlist.append(Item(channel=__channel__, action="findvideos", title=name, url=link, thumbnail=thumb, fulltitle=name, show=name, plot=summary, viewmode="movie_with_plot"))

    # Pagination.
    next_page = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="(.*?)" ><span class="meta-nav">&larr;</span> Articoli precedenti</a></div>')

    if next_page != "":
        itemlist.append(Item(channel=__channel__, action="peliculas", title="[COLOR orange]Post piu' vecchi >>[/COLOR]", url=next_page, thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
コード例 #17
0
ファイル: guardarefilm.py プロジェクト: orione7/Italorione
def pelis_top100(item):
    """Top-100 list for guardarefilm; plot and poster scraped per title."""
    logger.info("streamondemand.guardarefilm peliculas")
    itemlist = []

    # Download the page.
    data = scrapertools.cache_page(item.url, headers=headers)

    # (url, "title (year)") per ranked entry.
    top_re = re.compile(r'<span class="top100_title"><a href="([^"]+)">(.*?\(\d+\))</a>')

    for link, name in top_re.findall(data):
        # Pull plot and poster from the title's own page.
        detail = scrapertools.cache_page(link, headers=headers)
        begin = detail.find("<div class=\"textwrap\" itemprop=\"description\">")
        finish = detail.find("</div>", begin)
        summary = detail[begin:finish]
        summary = re.sub(r'<[^>]*>', '', summary)
        summary = scrapertools.decodeHtmlentities(summary)
        name = scrapertools.decodeHtmlentities(name)
        poster = scrapertools.find_single_match(detail, r'class="poster-wrapp"><a href="([^"]+)"')
        if DEBUG:
            logger.info("title=[" + name + "], url=[" + link + "], thumbnail=[" + poster + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="episodios" if item.extra == "serie" else "findvideos",
                 fulltitle=name,
                 show=name,
                 title="[COLOR azure]" + name + "[/COLOR]",
                 url=link,
                 thumbnail=urlparse.urljoin(host, poster),
                 plot=summary,
                 folder=True,
                 fanart=host + poster))

    return itemlist
コード例 #18
0
def peliculas(item):
    """Series listing for italiaserie: post thumbs plus 'Avanti' pager."""
    logger.info("streamondemand.italiaserie peliculas")
    itemlist = []

    # Download the page.
    data = scrapertools.cache_page(item.url)

    # (url, title, thumbnail) per post thumb.
    entry_re = re.compile('<div class="post-thumb">\s*<a href="(.*?)" title="(.*?)">\s*<img src="(.*?)"[^>]+>', re.DOTALL)
    entries = entry_re.findall(data)
    scrapertools.printMatches(entries)

    for link, name, thumb in entries:
        # The plot is the first paragraph of the entry's detail page.
        detail = scrapertools.cache_page(link)
        begin = detail.find("<div class=\"entry-content\">")
        finish = detail.find("</p>", begin)
        summary = detail[begin:finish]
        summary = re.sub(r'<[^>]*>', '', summary)
        summary = scrapertools.decodeHtmlentities(summary)
        name = scrapertools.decodeHtmlentities(name)
        if DEBUG:
            logger.info("title=[" + name + "], url=[" + link + "], thumbnail=[" + thumb + "]")
        itemlist.append(Item(channel=__channel__, action="findvideos", fulltitle=name, show=name, title="[COLOR azure]" + name + "[/COLOR]", url=link, thumbnail=thumb, plot=summary, folder=True))

    # Pager.
    pager = re.compile('<a class="next page-numbers" href="([^"]+)">Avanti &raquo;</a>', re.DOTALL).findall(data)
    scrapertools.printMatches(pager)

    if pager:
        next_url = urlparse.urljoin(item.url, pager[0])
        itemlist.append(Item(channel=__channel__, action="peliculas", title="[COLOR orange]Successivo >>[/COLOR]", url=next_url, thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png", folder=True))

    return itemlist
コード例 #19
0
def peliculas(item):
    """Movie listing for altadefinizioneclub with infoSod-wrapped Items."""
    logger.info("streamondemand.altadefinizioneclub peliculas")
    itemlist = []

    patron = '<li><a href="([^"]+)" data-thumbnail="([^"]+)"><div>\s*<div class="title">(.*?)</div>'
    for link, thumb, name in scrapedAll(item.url, patron):
        # Log the raw match before any title cleanup, as the original did.
        logger.info("title=[" + name + "], url=[" + link + "], thumbnail=[" + thumb + "]")
        xbmc.log(("title=[" + name + "], url=[" + link + "], thumbnail=[" + thumb + "]"))
        name = scrapertools.decodeHtmlentities(name)
        name = name.replace("[HD]", "")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=name,
                 fulltitle=name,
                 url=link,
                 thumbnail=thumb,
                 viewmode="movie"),
            tipo="movie"))

    # Pagination
    # ===========================================================================================================================
    pager = scrapedSingle(item.url, '<span class=\'pages\'>(.*?)class="clearfix"', "class='current'>.*?</span>.*?href=\"(.*?)\">.*?</a>")
    if len(pager) > 0:
        next_url = scrapertools.decodeHtmlentities(pager[0])
        itemlist.append(Item(channel=__channel__, action="peliculas", title=AvantiTxt, url=next_url, thumbnail=AvantiImg))
        itemlist.append(Item(channel=__channel__, action="HomePage", title=HomeTxt, thumbnail=ThumbnailHome, folder=True))
    else:
        itemlist.append(Item(channel=__channel__, action="mainlist", title=ListTxt, thumbnail=ThumbnailHome, folder=True))
    # ===========================================================================================================================
    return itemlist
コード例 #20
0
def peliculas(item):
    """Movie listing for italianstream; plot scraped from each detail page.

    Returns one Item per entry plus a 'Successivo' pager entry when a next
    page exists.
    """
    logger.info("streamondemand.italianstream peliculas")
    itemlist = []

    # Download the listing page.
    data = scrapertools.cache_page(item.url)

    # (url, title, thumbnail) per archive thumb.
    patron = '<div class="arch-thumb">[^<]+<a href="(.*?)" title="(.*?)"><img[^src]+src="(.*?)"[^<]+</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # Fix: close the HTTP response explicitly — the original leaked one
        # urllib2 connection per listed entry.
        response = urllib2.urlopen(scrapedurl)
        try:
            html = response.read()
        finally:
            response.close()
        # Plot is the text between "Trama:" and the closing </div>.
        start = html.find("Trama:")
        end = html.find("</div>", start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(Item(channel=__channel__, action="findvideos", title="[COLOR azure]" + scrapedtitle + "[/COLOR]", url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True, fanart=scrapedthumbnail))

    # Pager.
    patronvideos = '<div class="wp-pagenavi">.*?<a href="([^"]+)" >&rsaquo;</a></div>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(Item(channel=__channel__, action="peliculas", title="[COLOR orange]Successivo >>[/COLOR]", url=scrapedurl, folder=True, thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
コード例 #21
0
def updateserietv(item):
    """List latest episode updates from the site's JSON API.

    item.extra == 'serietv' entries expose ep_num/serieNome/ep_title; any
    other value is treated as anime with e_num/s_num/serie. Appends a
    "Torna Home" item and, when the API returned a full page, a pager item.
    """
    logger.info("streamondemand.solostreaming update serietv")

    itemlist = []

    # Download the page (JSON endpoint)
    data = cache_jsonpage(item.url)

    for singledata in data['results']:

        # NOTE(review): 'type' shadows the builtin name; left unchanged here.
        type = normalize_unicode(singledata['type'])
        uri = normalize_unicode(singledata['uri'])
        if item.extra == 'serietv':
            ep_num = normalize_unicode(singledata['ep_num'])
            serie = scrapertools.decodeHtmlentities(normalize_unicode(singledata['serieNome'])).strip()
            titolo = scrapertools.decodeHtmlentities(normalize_unicode(singledata['ep_title'])).strip()

            # Per-episode API endpoint for serie TV
            apisingle = host + "/sod/api.php?get=serietv&type=episodi&uri=" + uri + "&ep_num=" + ep_num + "&sub=" + urllib.quote_plus(
                type)

            frm_title = "[COLOR white](%s)[/COLOR] [B][COLOR royalblue]%s[/COLOR][/B] [B][COLOR deepskyblue]- %s %s[/COLOR][/B]" % (
            type.upper(), serie, ep_num, titolo)
        else:
            e_num = normalize_unicode(singledata['e_num'])
            s_num = normalize_unicode(singledata['s_num'])
            serie = scrapertools.decodeHtmlentities(normalize_unicode(singledata['serie'])).strip()

            # Per-episode API endpoint for anime
            apisingle = host + "/sod/api.php?get=anime&type=episodi&uri=" + uri + "&e_num=" + e_num + "&s_num=" + s_num + "&sub=" + urllib.quote_plus(
                type)

            frm_title = "[COLOR white](%s)[/COLOR] [B][COLOR royalblue]%s[/COLOR][/B] [B][COLOR deepskyblue]- %sx%s[/COLOR][/B]" % (
            type.upper(), serie, s_num, e_num)

        itemlist.append(
            Item(channel=__channel__,
                 action="findvid_serie",
                 fulltitle=frm_title,
                 show=frm_title,
                 title=frm_title,
                 url=apisingle,
                 thumbnail=singledata['fileName']))

    itemlist.append(
        Item(channel=__channel__,
             action="HomePage",
             title="[COLOR yellow]Torna Home[/COLOR]"))

    # A full page implies more results: advance the &start=/&end= window.
    if len(data['results']) == result_per_page:
        end = int(scrapertools.find_single_match(item.url, r"&end=(\d+)"))
        next_page = item.url.split('&start=')[0] + "&start=%d&end=%d" % (end, end + result_per_page)

        itemlist.append(
            Item(channel=__channel__,
                 action="updateserietv",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page,
                 extra=item.extra,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
コード例 #22
0
def findvideos(item):
    """Build the list of playable adf.ly links found on the detail page."""
    logger.info("[asiansubita.py] findvideos")

    # Download the page once; thumbnail and plot are shared by every link.
    data = scrapertools.cache_page(item.url)

    thumbnail = scrapertools.find_single_match(data, 'src="([^"]+)"[^<]+</p>')
    plot = scrapertools.decodeHtmlentities(
        scrapertools.find_single_match(data, '<p style="text-align: justify;">(.*?)</p>'))

    link_re = re.compile('href="(http://adf.ly/[^"]+)" target="_blank">([^<]+)</a>', re.DOTALL)

    itemlist = []
    for video_url, raw_label in link_re.findall(data):
        label = scrapertools.decodeHtmlentities(raw_label)
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title="[" + label + "] " + item.fulltitle,
                 url=video_url,
                 thumbnail=thumbnail,
                 plot=plot,
                 fulltitle=item.fulltitle,
                 show=item.show))

    return itemlist
def peliculas(item):
    """List documentaries: scrape thumb/url/title/plot from the index page.

    The multi-line regex tracks the site's markup element by element; a
    pager item is appended when a next-page link exists.
    """
    logger.info("streamondemand.documentaristreaming peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)


    # Extract the entries (folders) -- each line of the pattern mirrors one markup element
    patron = '<img[^s]+src="(.*?)"[^>]+>[^<]+<[^<]+<[^<]+<[^<]+<[^<]+</a>\s*'
    patron += '<div[^>]+>\s*'
    patron += '<div[^<]+<[^<]+<[^<]+</div>\s*'
    patron += '<h3[^>]+>\s*'
    patron += '<a href="(.*?)"[^>]+>\s*'
    patron += '(.*?)</a>\s*'
    patron += '</h3>\s*'
    patron += '<div[^>]+>\s*'
    patron += '<span[^>]+>\s*'
    patron += '<a[^<]+<[^<]+</a>\s*'
    patron += '<a[^<]+</a>\s*'
    patron += '</span>\s*'
    patron += '<span[^<]+</span>\s*'
    patron += '<a[^<]+<[^<]+<[^<]+<[^<]+<[^<]+</a>\s*'
    patron += '</div>\s*'
    patron += '<div[^>]+><p>(.*?)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedplot in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        if (DEBUG): logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 viewmode="movie_with_plot",
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extract the pager
    patronvideos = '<a class="next page-numbers" href="(.*?)"><i class="icon-iconic-right"></i></a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
コード例 #24
0
def pelicat(item):
    """List movies for a category page, fetching each detail page for the plot.

    Appends a "Successivo" pager item when a next-page link is present.
    """
    logger.info("streamondemand.streamingfilmit pelicat")
    itemlist = []

    # Download the category page
    data = scrapertools.cache_page(item.url, headers=headers)

    # Extract the entries (folders) -- pattern mirrors the site markup line by line
    patron = '<div class="kapsa">\s*'
    patron += '<a href="([^"]+)">\s*'
    patron += '<[^>]+>\s*'
    patron += '<[^>]+>\s*'
    patron += '<img[^=]+=[^=]+=[^=]+="([^"]+)"[^>]+>\s*'
    patron += '<[^>]+>\s*'
    patron += '<[^>]+>\s*'
    patron += '[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*'
    patron += '<[^>]+>\s*'
    patron += '<[^>]+>\s*'
    patron += '<h4><a[^>]+>(.*?)</a></h4>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        # Fetch the detail page to extract the plot.
        response = urllib2.urlopen(scrapedurl)
        html = response.read()
        start = html.find("<div id=\"detay-aciklama\">")
        if start != -1:
            end = html.find("</p>", start)
            # Missing closing tag: take the rest of the page instead of
            # silently dropping the final character (find() returns -1).
            scrapedplot = html[start:end] if end != -1 else html[start:]
            scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
            scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        else:
            # Marker absent: the old code sliced from -1 and produced junk.
            scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extract the pager
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">&raquo;</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="pelicat",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
コード例 #25
0
def peliculas(item):
    """List movies: for each entry fetch the detail page for the plot.

    Appends a "Successivo" pager item when a next-page link is found.
    """
    logger.info("streamondemand.streamingpopcorn peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)

    # Extract the entries (folders)
    patron = '<img style="[^"]+" rel="image_src" src="(.*?)" />.*?'
    patron += '<h4 class="widgettitle"><a href="([^"]+)">\s*(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedurl = urlparse.urljoin(host, scrapedurl)
        scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
        html = scrapertools.cache_page(scrapedurl, headers=headers)
        # NOTE(review): str.find returns -1 when a marker is absent, which
        # would slice a bogus plot fragment -- confirm markers always exist.
        start = html.find("</figure>")
        end = html.find("</div>", start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r"<[^>]*>", "", scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.strip()
        # scrapedthumbnail = ""
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(
                channel=__channel__,
                action="play",
                fulltitle=scrapedtitle,
                show=scrapedtitle,
                title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                url=scrapedurl,
                thumbnail=scrapedthumbnail,
                plot=scrapedplot,
                folder=True,
            )
        )

    # Extract the pager
    patron = '<a href="([^"]+)">Pagina successiva</a>'
    match = scrapertools.find_single_match(data, patron)

    if match != "":
        scrapedurl = urlparse.urljoin(host, match)
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo>>[/COLOR]",
                url=scrapedurl,
                thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True,
            )
        )

    return itemlist
def peliculas(item):
    """List documentaries from the index page.

    Strips boilerplate category prefixes from each title, and appends
    "Torna Home" plus a "Successivo" pager item when a next page exists.
    """
    logger.info("streamondemand.documentaristreamingdb peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)

    # Extract the entries (folders)
    patron = '<div class="movie-poster">\s*<img[^s]+src="(.*?)"[^=]+=[^=]+="(.*?)"[^>]+>\s*<a[^h]+href="(.*?)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        scrapedplot = ""
        # Decode entities once, then strip the boilerplate prefixes; the old
        # code re-applied decodeHtmlentities after every replace (redundant).
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        for prefix in ("Serie Documentari ", "Documentario ", "Documentari "):
            scrapedtitle = scrapedtitle.replace(prefix, "")
        scrapedtitle = scrapedtitle.replace("streaming", " ")
        # Drop anything after a stray double quote in the title.
        scrapedtitle = scrapedtitle.split('"')[0]
        if DEBUG: logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 viewmode="movie_with_plot",
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extract the pager
    patronvideos = '<a class="next page-numbers" href="(.*?)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        # (Removed a stray trailing comma that built a discarded tuple here.)
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png",
                 folder=True))

    return itemlist
コード例 #27
0
def showupdateserietv(item):
    """Render update entries serialized as JSON in item.extra.

    item.url selects the schema: 'serietv' entries carry ep_num/serieNome/
    ep_title; any other value is treated as anime with e_num/s_num/serie.
    Items are wrapped with infoSod to attach TV metadata.
    """
    logger.info("streamondemand.solostreaming showupdateserietv")

    extra = json.loads(item.extra)

    itemlist = []

    for singledata in extra:
        scrapedplot = ""

        # NOTE(review): 'type' shadows the builtin name; left unchanged here.
        type = normalize_unicode(singledata['type'])
        uri = normalize_unicode(singledata['uri'])

        if item.url == 'serietv':
            ep_num = normalize_unicode(singledata['ep_num'])
            serie = scrapertools.decodeHtmlentities(
                normalize_unicode(singledata['serieNome'])).strip()
            titolo = scrapertools.decodeHtmlentities(
                normalize_unicode(singledata['ep_title'])).strip()

            # Per-episode API endpoint for serie TV
            apisingle = host + "/sod/api.php?get=serietv&type=episodi&uri=" + uri + "&ep_num=" + ep_num + "&sub=" + urllib.quote_plus(
                type)

            fulltitle = serie + ' | ' + ep_num + ' ' + titolo
            frm_title = "[COLOR white](%s)[/COLOR] [B][COLOR royalblue]%s[/COLOR][/B] [B][COLOR deepskyblue]- %s %s[/COLOR][/B]" % (
                type.upper(), serie, ep_num, titolo)
        else:
            e_num = normalize_unicode(singledata['e_num'])
            s_num = normalize_unicode(singledata['s_num'])
            serie = scrapertools.decodeHtmlentities(
                normalize_unicode(singledata['serie'])).strip()

            # Per-episode API endpoint for anime
            apisingle = host + "/sod/api.php?get=anime&type=episodi&uri=" + uri + "&e_num=" + e_num + "&s_num=" + s_num + "&sub=" + urllib.quote_plus(
                type)

            fulltitle = serie + ' | ' + s_num + 'x' + e_num
            frm_title = "[COLOR white](%s)[/COLOR] [B][COLOR royalblue]%s[/COLOR][/B] [B][COLOR deepskyblue]- %sx%s[/COLOR][/B]" % (
                type.upper(), serie, s_num, e_num)

        itemlist.append(
            infoSod(
                Item(
                    channel=__channel__,
                    action="findvid_serie",
                    fulltitle=fulltitle,
                    show=serie,
                    title=frm_title,
                    url=apisingle,
                    thumbnail=singledata['fileName']),
                tipo='tv'))

    itemlist.append(
        Item(
            channel=__channel__,
            action="HomePage",
            title="[COLOR yellow]Torna Home[/COLOR]"))

    return itemlist
コード例 #28
0
def peliculas(item):
    """List movies from the index; fetch each detail page for the plot.

    Appends "Torna Home" and a "Successivo" pager item when a next page
    exists.
    """
    logger.info("streamondemand.streaming01 peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries (folders)
    patron = '<a class="short-img" href="([^"]+)"[^>]+>\s*'
    patron += '<img src="([^"]+)"[^>]+>\s*'
    patron += '</a>\s*'
    patron += '<div[^>]+>\s*'
    patron += '<h3>[^>]+>(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        html = scrapertools.cache_page(scrapedurl)
        # NOTE(review): str.find returns -1 when a marker is absent, which
        # would slice a bogus plot fragment -- confirm the markup is stable.
        start = html.find("<div class=\"full-text clearfix desc-text\">")
        end = html.find("<table>", start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<.*?>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedtitle = scrapedtitle.replace("Streaming download gratis ", "")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if DEBUG: logger.info(
                "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
                Item(channel=__channel__,
                     action="findvideos",
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     plot=scrapedplot,
                     folder=True))

    # Extract the pager
    patronvideos = '<span class="pnext"><a href="([^"]+)">Avanti</a></span>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True)),
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
コード例 #29
0
def peliculas(item):
    """Scrape the movie grid and add a pager entry when more pages exist."""
    logger.info("pelisalacarta.cinemagratis peliculas")
    itemlist = []

    # Fetch the listing page
    data = scrapertools.cache_page(item.url)

    # One match per movie card: url, thumbnail, title.
    entry_re = re.compile(
        '<div class="moviefilm">\s*'
        '<a href="(.*?)">\s*'
        '<img src="(.*?)" alt="(.*?)"[^>]+></a>', re.DOTALL)
    matches = entry_re.findall(data)
    scrapertools.printMatches(matches)

    for movie_url, movie_thumb, raw_title in matches:
        # Pull the plot out of each detail page.
        detail_html = urllib2.urlopen(movie_url).read()
        plot_start = detail_html.find('<span style="font-family: Arial, Helvetica, sans-serif;">')
        plot_end = detail_html.find("</font></a><br />", plot_start)
        plot = re.sub(r"<[^>]*>", "", detail_html[plot_start:plot_end])
        plot = scrapertools.decodeHtmlentities(plot)
        # scrapedplot = ""
        title = scrapertools.decodeHtmlentities(raw_title.replace("streaming", ""))
        if DEBUG:
            logger.info("title=[" + title + "], url=[" + movie_url + "], thumbnail=[" + movie_thumb + "]")
        itemlist.append(
            Item(
                channel=__channel__,
                action="findvideos",
                title="[COLOR azure]" + title + "[/COLOR]",
                url=movie_url,
                thumbnail=movie_thumb,
                plot=plot,
                folder=True,
            )
        )

    # Pager: rel="next" link
    pager = re.compile('<a class="nextpostslink" rel="next" href="(.*?)">&raquo;</a>',
                       re.DOTALL).findall(data)
    scrapertools.printMatches(pager)

    if pager:
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo>>[/COLOR]",
                url=urlparse.urljoin(item.url, pager[0]),
                thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True,
            )
        )

    return itemlist
コード例 #30
0
def anime(item):
    """List anime series; fetch each detail page for the plot.

    Appends "Torna Home" and an "Avanti" pager item when a next page
    exists.
    """
    logger.info("streamondemand.filmpertutti anime")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries (folders)
    patron = '<div class="general-box container-single-image">\s*'
    patron += '<a href="([^>"]+)"?.*?title="?([^>"]+)"?.*?<img.*?src="([^>"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        html = scrapertools.cache_page(scrapedurl)
        # NOTE(review): str.find returns -1 when a marker is missing -- the
        # slice below would then produce a bogus plot; confirm the markup.
        start = html.find("<div class=\"entry-content\">")
        end = html.find("</a></p>", start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("Streaming", ""))
        # Some entries are titled "Link to <name>"; strip the prefix.
        if scrapedtitle.startswith("Link to "):
            scrapedtitle = scrapedtitle[8:]
        if (DEBUG): logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="episodios",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 extra=item.extra,
                 folder=True))

    # Extract the pager
    patronvideos = '<a href="([^"]+)" >Avanti</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True)),
        itemlist.append(
            Item(channel=__channel__,
                 action="anime",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 extra=item.extra,
                 folder=True))

    return itemlist
コード例 #31
0
def letras(item):
    """List alphabetical index entries (A-Z) or per-word show listings.

    item.extra == 'letras' lists the letters themselves (with per-letter
    thumbnails); any other value matches titles containing that word and
    fetches each page for thumbnail and plot.
    """

    # Letter -> thumbnail image URL for the A-Z index.
    thumbletras = {
        '0-9': 'https://s32.postimg.org/drojt686d/image.png',
        '0 - 9': 'https://s32.postimg.org/drojt686d/image.png',
        '#': 'https://s32.postimg.org/drojt686d/image.png',
        'a': 'https://s32.postimg.org/llp5ekfz9/image.png',
        'b': 'https://s32.postimg.org/y1qgm1yp1/image.png',
        'c': 'https://s32.postimg.org/vlon87gmd/image.png',
        'd': 'https://s32.postimg.org/3zlvnix9h/image.png',
        'e': 'https://s32.postimg.org/bgv32qmsl/image.png',
        'f': 'https://s32.postimg.org/y6u7vq605/image.png',
        'g': 'https://s32.postimg.org/9237ib6jp/image.png',
        'h': 'https://s32.postimg.org/812yt6pk5/image.png',
        'i': 'https://s32.postimg.org/6nbbxvqat/image.png',
        'j': 'https://s32.postimg.org/axpztgvdx/image.png',
        'k': 'https://s32.postimg.org/976yrzdut/image.png',
        'l': 'https://s32.postimg.org/fmal2e9yd/image.png',
        'm': 'https://s32.postimg.org/m19lz2go5/image.png',
        'n': 'https://s32.postimg.org/b2ycgvs2t/image.png',
        'o': 'https://s32.postimg.org/c6igsucpx/image.png',
        'p': 'https://s32.postimg.org/jnro82291/image.png',
        'q': 'https://s32.postimg.org/ve5lpfv1h/image.png',
        'r': 'https://s32.postimg.org/nmovqvqw5/image.png',
        's': 'https://s32.postimg.org/zd2t89jol/image.png',
        't': 'https://s32.postimg.org/wk9lo8jc5/image.png',
        'u': 'https://s32.postimg.org/w8s5bh2w5/image.png',
        'v': 'https://s32.postimg.org/e7dlrey91/image.png',
        'w': 'https://s32.postimg.org/fnp49k15x/image.png',
        'x': 'https://s32.postimg.org/dkep1w1d1/image.png',
        'y': 'https://s32.postimg.org/um7j3zg85/image.png',
        'z': 'https://s32.postimg.org/jb4vfm9d1/image.png'
    }

    logger.info("pelisalacarta.channels.locopelis letras")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    realplot = ''
    if item.extra == 'letras':
        patron = '<li><a href="([^"]+)" title="Letra.*?">([^<]+)</a></li>'
    else:
        # NOTE(review): item.extra is interpolated into the regex unescaped;
        # assumes it never contains regex metacharacters -- confirm callers.
        patron = '<li><a.*?href="([^"]+)" title="([^v]+)' + item.extra + '.*?">'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        if item.extra != 'letras':
            # Per-show page: pull thumbnail and plot from the detail page.
            data = scrapertools.cache_page(scrapedurl)
            thumbnail = scrapertools.get_match(
                data, '<link rel="image_src" href="([^"]+)"/>')
            realplot = scrapertools.find_single_match(
                data, '<p itemprop="articleBody">([^<]+)<\/p> ')
            plot = scrapertools.remove_htmltags(realplot)
            action = 'temporadas'
        else:
            if scrapedtitle.lower() in thumbletras:
                thumbnail = thumbletras[scrapedtitle.lower()]
            else:
                thumbnail = ''
            plot = ''
            action = 'todas'
        title = scrapedtitle.replace(': ', '')
        title = scrapertools.decodeHtmlentities(title)
        if item.extra == 'letras':
            fanart = 'https://s31.postimg.org/c3bm9cnl7/a_z.png'
        elif item.extra == 'Vista':
            fanart = 'https://s32.postimg.org/466gt3ipx/vistas.png'
        else:
            fanart = ''

        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "])")
        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 fanart=fanart))

    return itemlist
コード例 #32
0
ファイル: descargasmix.py プロジェクト: sal666/pelisalacarta
def entradas(item):
    """List entries from the content page.

    Series-like sections (series, deportes, anime, miniseries) use one
    markup pattern; everything else carries an info overlay and a category
    string that drive the title decoration and the target action.
    """
    logger.info()
    itemlist = []
    item.text_color = color2

    data = get_data(item.url)
    bloque = scrapertools.find_single_match(
        data, '<div id="content" role="main">(.*?)<div id="sidebar" '
        'role="complementary">')
    contenido = ["series", "deportes", "anime", 'miniseries']
    c_match = [True for match in contenido if match in item.url]
    # Pattern depends on the content type
    if True in c_match:
        patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
                 '.*?<span class="overlay(|[^"]+)">'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedinfo in matches:
            # Strip the go.php redirect wrapper from the URL.
            scrapedurl = urllib.unquote(
                re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedurl))
            if scrapedinfo != "":
                scrapedinfo = scrapedinfo.replace(" ", "").replace("-", " ")

                scrapedinfo = "  [%s]" % unicode(
                    scrapedinfo, "utf-8").capitalize().encode("utf-8")
            titulo = scrapedtitle + scrapedinfo
            titulo = scrapertools.decodeHtmlentities(titulo)
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
            scrapedthumbnail = urllib.unquote(
                re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedthumbnail))
            if not scrapedthumbnail.startswith("http"):
                scrapedthumbnail = "http:" + scrapedthumbnail
            scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
            # Re-quote the filename component so the URL stays valid.
            scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \
                               urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])
            if "series" in item.url or "anime" in item.url:
                item.show = scrapedtitle
            itemlist.append(
                item.clone(action="episodios",
                           title=titulo,
                           url=scrapedurl,
                           thumbnail=scrapedthumbnail,
                           fulltitle=scrapedtitle,
                           contentTitle=scrapedtitle,
                           contentType="tvshow"))
    else:
        patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
                 '.*?<span class="overlay.*?>(.*?)<.*?<p class="stats">(.*?)</p>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedurl, scrapedtitle, scrapedthumbnail, info, categoria in matches:
            scrapedurl = urllib.unquote(
                re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedurl))
            titulo = scrapertools.decodeHtmlentities(scrapedtitle)
            scrapedtitle = scrapertools.decodeHtmlentities(
                scrapedtitle.split("[")[0])
            action = "findvideos"
            show = ""
            if "Series" in categoria:
                action = "episodios"
                show = scrapedtitle
            elif categoria and categoria != "Películas" and categoria != "Documentales":
                try:
                    titulo += " [%s]" % categoria.rsplit(", ", 1)[1]
                except:
                    titulo += " [%s]" % categoria
                # Language tags derived from the overlay icon classes.
                if 'l-espmini' in info:
                    titulo += " [ESP]"
                if 'l-latmini' in info:
                    titulo += " [LAT]"
                if 'l-vosemini' in info:
                    titulo += " [VOSE]"

            if info:
                titulo += " [%s]" % unicode(
                    info, "utf-8").capitalize().encode("utf-8")

            scrapedthumbnail = urllib.unquote(
                re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedthumbnail))
            if not scrapedthumbnail.startswith("http"):
                scrapedthumbnail = "http:" + scrapedthumbnail
            scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
            scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \
                               urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])

            itemlist.append(
                item.clone(action=action,
                           title=titulo,
                           url=scrapedurl,
                           thumbnail=scrapedthumbnail,
                           fulltitle=scrapedtitle,
                           contentTitle=scrapedtitle,
                           viewmode="movie_with_plot",
                           show=show,
                           contentType="movie"))

    # Pagination
    next_page = scrapertools.find_single_match(
        data, '<a class="nextpostslink".*?href="([^"]+)"')
    if next_page:
        next_page = urllib.unquote(
            re.sub(r'&amp;b=4|/go\.php\?u=', '', next_page))
        itemlist.append(
            item.clone(title=">> Siguiente", url=next_page, text_color=color3))

    return itemlist
def episodios(item):
    """Build the episode list for a mondolunatico series page.

    Episode links may sit behind keeplinks.co link protectors or behind
    the site's own captcha-guarded ``/pass/index.php`` gateway; both are
    resolved here and the resulting pages are scanned for episode links.
    """
    logger.info("streamondemand.mondolunatico episodios")

    itemlist = []

    # Load the series page.
    data = httptools.downloadpage(item.url).data

    html = []

    # Two passes: at the end of each pass `data` is replaced by the
    # concatenation of the pages fetched so far, so the second pass
    # scans those pages for further protected links nested one level deep.
    for i in range(2):
        # keeplinks.co protected links: fetching with these cookies
        # (flag[<id>]=1 — presumably skips the interstitial; confirm)
        # returns the real content page.
        patron = 'href="(https?://www\.keeplinks\.co/p92/([^"]+))"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for keeplinks, id in matches:
            _headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
                        ['Referer', keeplinks]]

            html.append(httptools.downloadpage(keeplinks, headers=_headers).data)

        # Site-local gateway pages, possibly guarded by a captcha.
        patron = r'="(%s/pass/index\.php\?ID=[^"]+)"' % host
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl in matches:
            tmp = httptools.downloadpage(scrapedurl).data

            if 'CaptchaSecurityImages.php' in tmp:
                # Download the captcha image to a temporary file.
                img_content = httptools.downloadpage(captcha_url).data

                captcha_fname = os.path.join(config.get_data_path(), __channel__ + "captcha.img")
                with open(captcha_fname, 'wb') as ff:
                    ff.write(img_content)

                from platformcode import captcha

                # Ask the user to type the captcha, then re-request the
                # gateway page posting the solved code.
                keyb = captcha.Keyboard(heading='', captcha=captcha_fname)
                keyb.doModal()
                if keyb.isConfirmed():
                    captcha_text = keyb.getText()
                    post_data = urllib.urlencode({'submit1': 'Invia', 'security_code': captcha_text})
                    tmp = httptools.downloadpage(scrapedurl, post=post_data).data

                # Best-effort cleanup of the temporary captcha image.
                try:
                    os.remove(captcha_fname)
                except:
                    pass

            html.append(tmp)

        data = '\n'.join(html)

    # De-duplicate episode titles across all fetched pages.
    encontrados = set()

    # Episode links in plain paragraphs.
    patron = '<p><a href="([^"]+?)">([^<]+?)</a></p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        # Use the last path segment of the anchor text as the title.
        scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show))

    # Episode links styled as "live" selector anchors.
    patron = '<a href="([^"]+)" target="_blank" class="selecttext live">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show))

    return itemlist
コード例 #34
0
def peliculas_tv(item):
    """Scrape the TV-series gallery page and return one Item per show.

    For every entry the detail page is fetched to extract the plot,
    the title markers are normalised, and pagination entries are added
    when a "next" link is present.
    """
    logger.info("streamondemand.filmstream peliculas")
    itemlist = []

    # Gallery page with the series posters.
    data = scrapertools.cache_page(item.url)

    patron = '<div class="galleryitem".*?>\s*'
    patron += '<a href="?([^>"]+)"?.*?title="?([^>"]+)"?.*?<img.*?src="([^>"]+)"'

    for url, raw_title, thumb in re.compile(patron, re.DOTALL).findall(data):
        # The plot lives on the detail page between two fixed markers.
        detail = scrapertools.cache_page(url)
        begin = detail.find("</strong></p>")
        finish = detail.find("<p>&nbsp;</p>", begin)
        plot = scrapertools.decodeHtmlentities(
            re.sub(r'<[^>]*>', '', detail[begin:finish]))

        # Normalise the title: drop "Streaming" and rebrand TV markers.
        title = raw_title.replace("Streaming", "")
        for old, new in (("(Serie Tv)", "{Serie Tv}"),
                         ("(Serie TV)", "{Serie Tv}"),
                         ("(Tv)", "{Tv}")):
            title = title.replace(old, new)
        title = scrapertools.decodeHtmlentities(
            title.replace("(Miniserie Tv)", "{Miniserie Tv}"))
        if title.startswith("Permanent Link to "):
            title = title[18:]

        if DEBUG:
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumb + "]")

        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="episodios",
                         fulltitle=title,
                         show=title,
                         title="[COLOR azure]" + title + "[/COLOR]",
                         url=url,
                         thumbnail=thumb,
                         plot=plot,
                         extra=item.extra,
                         folder=True),
                    tipo='tv'))

    # Pagination: a ">" link points at the next gallery page.
    pager = re.compile('<li><a href="([^"]+)">&gt;</a></li>',
                       re.DOTALL).findall(data)
    if pager:
        next_url = urlparse.urljoin(item.url, pager[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas_tv",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_url,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 extra=item.extra,
                 folder=True))

    return itemlist
コード例 #35
0
def findvid_film(item):
    """List all video links (streaming/download, SD/HD/3D) for a film page.

    Improvements over the original: the five near-identical
    scrape-and-append sections are folded into one nested helper, and
    the bare Python-2 ``print`` calls are replaced by ``logger.debug``
    for consistency with the other ``findvid_film`` in this file.
    Emitted items (titles, colors, fields, order) are unchanged.
    """
    logger.info("[cineblog01.py] findvideos")

    itemlist = []

    # Download the page (behind Cloudflare) and rewrite the mirror host.
    data = scrapertools.anti_cloudflare(item.url, headers)
    data = scrapertools.decodeHtmlentities(data).replace(
        'http://cineblog01.pw', 'http://k4pp4.pw')

    # Extract the quality label; like the original, the last match wins.
    QualityStr = ""
    for match in re.compile('>([^<]+)</strong></div>', re.DOTALL).finditer(data):
        QualityStr = scrapertools.unescape(match.group(1))[6:]

    link_patron = '<td><a href="([^"]+)" target="_blank">([^<]+)</a></td>'

    def _append_section(section_regex, label, color):
        """Scrape one links table and append a playable Item per link."""
        section = scrapertools.find_single_match(data, section_regex)
        for scrapedurl, scrapedtitle in re.compile(link_patron, re.DOTALL).findall(section):
            logger.debug("##### findvideos %s ## %s ## %s ##" %
                         (label, scrapedurl, scrapedtitle))
            title = ("[COLOR %s]%s:[/COLOR] " % (color, label)) + item.title + \
                    " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=title,
                     url=scrapedurl,
                     fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     folder=False))

    # Each section is delimited by its header and a spacer table.
    _append_section('<strong>Streaming:</strong>(.*?)<table height="30">',
                    "Streaming", "orange")
    _append_section('<strong>Streaming HD[^<]+</strong>(.*?)<table height="30">',
                    "Streaming HD", "yellow")
    _append_section('<strong>Streaming 3D[^<]+</strong>(.*?)<table height="30">',
                    "Streaming 3D", "pink")
    _append_section('<strong>Download:</strong>(.*?)<table height="30">',
                    "Download", "aqua")
    _append_section('<strong>Download HD[^<]+</strong>(.*?)<table width="100%" height="20">',
                    "Download HD", "azure")

    # Fall back to generic server detection when no section matched.
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(item=item)

    return itemlist
コード例 #36
0
def peliculas(item):
    """List the films of one playcinema gallery page.

    Bug fix: the original appended the Cookie entry to the shared,
    module-level ``headers`` list on every call, so repeated calls kept
    accumulating duplicate Cookie headers.  A per-call copy is used
    instead; the emitted items are unchanged.
    """
    logger.info("streamondemand.playcinema peliculas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    # Build the Cookie header Kodi needs to fetch the thumbnails
    # (fields 5/6 of each tab-separated cookie line are name and value).
    cookies = ""
    for cookie in config.get_cookie_data(item.url).splitlines()[4:]:
        fields = cookie.split('\t')
        cookies += fields[5] + "=" + fields[6] + ";"
    import urllib
    # Work on a copy: never mutate the shared module-level headers list.
    _headers = urllib.urlencode(dict(headers + [['Cookie', cookies[:-1]]]))

    # One entry per film poster.
    patron = '<div class="moviefilm">\s*'
    patron += '<a href="([^"]+)">\s*'
    patron += '<img src="([^"]+)" alt="([^"]+)"[^>]+></a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(
            scrapedtitle.replace("Streaming", ""))
        # Thumbnails need the auth headers appended Kodi-style (url|headers).
        scrapedthumbnail += '|' + _headers
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         folder=True),
                    tipo='movie'))

    # Pagination.
    matches = re.compile(
        '<a class="nextpostslink" rel="next" href="([^"]+)">&raquo;</a>',
        re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
コード例 #37
0
def fichas(item):
    """List film cards from an hdgratis listing or search-results page.

    Fixes two defects of the original:
    * the Cookie entry was appended to the shared module-level
      ``headers`` list on every call, accumulating duplicates;
    * when no ``<link rel="next">`` tag was present, the pagination
      check compared an empty *list* against ``""`` (always true) and
      emitted a "next" item whose url was a list.
    """
    logger.info("[hdgratis.py] fichas")

    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    # Collect the altadefinizione.black cookies for thumbnail requests
    # (fields 5/6 of each tab-separated cookie line are name and value).
    cookies = ""
    for cookie in re.compile('(.altadefinizione.black.*?)\n',
                             re.DOTALL).findall(config.get_cookie_data()):
        fields = cookie.split('\t')
        cookies += fields[5] + "=" + fields[6] + ";"
    import urllib
    # Work on a copy: never mutate the shared module-level headers list.
    _headers = urllib.urlencode(dict(headers + [['Cookie', cookies[:-1]]]))

    # Search pages and browse pages use different card markup.
    if "/?s=" in item.url:
        patron = '<div class="col-lg-3 col-md-3 col-xs-3">\s*<a href="([^"]+")>\s*<div class="wrapperImage">[^i]+i[^s]+src="([^"]+)"[^>]+> <div class="info">\s*<h5[^>]+>(.*?)<'
    else:
        patron = '<span class="hd">HD</span>\s*<a href="([^"]+)"><img[^s]+src="([^"]+)"[^>]+></a> <div class="info">\s*<[^>]+>[^>]+>(.*?)</a>'

    for scrapedurl, scrapedthumbnail, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # Thumbnails need the auth headers appended Kodi-style (url|headers).
        scrapedthumbnail += "|" + _headers

        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=title,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=title,
                         show=scrapedtitle),
                    tipo='movie'))

    # Pagination: take the last <link rel="next"> match, if any.
    next_page = ""
    for page in re.compile('<link rel="next" href="(.+?)"/>',
                           re.DOTALL).findall(data):
        next_page = page

    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
                 ))

    return itemlist
コード例 #38
0
def lista(item):
    """List movies or series from pelisplus; 'Buscar' results are
    filtered so only entries matching the searched section remain.

    Fixes over the original: removed an unused ``import inspect`` and
    guarded the pagination against an empty page-number match, which
    previously made ``int(actual)`` raise ValueError.
    """
    logger.info("pelisalacarta.channels.pelisplus lista")
    # Series entries open the seasons listing; anything else goes
    # straight to the video links.
    if item.extra == 'series/':
        accion = 'temporadas'
    else:
        accion = 'findvideos'

    itemlist = []
    data = scrapertools.cache_page(item.url)

    # Search-result markup differs slightly from the browse listing.
    if item.title != 'Buscar':
        patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>'
    else:
        patron = '<img data-original="([^"]+)".*?width="147" heigh="197".*?src=.*?>.*?\n<i class="icon online-play"><\/i>.*?\n<h2 class="title title-.*?">.*?\n<a href="([^"]+)" title="([^"]+)">.*?>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        url = scrapedurl
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        thumbnail = scrapedthumbnail
        plot = ''
        fanart = ''
        # For non-series entries, fetch the detail page for fanart/plot.
        if item.extra != 'series/':
            datab = scrapertools.cache_page(scrapedurl)
            fanart = scrapertools.find_single_match(
                datab, '<meta property="og:image" content="([^"]+)" \/>')
            plot = scrapertools.find_single_match(
                datab,
                '<span>Sinopsis:<\/span>.([^<]+)<span class="text-detail-hide"><\/span>.<\/p>'
            )
        if DEBUG:
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "])")

        if item.title != 'Buscar':
            itemlist.append(
                Item(channel=item.channel,
                     action=accion,
                     title=title,
                     url=url,
                     thumbnail=thumbnail,
                     plot=plot,
                     fanart=fanart))
        else:
            # Keep only results whose url belongs to the searched section.
            item.extra = item.extra.rstrip('s/')
            if item.extra in url:
                itemlist.append(
                    Item(channel=item.channel,
                         action=accion,
                         title=title,
                         url=url,
                         thumbnail=thumbnail,
                         plot=plot,
                         fanart=fanart))

    # Pagination (search results are not paginated).
    if item.title != 'Buscar':
        actual = scrapertools.find_single_match(
            data,
            '<a href="http:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" class="page bicon last"><<\/a>'
        )
        # Guard: only paginate when a current page number was found.
        if itemlist and actual:
            next_page = str(int(actual) + 1)
            next_page_url = host + item.extra + 'pag-' + next_page
            itemlist.append(
                Item(channel=item.channel,
                     action="lista",
                     title='Siguiente >>>',
                     url=next_page_url,
                     thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png',
                     extra=item.extra))
    return itemlist
コード例 #39
0
def peliculas_update(item):
    """Paged listing of recently-updated shows, 14 entries per page.

    The page number is carried after a ``{}`` marker appended to
    ``item.url``; the full match list is scraped each time and only the
    slice for the requested page is rendered.
    """
    logger.info("streamondemand-pureita.filmpertutti peliculas")
    itemlist = []

    per_page = 14

    page = 1
    if '{}' in item.url:
        item.url, page = item.url.split('{}')
        page = int(page)

    # Download the listing page.
    data = httptools.downloadpage(item.url).data

    patron = '<li><a\s*href="([^\/]+\/\/[^\/]+\/([^"]+))" data-\s*thumbnail="([^"]+)">'
    patron += '<div>\s*<div class="title">(.*?)<\/div>\s*<div class="episode"[^>]+>(.*?)<\/div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Render only the slice belonging to the requested page.
    for scrapedurl, titolo, scrapedthumbnail, scrapedtitle, episode in \
            matches[(page - 1) * per_page:page * per_page]:

        # Fall back to the slug (title-cased) when the title is empty.
        if scrapedtitle == "":
            scrapedtitle = titolo.title()

        episode = episode.replace("<br>", " ")

        # Strip site boilerplate and noise from the title.
        scrapedtitle = scrapedtitle.replace("<br>", " ").replace("&amp;", "e")
        scrapedtitle = scrapedtitle.replace("-", " ").replace("6", "")
        scrapedtitle = scrapedtitle.replace("/", " ").replace("Serie Tv", "")
        scrapedtitle = scrapedtitle.replace("serie tv", "").replace("Serie TV", "")
        scrapedtitle = scrapedtitle.replace("SERIE TV", "")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + " ([COLOR yellow]" + episode + "[/COLOR])",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot="",
                 extra=item.extra,
                 folder=True), tipo='tv'))

    # More matches than shown => add a "next page" entry.
    if len(matches) >= page * per_page:
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="peliculas_update",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=item.url + '{}' + str(page + 1),
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                 folder=True))

    return itemlist
コード例 #40
0
def episodios(item):
    """Build the episode list of a casacinema series page, tagging each
    episode with the language (ITA / SUB ITA) of its season header."""
    logger.info("[casacinema.py] episodios")

    itemlist = []

    # Download and clean the page, keeping only the episodes section.
    data = httptools.downloadpage(item.url).data
    data = scrapertools.decodeHtmlentities(data)
    data = scrapertools.get_match(data, '<p>(?:<strong>|)(.*?)<div id="disqus_thread">')

    # Locate every season header and remember where its block starts.
    season_langs = []
    season_starts = []
    for m in re.compile(r"Stagione.*?(?:ITA|\d+)", re.IGNORECASE).finditer(data):
        header = m.group()
        if header != '':
            season_langs.append('SUB ITA' if 'SUB' in header.upper() else 'ITA')
            season_starts.append(m.end())

    def _load_episodios(html, lang_title):
        """Turn each episode line of one season block into an Item."""
        patron = '.*?<a href="[^"]+"[^o]+ofollow[^>]+>[^<]+</a><(?:b|/)[^>]+>'
        for chunk in re.compile(patron).findall(html):
            # The episode label precedes the first anchor in the chunk.
            ep_title = re.sub(r'<[^>]*>', '', chunk.split('<a ')[0]).strip()
            if ep_title == 'Categorie':
                continue
            ep_title = ep_title.replace('&#215;', 'x').replace('×', 'x').replace(';', '')
            itemlist.append(
                Item(channel=__channel__,
                     action="findvideos",
                     contentType="episode",
                     title="[COLOR azure]%s[/COLOR]" % (ep_title + " (" + lang_title + ")"),
                     url=chunk,
                     thumbnail=item.thumbnail,
                     extra=item.extra,
                     fulltitle=ep_title + " (" + lang_title + ")" + ' - ' + item.show,
                     show=item.show))

    # Each season block runs from its header to the next one (or the end).
    for idx, start in enumerate(season_starts):
        end = season_starts[idx + 1] if idx + 1 < len(season_starts) else -1
        _load_episodios(data[start:end], season_langs[idx])

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=__channel__,
                 title="Aggiungi alla libreria",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios" + "###" + item.extra,
                 show=item.show))

    return itemlist
コード例 #41
0
def findvid_film(item):
    """List all video links (streaming/download, SD/HD/3D) for a film page.

    Improvement: the five near-identical scrape-and-append sections of
    the original are folded into nested helpers; behaviour (titles,
    colors, log lines, item fields, ordering) is unchanged, including
    the extra Streamango embed appended to the Streaming section.
    """
    logger.info("[cineblog01.py] findvid_film")

    itemlist = []

    # Download and decode the film page.
    data = httptools.downloadpage(item.url, headers=headers).data
    data = scrapertools.decodeHtmlentities(data)

    # Extract the quality label; like the original, the last match wins.
    QualityStr = ""
    for match in re.compile('>([^<]+)</strong></div>', re.DOTALL).finditer(data):
        QualityStr = scrapertools.unescape(match.group(1))[6:]

    link_patron = '<td><a[^h]href="([^"]+)"[^>]+>([^<]+)<'

    def _append_links(matches, label, color):
        """Append one playable Item per (url, title) pair."""
        for scrapedurl, scrapedtitle in matches:
            logger.debug("##### findvideos %s ## %s ## %s ##" %
                         (label, scrapedurl, scrapedtitle))
            title = ("[COLOR %s]%s:[/COLOR] " % (color, label)) + item.title + \
                    " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=title,
                     url=scrapedurl,
                     fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     folder=False))

    def _section_links(section_regex):
        """Scrape the (url, title) pairs of one delimited page section."""
        section = scrapertools.find_single_match(data, section_regex)
        return re.compile(link_patron, re.DOTALL).findall(section)

    # Streamango embeds are not inside the tables; append them after the
    # regular Streaming links, as the original did.
    extra = []
    u = scrapertools.find_single_match(
        data, '(?://|\.)streamango\.com/(?:f/|embed/)?[0-9a-zA-Z]+')
    if u:
        extra.append((u, 'Streamango'))

    _append_links(_section_links('<strong>Streaming:</strong>(.*?)<table height="30">') + extra,
                  "Streaming", "orange")
    _append_links(_section_links('<strong>Streaming HD[^<]+</strong>(.*?)<table height="30">'),
                  "Streaming HD", "yellow")
    _append_links(_section_links('<strong>Streaming 3D[^<]+</strong>(.*?)<table height="30">'),
                  "Streaming 3D", "pink")
    _append_links(_section_links('<strong>Download:</strong>(.*?)<table height="30">'),
                  "Download", "aqua")
    _append_links(_section_links('<strong>Download HD[^<]+</strong>(.*?)<table width="100%" height="20">'),
                  "Download HD", "azure")

    # Fall back to generic server detection when no section matched.
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(item=item)

    return itemlist
コード例 #42
0
def episodios(item):
    """List the episodes of a series, one per season-table row.

    Season/episode numbers are written into ``item.infoLabels`` before
    each ``clone``, so every cloned episode carries its own numbering;
    language tags are appended based on the flag images in the row.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")

    # Series plot, propagated to every episode via the cloned `item`.
    plot = scrapertools.find_single_match(data, '<p><p>(.*?)</p>')
    item.plot = scrapertools.htmlclean(plot)
    # One block per season table cell.
    bloque = scrapertools.find_multiple_matches(
        data, '<td data-th="Temporada"(.*?)</div>')
    for match in bloque:
        matches = scrapertools.find_multiple_matches(
            match, '.*?href="([^"]+)".*?title="([^"]+)"')
        for scrapedurl, scrapedtitle in matches:
            # Prefer the "SxE" form; fall back to "Capitulo N" (season 1);
            # when neither parses, treat the entry as a whole show.
            try:
                season, episode = scrapertools.find_single_match(
                    scrapedtitle, '(\d+)(?:×|x)(\d+)')
                item.infoLabels['season'] = season
                item.infoLabels['episode'] = episode
                contentType = "episode"
            except:
                try:
                    episode = scrapertools.find_single_match(
                        scrapedtitle,
                        '(?i)(?:Capitulo|Capítulo|Episodio)\s*(\d+)')
                    item.infoLabels['season'] = "1"
                    item.infoLabels['episode'] = episode
                    contentType = "episode"
                except:
                    contentType = "tvshow"

            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle) + "  "
            scrapedtitle = scrapedtitle.replace('Temporada', '')
            # Language tags derived from the flag icons in the row.
            if "ES.png" in match:
                scrapedtitle += "[CAST]"
            if "SUB.png" in match:
                scrapedtitle += "[VOSE]"
            if "LA.png" in match:
                scrapedtitle += "[LAT]"
            if "EN.png" in match:
                scrapedtitle += "[V.O]"

            itemlist.append(
                item.clone(action="findvideos",
                           title=scrapedtitle,
                           url=scrapedurl,
                           fulltitle=scrapedtitle,
                           contentType=contentType))

    itemlist.reverse()
    # Extras (trailer search / add-to-library) only on the primary view,
    # not when re-entered with extra == "episodios".
    if itemlist and item.extra != "episodios":
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass
        itemlist.append(
            item.clone(channel="trailertools",
                       title="Buscar Tráiler",
                       action="buscartrailer",
                       context="",
                       text_color="magenta"))
        if item.category != "" and config.get_library_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir esta temporada a la biblioteca",
                     url=item.url,
                     action="add_serie_to_library",
                     extra="episodios",
                     text_color="green",
                     show=item.show))

    return itemlist
コード例 #43
0
def lista(item):
    """List movies/series from a pelisplus catalogue or search page.

    Builds one Item per result, enriches the list via TMDB, fetches
    plot/fanart for entries TMDB could not fill, and appends a
    "Siguiente >>>" entry on catalogue (non-search) pages.

    Fixes: the plot-fetching loop used a variable named ``item`` that
    shadowed the function parameter, so the pagination checks below it
    operated on the last list entry instead of the caller's item. The
    unused ``import inspect`` and dead locals were also removed.
    """
    logger.info()

    itemlist = []

    # Series URLs get a season listing; everything else plays directly.
    if 'series/' in item.extra:
        accion = 'temporadas'
        tipo = 'tvshow'
    else:
        accion = 'findvideos'
        tipo = 'movie'

    data = httptools.downloadpage(item.url).data

    if item.title != 'Buscar':
        patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>'
        # Current page number, read from the "last page" paginator link.
        actual = scrapertools.find_single_match(
            data,
            '<a href="http:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" class="page bicon last"><<\/a>'
        )
    else:
        patron = '<img data-original="([^"]+)".*?width="147" heigh="197".*?src=.*?>.*?\n<i class="icon online-play"><\/i>.*?\n<h2 class="title title-.*?">.*?\n<a href="([^"]+)" title="([^"]+)">.*?>'
        actual = ''

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        # Filter TMDB lookups by poster path so the right entry is matched.
        filtro_thumb = scrapedthumbnail.replace(
            "https://image.tmdb.org/t/p/w154", "")
        filtro_list = {"poster_path": filtro_thumb}.items()

        if item.title != 'Buscar':
            itemlist.append(
                Item(channel=item.channel,
                     contentType=tipo,
                     action=accion,
                     title=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     fulltitle=scrapedtitle,
                     infoLabels={'filtro': filtro_list},
                     contentTitle=scrapedtitle,
                     contentSerieName=scrapedtitle,
                     extra=item.extra))
        else:
            # In search mode only keep results of the requested kind.
            item.extra = item.extra.rstrip('s/')
            if item.extra in scrapedurl:
                itemlist.append(
                    Item(channel=item.channel,
                         contentType=tipo,
                         action=accion,
                         title=scrapedtitle,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=scrapedtitle,
                         infoLabels={'filtro': filtro_list},
                         contentTitle=scrapedtitle,
                         contentSerieName=scrapedtitle,
                         extra=item.extra))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Fetch plot/fanart from the detail page for entries TMDB left empty.
    # NOTE: loop variable renamed from "item" — it used to shadow the
    # function parameter and corrupt the pagination logic below.
    for entry in itemlist:
        if entry.infoLabels['plot'] == '':
            data = httptools.downloadpage(entry.url).data
            entry.fanart = scrapertools.find_single_match(
                data, 'meta property="og:image" content="([^"]+)" \/>')
            entry.plot = scrapertools.find_single_match(
                data,
                '<span>Sinopsis:<\/span>.([^<]+)<span class="text-detail-hide"><\/span>.<\/p>'
            )

    # Pagination (catalogue pages only).
    if item.title != 'Buscar' and actual != '':
        if itemlist:
            next_page = str(int(actual) + 1)
            next_page_url = host + item.extra + 'pag-' + next_page
            itemlist.append(
                Item(
                    channel=item.channel,
                    action="lista",
                    title='Siguiente >>>',
                    url=next_page_url,
                    thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png',
                    extra=item.extra))
    return itemlist
コード例 #44
0
def peliculas(item):
    """List movies from a streaming01 index page.

    Titles are cleaned of marketing noise before display; a
    "Torna Home" and a "Successivo >>" entry are appended when the
    paginator link is present.
    """
    logger.info("streamondemand.streaming01 peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Entries: poster link, thumbnail, title anchor.
    patron = '<a class="short-img" href="([^"]+)"[^>]+>\s*'
    patron += '<img src="([^"]+)"[^>]+>\s*'
    patron += '</a>\s*'
    patron += '<div[^>]+>\s*'
    patron += '<h3>[^>]+>(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Noise stripped from titles. Order matters: " e download" must go
    # before the bare "download". (Replaces a 10-line .replace chain.)
    junk = ("Streaming ", " e download", "gratis", "streaming",
            "ita", "ITA", "download", "GRATIS", "[", "]")

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        for word in junk:
            scrapedtitle = scrapedtitle.replace(word, "")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         folder=True),
                    tipo='movie'))

    # Paginator
    patronvideos = '<span class="pnext"><a href="([^"]+)">Avanti</a></span>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        # (A stray trailing comma used to turn the first append into a
        # discarded one-element tuple; harmless, now removed.)
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))

    return itemlist
コード例 #45
0
def novita(item):
    """List the latest additions from the cb01anime home page.

    Skips the sticky "requests" post, drops a fixed-length boilerplate
    prefix from each plot, and appends home/next-page navigation
    entries when a "next" link exists.
    """
    logger.info("[cb01anime.py] mainlist")
    itemlist = []

    # Download the page (behind Cloudflare).
    data = scrapertools.anti_cloudflare(item.url, headers)

    # Entries: thumbnail, link, title, plot.
    patronvideos = '<div class="span4"> <a.*?<img src="(.*?)".*?'
    patronvideos += '<div class="span8">.*?<a href="(.*?)">.*?'
    patronvideos += '<h1>(.*?)</h1></a>.*?<br />(.*?)<br>.*?'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)

    for match in matches:
        scrapedthumbnail = match.group(1)
        scrapedurl = match.group(2)
        scrapedtitle = scrapertools.unescape(match.group(3))
        scrapedplot = scrapertools.unescape(match.group(4))
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        # Skip the sticky "requests" post.
        if scrapedtitle.startswith("Lista Richieste Up & Re-Up"):
            continue
        # NOTE(review): the original guard was `startswith("")`, which is
        # always true, so the first 64 characters were dropped
        # unconditionally — presumably a boilerplate prefix whose literal
        # text was lost. Behaviour preserved; confirm against the site.
        scrapedplot = scrapedplot[64:]
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        ## ------------------------------------------------
        # Append the request headers the image host requires.
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        ## ------------------------------------------------

        # Add the entry to the XBMC listing.
        itemlist.append(
            Item(channel=__channel__,
                 action="listacompleta" if scrapedtitle
                 == "Lista Alfabetica Completa Anime/Cartoon" else "episodios",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 viewmode="movie_with_plot",
                 plot=scrapedplot))

    # Put the next page mark
    try:
        next_page = scrapertools.get_match(data,
                                           "<link rel='next' href='([^']+)'")
        # (Stray trailing comma after the first append removed.)
        itemlist.append(
            Item(
                channel=__channel__,
                action="HomePage",
                title="[COLOR yellow]Torna Home[/COLOR]",
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/return_home_P.png",
                folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="novita",
                title="[COLOR orange]Successivo>>[/COLOR]",
                url=next_page,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png"
            ))
    except:
        pass

    return itemlist
コード例 #46
0
def peliculas_tv(item):
    """List TV-series (or film) entries from an italiafilm index page.

    Parses one <article> per result, then appends "Torna Home" and
    "Successivo >>" navigation entries when a next-page link exists.
    """
    logger.info("[italiafilm.py] peliculas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)
    articles = re.compile('<article(.*?)</article>', re.DOTALL).findall(data)

    for article in articles:
        title = scrapertools.find_single_match(
            article, '<h3[^<]+<a href="[^"]+"[^<]+>([^<]+)</a>')
        title = scrapertools.decodeHtmlentities(
            title.replace("Streaming", "")).strip()
        # Show name: drop the "Serie TV" tag and any parenthesised note.
        show_title = re.sub('\(.*?\)', '', title.replace('Serie TV', ''))
        url = scrapertools.find_single_match(article,
                                             '<h3[^<]+<a href="([^"]+)"')
        thumbnail = scrapertools.find_single_match(article,
                                                   'data-echo="([^"]+)"')
        plot = ""

        if DEBUG:
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "]")

        entry = Item(
            channel=__channel__,
            extra=item.extra,
            action='episodios' if item.extra == 'serie' else 'findvideos',
            fulltitle=title,
            show=show_title,
            title="[COLOR azure]" + title + "[/COLOR]",
            url=url,
            thumbnail=thumbnail,
            plot=plot,
            viewmode="movie_with_plot",
            folder=True)
        itemlist.append(infoSod(entry, tipo='tv'))

    # "Next page" link — absent on the last page.
    try:
        pagina_siguiente = scrapertools.get_match(
            data, '<a class="next page-numbers" href="([^"]+)"')
    except:
        return itemlist

    itemlist.append(
        Item(channel=__channel__,
             action="HomePage",
             title="[COLOR yellow]Torna Home[/COLOR]",
             folder=True))
    itemlist.append(
        Item(channel=__channel__,
             action="peliculas_tv",
             extra=item.extra,
             title="[COLOR orange]Successivo >> [/COLOR]",
             url=pagina_siguiente,
             thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png",
             folder=True))

    return itemlist
コード例 #47
0
def episodios(item):
    """Build the episode list for a tantifilm series page.

    Episodes live inside the blue spoiler box; each season heading
    (STAGIONE/MINISERIE/WEBSERIE/SERIE) opens a segment that is parsed
    line by line. If that yields nothing, a tab-based page layout is
    tried as a fallback. Library-integration entries are appended when
    enabled and something was found.
    """
    def load_episodios(html, item, itemlist):
        # Parse one season segment: one episode candidate per line.
        for data in html.splitlines():
            # Extract the entries
            end = data.find('<a ')
            if end > 0:
                # Title is the text before the first link, tags stripped.
                scrapedtitle = re.sub(r'<[^>]*>', '', data[:end]).strip()
            else:
                scrapedtitle = ''
            if scrapedtitle == '':
                # Fallback: use the anchor text itself as the title.
                patron = '<a\s*href="[^"]+"(?:\s*target="_blank")?>([^<]+)</a>'
                scrapedtitle = scrapertools.find_single_match(data,
                                                              patron).strip()
            # Prefer a "NxM"-style numeric pair (e.g. "1x05") when present.
            title = scrapertools.find_single_match(scrapedtitle,
                                                   '\d+[^\d]+\d+')
            if title == '':
                title = scrapedtitle
            if title != '':
                # The raw line travels in `extra` so findvideos can pull
                # the hoster links out of it later.
                itemlist.append(
                    Item(channel=__channel__,
                         action="findvideos",
                         title=title,
                         url=item.url,
                         thumbnail=item.thumbnail,
                         extra=data,
                         fulltitle=item.fulltitle,
                         show=item.show))

    logger.info("streamondemand.tantifilm episodios")

    itemlist = []

    data = scrapertools.cache_page(item.url, headers=headers)
    data = scrapertools.decodeHtmlentities(data)

    # Episodes live between the spoiler box and the Disqus widget.
    start = data.find('<div class="sp-wrap sp-wrap-blue">')
    end = data.find('<div id="disqus_thread">', start)

    data_sub = data[start:end]

    # Collect the offset right after each season heading.
    starts = []
    patron = r".*?STAGIONE|MINISERIE|WEBSERIE|SERIE"
    matches = re.compile(patron, re.IGNORECASE).finditer(data_sub)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            starts.append(match.end())

    # Parse each segment: from one heading up to the next (or the end).
    i = 1
    len_starts = len(starts)

    while i <= len_starts:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_starts else -1

        html = data_sub[inizio:fine]

        load_episodios(html, item, itemlist)

        i += 1

    # Fallback layout: seasons as "#wpwm-tabs-N" tab anchors.
    if len(itemlist) == 0:
        patron = '<a href="(#wpwm-tabs-\d+)">([^<]+)</a></li>'
        seasons_episodes = re.compile(patron, re.DOTALL).findall(data)

        end = None
        for scrapedtag, scrapedtitle in seasons_episodes:
            # Each tab's HTML is handed to findvideos through `extra`.
            start = data.find(scrapedtag, end)
            end = data.find('<div class="clearfix"></div>', start)
            html = data[start:end]

            itemlist.append(
                Item(channel=__channel__,
                     action="findvideos",
                     title=scrapedtitle,
                     url=item.url,
                     thumbnail=item.thumbnail,
                     extra=html,
                     fulltitle=item.fulltitle,
                     show=item.show))

    # Library integration entries (only when something was found).
    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=__channel__,
                 title=item.title,
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))
        itemlist.append(
            Item(channel=item.channel,
                 title="Scarica tutti gli episodi della serie",
                 url=item.url,
                 action="download_all_episodes",
                 extra="episodios",
                 show=item.show))

    return itemlist
コード例 #48
0
ファイル: cinefoxtv.py プロジェクト: yonvima/addon
def lista(item):
    """List catalogue entries, splitting each site page into two screens.

    The first call shows the first ``max_items`` results and re-links the
    same URL flagged ``next_page='b'``; the second call shows the rest and
    links the site's real next page.
    """
    logger.info()
    itemlist = []
    seen_urls = []
    max_items = 24
    next_page_url = ''

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = '"box_image_b.*?"><a href="([^"]+)" title=".*?><img src="([^"]+)" alt="(.*?)(\d{4}).*?"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if item.next_page == 'b':
        # Second half of the page, then link to the site's next page.
        matches = matches[max_items:]
        next_page = 'a'
        found = re.compile(
            '<a class="page dark gradient" href="([^"]+)">PROXIMO',
            re.DOTALL).findall(data)
        if len(found) > 0:
            next_page_url = urlparse.urljoin(item.url, found[0])
    elif len(matches) > max_items:
        # First half: re-link the same URL flagged 'b' for the rest.
        next_page_url = item.url
        matches = matches[:max_items]
        next_page = 'b'

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        # Strip parenthesised notes, slashes and entity junk from the title.
        contentTitle = re.sub(r"\(.*?\)|\/.*?|\(|\)|.*?\/|&excl;", "",
                              scrapedtitle)
        display_title = scrapertools.decodeHtmlentities(
            contentTitle) + '(' + scrapedyear + ')'

        if scrapedurl in seen_urls:
            continue
        seen_urls.append(scrapedurl)
        itemlist.append(
            Item(channel=item.channel,
                 action='findvideos',
                 title=display_title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot='',
                 fanart='',
                 contentTitle=contentTitle,
                 infoLabels={'year': scrapedyear}))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if next_page_url != '':
        itemlist.append(
            Item(channel=item.channel,
                 action="lista",
                 title='Siguiente >>>',
                 url=next_page_url,
                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                 extra=item.extra,
                 next_page=next_page))
    return itemlist
コード例 #49
0
def peliculas(item):
    """List documentaries from a documentaristreaming index page.

    The page exposes no clean title tag, so the display title is rebuilt
    from the URL slug. Navigation entries are appended when a next-page
    link exists.
    """
    logger.info("streamondemand.documentaristreaming peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # One (url, thumbnail) pair per entry.
    patron = '<a class="vw-post-box-thumbnail" href="(.*?)".*?rel="bookmark">.*?<img.*?src="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail in matches:
        scrapedplot = ""
        # Slug -> title: strip the site prefix, turn dashes into spaces,
        # drop slashes, then decode HTML entities ONCE. (The original
        # re-decoded after every replace, risking double-unescaping of
        # entities such as "&amp;amp;".)
        scrapedtitle = scrapedurl.replace("http://documentaristreaming.net/",
                                          "")
        scrapedtitle = scrapedtitle.replace("-", " ").replace("/", "")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 viewmode="movie_with_plot",
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Paginator
    patronvideos = '<a class="next page-numbers" href="(.*?)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        # (Stray trailing comma after the first append removed.)
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))

    return itemlist
コード例 #50
0
def novedades(item):
    """List the newest entries from the index page.

    NOTE(review): one extra detail page is downloaded per result to cut
    out its plot, which makes this listing slow; kept as-is to preserve
    behaviour.
    """
    logger.info("[filmsenzalimiti.py] novedades")
    itemlist = []

    # Download the index page
    data = scrapertools.cache_page(item.url)

    # One (url, thumbnail) pair per post box.
    patronvideos = '<div class="post-item-side"[^<]+'
    patronvideos += '<a href="([^"]+)"[^<]+<img src="([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail in matches:
        # Fetch the detail page and cut the plot out of it.
        html = scrapertools.cache_page(scrapedurl)
        start = html.find("</b></center></div>")
        end = html.find("</p>", start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        # Title is reconstructed from the URL slug.
        scrapedtitle = scrapertools.get_filename_from_url(scrapedurl).replace("-", " ").replace("/", "").replace(
                ".html", "").capitalize().strip()
        if (DEBUG): logger.info(
                "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        try:
           # Prefer rich metadata (poster/fanart/plot) when info() succeeds.
           plot, fanart, poster, extrameta = info(scrapedtitle)

           itemlist.append(
               Item(channel=__channel__,
                    thumbnail=poster,
                    fanart=fanart if fanart != "" else poster,
                    extrameta=extrameta,
                    plot=str(plot),
                    action="episodios" if item.extra == "serie" else "findvideos",
                    title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                    url=scrapedurl,
                    fulltitle=scrapedtitle,
                    show=scrapedtitle,
                    folder=True))
        except:
           # Fall back to the scraped thumbnail/plot on any metadata error.
           itemlist.append(
               Item(channel=__channel__,
                    action="episodios" if item.extra == "serie" else "findvideos",
                    fulltitle=scrapedtitle,
                    show=scrapedtitle,
                    title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                    url=scrapedurl,
                    thumbnail=scrapedthumbnail,
                    plot=scrapedplot,
                    folder=True))

    # "Next page" entry, when the paginator link is present.
    try:
        next_page = scrapertools.get_match(data, 'class="nextpostslink" rel="next" href="([^"]+)"')
        itemlist.append(
                Item(channel=__channel__,
                     extra=item.extra,
                     action="novedades",
                     title="[COLOR orange]Successivo >>[/COLOR]",
                     url=next_page,
                     thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                     folder=True))
    except:
        pass

    return itemlist
コード例 #51
0
def episodios(item):
    """Build the episode list, tagging entries with their language.

    Each "Stagione ... ITA" heading opens a section whose episodes are
    labelled ITA or SUB ITA; a library-integration entry is appended
    when supported.
    """

    def _parse_section(html, lang_title):
        # One candidate row per "<a ... target=_blank>...<br />" run.
        row_patron = '(?:.*?<a href="[^"]+"[^_]+_blank[^>]+>[^>]+>.*?<br \/>)'
        for row in re.compile(row_patron).findall(html):
            # Episode label is the text before the first link, tags stripped.
            ep_title = row.split('<a ')[0]
            ep_title = re.sub(r'<[^>]*>', '', ep_title).strip()
            if ep_title == 'Categorie':
                continue
            ep_title = ep_title.replace('&#215;', 'x')
            label = ep_title + " (" + lang_title + ")"
            itemlist.append(
                Item(channel=__channel__,
                     action="findvideos_tv",
                     contentType="episode",
                     title="[COLOR azure]%s[/COLOR]" % label,
                     url=row,
                     thumbnail=item.thumbnail,
                     extra=item.extra,
                     fulltitle=label + ' - ' + item.show,
                     show=item.show))

    logger.info("[streamblog.py] episodios")

    itemlist = []

    # Download the page and keep only the episode listing.
    data = httptools.downloadpage(item.url, headers=headers).data
    data = scrapertools.decodeHtmlentities(data)
    data = scrapertools.get_match(data, '<left>(.*?)</left>')

    # Locate each season heading: (offset just past it, language label).
    sections = []
    for heading_match in re.compile(r"Stagione.*?ITA",
                                    re.IGNORECASE).finditer(data):
        heading = heading_match.group()
        if heading != '':
            lang = 'SUB ITA' if 'SUB' in heading.upper() else 'ITA'
            sections.append((heading_match.end(), lang))

    # Each section runs from its heading up to the next one (or the end).
    total = len(sections)
    for idx, (begin, lang) in enumerate(sections):
        finish = sections[idx + 1][0] if idx + 1 < total else -1
        _parse_section(data[begin:finish], lang)

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=__channel__,
                 title="Aggiungi alla libreria",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
コード例 #52
0
def peliculas(item):
    """List movies from a filmstreamingita index page.

    The site uses two different "next page" markups depending on the
    section; both are handled by one loop (previously two copy-pasted
    paginator blocks).
    """
    logger.info("streamondemand-pureita.filmstreamingita peliculas")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    # Entries: link, title (alt attribute), thumbnail.
    patron = r'<div class="home_tall_box">\s*'
    patron += r'<a href="([^"]+)".*?>\s*<img.*?alt="([^"]+)".*?src="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        # Normalise the curly apostrophe and HTML entities in the title.
        scrapedtitle = scrapedtitle.replace("’", "'")
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        scrapedplot = ""
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         contentType="movie",
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         fulltitle=scrapedtitle,
                         url=scrapedurl,
                         extra="movie",
                         plot=scrapedplot,
                         show=scrapedtitle,
                         thumbnail=scrapedthumbnail,
                         folder=True),
                    tipo="movie"))

    # Two possible paginator markups; emit a "next" entry per match.
    next_patterns = (
        '<li><a href="([^"]+)" class="next">&raquo;</a></li>',
        '<a class="next page-numbers" href="([^"]+)">Successivo &raquo;</a></center>',
    )
    for next_patron in next_patterns:
        found = re.compile(next_patron, re.DOTALL).findall(data)
        if len(found) > 0:
            itemlist.append(
                Item(
                    channel=__channel__,
                    action="peliculas",
                    title="[COLOR orange]Successivi >>[/COLOR]",
                    url=found[0],
                    thumbnail=
                    "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                    extra=item.extra,
                    folder=True))
    return itemlist
コード例 #53
0
def episodios(item):
    """Build the episode list, one language section (ITA / SUB ITA) at a time.

    Each "STAGIONE ... ITA" heading starts a section; its run of episode
    links is parsed and labelled with the section's language. Library
    integration entries are appended when enabled.
    """
    def load_episodios(html, item, itemlist, lang_title):
        # One match per run of "external" episode links.
        patron = '((?:.*?<a href="[^"]+" class="external" rel="nofollow" target="_blank">[^<]+</a>)+)'
        matches = re.compile(patron).findall(html)
        for data in matches:
            # Extract the entries: the episode label precedes the first link.
            scrapedtitle = data.split('<a ')[0]
            scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
            if scrapedtitle != 'Categorie':
                # The matched run of links travels in `extra` so
                # findvid_serie can extract the hosters from it.
                itemlist.append(
                        Item(channel=__channel__,
                             action="findvid_serie",
                             title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
                             url=item.url,
                             thumbnail=item.thumbnail,
                             extra=data,
                             fulltitle=item.fulltitle,
                             show=item.show))

    logger.info("[filmsenzalimiti.py] episodios")

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    data = scrapertools.decodeHtmlentities(data)

    # Collect each season heading's end offset and language label.
    lang_titles = []
    starts = []
    patron = r"STAGIONE.*?ITA"
    matches = re.compile(patron, re.IGNORECASE).finditer(data)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
            starts.append(match.end())

    # Each section runs from its heading up to the next one (or the end).
    i = 1
    len_lang_titles = len(lang_titles)

    while i <= len_lang_titles:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_lang_titles else -1

        html = data[inizio:fine]
        lang_title = lang_titles[i - 1]

        load_episodios(html, item, itemlist, lang_title)

        i += 1

    # Library integration entries (only when something was found).
    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(
                Item(channel=__channel__,
                     title=item.title,
                     url=item.url,
                     action="add_serie_to_library",
                     extra="episodios",
                     show=item.show))
        itemlist.append(
                Item(channel=__channel__,
                     title="Scarica tutti gli episodi della serie",
                     url=item.url,
                     action="download_all_episodes",
                     extra="episodios",
                     show=item.show))

    return itemlist
コード例 #54
0
ファイル: kbagi.py プロジェクト: yonvima/addon
def listado(item):
    """List the files of a kbagi folder/search page.

    Parses each file block for size, title, link, folder and thumbnail,
    spawns background downloads for the thumbnails, and appends a
    "next page" item when the page is split.

    Fix: the gallery view (used only to resolve thumbnail URLs) was
    downloaded unconditionally and then discarded when ``item.post`` was
    empty — a wasted HTTP request. It is now fetched only when needed.
    """
    logger.info()
    itemlist = []
    if item.post:
        # The "Gallery" view of the same page carries the thumbnail URLs.
        data_thumb = httptools.downloadpage(
            item.url, item.post.replace("Mode=List", "Mode=Gallery")).data
    else:
        # No POST payload: no gallery view available; force the list view.
        data_thumb = ""
        item.url = item.url.replace("/gallery,", "/list,")
    data = httptools.downloadpage(item.url, item.post).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    # Local cache folder where background thumbnail downloads are stored.
    folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
    patron = 'data-file-id(.*?</p>)</div></div>'
    bloques = scrapertools.find_multiple_matches(data, patron)
    for block in bloques:
        # Skip adult entries unless adult content is enabled.
        if "adult_info" in block and not adult_content:
            continue
        size = scrapertools.find_single_match(block, '<p.*?>([^<]+)</p>')
        patron = 'class="name"><a href="([^"]+)".*?>([^<]+)<'
        scrapedurl, scrapedtitle = scrapertools.find_single_match(
            block, patron)
        scrapedthumbnail = scrapertools.find_single_match(
            block, "background-image:url\('([^']+)'")
        if scrapedthumbnail:
            try:
                # Resolve the real thumbnail URL from the gallery view when
                # available; otherwise use the one embedded in the list view.
                thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?")
                if data_thumb:
                    url_thumb = scrapertools.find_single_match(
                        data_thumb, "(%s[^']+)'" % thumb)
                else:
                    url_thumb = scrapedthumbnail
                scrapedthumbnail = filetools.join(
                    folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:])
            except:
                scrapedthumbnail = ""
        if scrapedthumbnail:
            # Fetch the thumbnail in the background so listing stays fast.
            t = threading.Thread(target=download_thumb,
                                 args=[scrapedthumbnail, url_thumb])
            t.daemon = True  # Thread.setDaemon() is deprecated
            t.start()
        else:
            scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"
        scrapedurl = item.extra + scrapedurl
        title = "%s (%s)" % (scrapedtitle, size)
        if "adult_info" in block:
            title += " [COLOR %s][+18][/COLOR]" % color4
        plot = scrapertools.find_single_match(block,
                                              '<div class="desc">(.*?)</div>')
        if plot:
            plot = scrapertools.decodeHtmlentities(plot)
        new_item = Item(channel=item.channel,
                        action="findvideos",
                        title=title,
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail,
                        contentTitle=scrapedtitle,
                        text_color=color2,
                        extra=item.extra,
                        infoLabels={'plot': plot},
                        post=item.post)
        if item.post:
            try:
                # Folder metadata is only present in the POST-driven views.
                new_item.folderurl, new_item.foldername = scrapertools.find_single_match(
                    block, '<p class="folder"><a href="([^"]+)".*?>([^<]+)<')
            except:
                pass
        else:
            new_item.folderurl = item.url.rsplit("/", 1)[0]
            new_item.foldername = item.foldername
            new_item.fanart = item.thumbnail
        itemlist.append(new_item)
    # Pagination: bump the page number in the POST payload or in the URL.
    next_page = scrapertools.find_single_match(
        data, 'class="pageSplitter.*?" data-nextpage-number="([^"]+)"')
    if next_page:
        if item.post:
            post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page,
                          item.post)
            url = item.url
        else:
            url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page,
                         item.url)
            post = ""
        itemlist.append(
            Item(channel=item.channel,
                 action="listado",
                 title=">> Página Siguiente (%s)" % next_page,
                 url=url,
                 post=post,
                 extra=item.extra))
    return itemlist
コード例 #55
0
def cleantitle(scrapedtitle):
    """Return *scrapedtitle* with HTML entities decoded, double quotes
    replaced by single quotes, and surrounding whitespace removed."""
    decoded = scrapertools.decodeHtmlentities(scrapedtitle.strip())
    return decoded.replace('"', "'").strip()
コード例 #56
0
def lista(item):
    """Build the listing of movies / series / documentaries.

    The follow-up action is chosen from ``item.extra``; each entry is
    scraped for title, url, thumbnail and quality, and a "next page" item
    is appended when the site exposes a pager link.

    Fixes: ``tcalidad[...]`` could raise KeyError on an unmapped quality
    (now ``dict.get`` with the raw value as fallback); the pagination code
    stripped the URL and built the next-page URL even when no "next" link
    existed, and crashed with IndexError when the URL contained no '='
    (now guarded, and ``item.url`` is no longer mutated).
    """
    logger.info()

    if item.extra == 'series':
        accion = 'episodiosxtemp'
    elif 'series-' in item.extra:
        accion = 'temporadas'
    else:
        accion = 'findvideos'

    itemlist = []
    data = httptools.downloadpage(item.url).data

    # Series/documentaries use a slightly different markup after "boren2".
    if 'series' in item.extra or item.extra == 'documental':
        patron = '<h2 itemprop="name" >([^<]+)<\/h2><a href="([^.]+)" title=".*?" ><img.*?src="([^"]+)".*?class=".*?boren2"\/([^<]+)'
    else:
        patron = '<h2 itemprop="name" >([^<]+)<\/h2><a href="([^.]+)" title=".*?" ><img.*?src="([^"]+)".*?class=".*?boren2".*?>([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedcalidad in matches:
        url = scrapertools.decodeHtmlentities(host + scrapedurl)
        url = url.strip(' ')

        # Normalize quality: "720p " -> "720", lowercased.
        scrapedcalidad = scrapedcalidad.strip(' ')
        scrapedcalidad = scrapedcalidad.strip('p')
        scrapedcalidad = scrapedcalidad.lower()
        if 'series' in item.extra or item.extra == 'documental':
            title = scrapertools.decodeHtmlentities(scrapedtitle)
        else:
            # .get() avoids a KeyError when the site uses an unmapped quality.
            calidad = tcalidad.get(scrapedcalidad, scrapedcalidad)
            title = scrapertools.decodeHtmlentities(
                scrapedtitle) + ' (' + calidad + ') '

        thumbnail = scrapedthumbnail
        fanart = ''
        plot = ''

        itemlist.append(
            Item(channel=item.channel,
                 action=accion,
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 fanart=fanart,
                 contentSerieName=scrapedtitle,
                 contentTitle=scrapedtitle,
                 extra=item.extra))

    # Pagination: only when there are results AND a "next" link exists.
    if itemlist:
        next_page = scrapertools.find_single_match(
            data, '<a href="\?page=([^"]+)" class="next">next &')
        if next_page != '':
            # Drop the current page number: keep everything up to '='.
            base_url = item.url
            while base_url and base_url[-1] != '=':
                base_url = base_url[:-1]
            next_page_url = base_url + next_page
            itemlist.append(
                Item(
                    channel=item.channel,
                    action="lista",
                    title='Siguiente >>>',
                    url=next_page_url,
                    thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png',
                    extra=item.extra))
    return itemlist
コード例 #57
0
def todas(item):
    """List every movie on a locopelis index page.

    Scrapes url, title, thumbnail, plot and audio language for each entry,
    then appends a "next page" item derived from the pager's current and
    last page numbers.
    """
    # NOTE(review): `latino` looks unused in this function — possibly a
    # leftover from language-coloring logic; confirm before removing.
    latino = 'limegreen'
    #    español = 'yellow'
    #    sub = 'white'
    logger.info("pelisalacarta.channels.locopelis todas")
    itemlist = []
    data = scrapertools.cache_page(item.url)

    patron = '<h2 class="titpeli bold ico_b">.*?<\/h2>.*?'
    patron += '<a href="([^"]+)" title="([^"]+)">.*?'
    patron += '<img src="([^"]+)" alt=.*?><\/a>.*?'
    patron += '<p>([^<]+)<\/p>.*?'
    patron += '<div class=.*?>Idioma<\/strong>:.<img src=.*?>([^<]+)<\/div>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot, scrapedidioma in matches:

        idioma = scrapedidioma.strip()
        idioma = scrapertools.decodeHtmlentities(idioma)
        url = urlparse.urljoin(item.url, scrapedurl)
        # NOTE(review): str.decode('cp1252') is Python-2-only; this breaks
        # on Python 3 — confirm the target runtime.
        title = scrapedtitle.decode('cp1252')
        title = title.encode('utf-8') + ' (' + idioma + ')'

        thumbnail = scrapedthumbnail
        plot = scrapedplot
        fanart = 'https://s31.postimg.org/5worjw2nv/locopelis.png'

        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "])")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 fanart=fanart,
                 extra=idioma))


# Pagination: the pager shows the current page in <b>; "Ultima" carries
# the last page number.
    siguiente = ''
    title = ''
    actual = scrapertools.find_single_match(
        data, '<li><a href=".*?"><span><b>([^<]+)<\/b><\/span><\/a><\/li>')
    ultima = scrapertools.find_single_match(
        data, '<li><a href=".*?page=([^"]+)">Ultima<\/a><\/li>')
    if 'page' in item.title:
        # Strip the old page number so a fresh one can be appended below.
        while not item.url.endswith('='):
            item.url = item.url[:-1]
    if actual:
        siguiente = int(actual) + 1
        if item.url.endswith('='):
            siguiente_url = item.url + str(siguiente)
        else:
            siguiente_url = item.url + '?&page=' + str(siguiente)
    if actual and ultima and siguiente <= int(ultima):
        #import inspect
        titlen = 'Pagina Siguiente >>> ' + str(actual) + '/' + str(ultima)
        fanart = 'https://s31.postimg.org/5worjw2nv/locopelis.png'
        itemlist.append(
            Item(channel=item.channel,
                 action="todas",
                 title=titlen,
                 url=siguiente_url,
                 fanart=fanart))
    return itemlist
コード例 #58
0
ファイル: cine24h.py プロジェクト: Reunion90/raiz
def findvideos(item):
    """Resolve the playable video links of a cine24h title.

    Each "Opt" tab holds an iframe that leads (possibly through a second
    iframe) to the final server URL; language and quality come from the
    tab label. Feeds FilterTools/AutoPlay, and offers adding the movie to
    the video library.

    Fixes: ``match[0]`` raised IndexError when a tab label did not match
    the lang/quality pattern (now skipped); the loop-invariant ``headers``
    and ``languages`` mapping are hoisted out of the loop.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = 'data-tplayernv="Opt(.*?)"><span>(.*?)</span>(.*?)</li>'  # option, server, lang - quality
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Loop invariants: same referer for every option; fixed label mapping.
    headers = {'Referer': item.url}
    languages = {
        'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
        'español': '[COLOR green](CAST)[/COLOR]',
        'subespañol': '[COLOR red](VOS)[/COLOR]',
        'sub': '[COLOR red](VOS)[/COLOR]'
    }
    patron_quote = '<span>(.*?) -([^<]+)</span'

    # The server name scraped here is recomputed from the final URL below,
    # so the second capture group is ignored.
    for option, _, quote in matches:
        match = re.compile(patron_quote, re.DOTALL).findall(quote)
        if not match:
            # Malformed tab label: skip instead of raising IndexError.
            continue
        lang, quality = match[0]
        quality = quality.strip()
        url_1 = scrapertools.find_single_match(
            data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' %
            option)
        new_data = httptools.downloadpage(url_1, headers=headers).data
        new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
        new_data = scrapertools.decodeHtmlentities(new_data)
        url2 = scrapertools.find_single_match(
            new_data, '<iframe width="560" height="315" src="([^"]+)"')
        # Keep the intermediate URL as referer, except for rapidvideo.
        url = url2 + '|%s' % url_1
        if 'rapidvideo' in url2:
            url = url2

        lang = lang.lower().strip()
        if lang in languages:
            lang = languages[lang]

        servername = servertools.get_server_from_url(url)

        title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
            servername.title(), quality, lang)

        itemlist.append(
            item.clone(action='play',
                       url=url,
                       title=title,
                       language=lang,
                       quality=quality,
                       text_color=color3))

    itemlist = servertools.get_servers_itemlist(itemlist)

    itemlist.sort(key=lambda it: it.language, reverse=False)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(
            Item(channel=__channel__,
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 thumbnail=thumbnail_host,
                 contentTitle=item.contentTitle))

    return itemlist
コード例 #59
0
def episodios(item):
    """Build the episode list for a series page on eurostreaming.

    Follows an optional base64-encoded redirect and an optional canonical
    link, then parses the two spoiler-markup layouts the site uses,
    tagging each episode as ITA or SUB ITA.
    """
    def load_episodios():
        # NOTE: reads `match` and `lang_title` from the enclosing loop via
        # closure (late binding) — it must be called inside that loop, and
        # it appends directly into the outer `itemlist`.
        for data in match.split('<br/>'):
            ## Extract the entries: the digits-separator-digits token
            ## (e.g. a season/episode number) before the first link.
            end = data.find('<a ')
            if end > 0:
                scrapedtitle = scrapertools.find_single_match(
                    data[:end], '\d+[^\d]+\d+')
                itemlist.append(
                    Item(channel=__channel__,
                         action="findvid_serie",
                         title=scrapedtitle + " (" + lang_title + ")",
                         url=item.url,
                         thumbnail=item.thumbnail,
                         extra=data,
                         fulltitle=item.title,
                         show=item.title))

    logger.info("[eurostreaming.py] episodios")

    itemlist = []

    ## Download the page
    data = scrapertools.cache_page(item.url)

    # Some pages hide the real URL behind a base64-encoded redirect.
    patron = r"onclick=\"top.location=atob\('([^']+)'\)\""
    b64_link = scrapertools.find_single_match(data, patron)
    if b64_link != '':
        import base64
        data = scrapertools.cache_page(base64.b64decode(b64_link))

    # Follow the canonical "?p=NNN" link on the host when present.
    patron = r'<a href="(%s/\?p=\d+)">' % host
    link = scrapertools.find_single_match(data, patron)
    if link != '':
        data = scrapertools.cache_page(link)

    data = scrapertools.decodeHtmlentities(data)

    # First spoiler layout: title in the span before the spoiler content.
    patron = '</span>([^<]+)</div><div class="su-spoiler-content su-clearfix" style="display:none">(.+?)</div></div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for lang_title, match in matches:
        lang_title = 'SUB ITA' if 'SUB' in lang_title.upper() else 'ITA'
        load_episodios()

    # Second spoiler layout: title split across two styled spans.
    patron = '<li><span style="[^"]+"><a onclick="[^"]+" href="[^"]+">([^<]+)</a>(?:</span>\s*<span style="[^"]+"><strong>([^<]+)</strong>)?</span>(.*?)</div>\s*</li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for lang_title1, lang_title2, match in matches:
        lang_title = 'SUB ITA' if 'SUB' in (lang_title1 +
                                            lang_title2).upper() else 'ITA'
        load_episodios()

    # Library integration: add-series and download-all entries.
    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=__channel__,
                 title=item.title,
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))
        itemlist.append(
            Item(channel=item.channel,
                 title="Scarica tutti gli episodi della serie",
                 url=item.url,
                 action="download_all_episodes",
                 extra="episodios",
                 show=item.show))

    return itemlist
コード例 #60
0
def novedades(item):
    """List the newest entries of the gallery section.

    Extracts url/thumbnail/title for each gallery item, derives a clean
    show title by cutting everything from the first "Temporada"/"Capitulo"
    marker onwards, tags the audio language from the flag images, and adds
    pagination.

    Fix: the original used nested try/except around ``re.split`` to fall
    back from "Temporada" to "Capitulo", but ``re.split`` never raises
    when the pattern is absent, so the "Capitulo" branch was unreachable.
    The split now explicitly falls back to the next marker.
    """
    logger.info()
    itemlist = []
    item.text_color = color2

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")

    bloque = scrapertools.find_single_match(
        data, '<section class="list-galeria">(.*?)</section>')
    bloque = scrapertools.find_multiple_matches(bloque,
                                                '<li><a href=(.*?)</a></li>')
    for match in bloque:
        patron = '([^"]+)".*?<img class="fade" src="([^"]+)".*?title="(?:ver |)([^"]+)"'
        matches = scrapertools.find_multiple_matches(match, patron)
        for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
            titleinfo = scrapertools.decodeHtmlentities(scrapedtitle)
            # Cut the title at the first season/chapter marker found.
            for marker in ("Temporada", "Capitulo"):
                parts = re.split(marker, titleinfo, flags=re.IGNORECASE)
                if len(parts) > 1:
                    titleinfo = parts[0]
                    break
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle) + " "
            if item.extra != "newest":
                contentTitle = titleinfo
            else:
                contentTitle = re.sub(
                    r'(?i)(temporada |episodios |capítulo |capitulo )', '',
                    scrapedtitle)

            # Tag the audio language from the flag image on the thumbnail.
            if "ES.png" in match:
                scrapedtitle += "[CAST]"
            if "SUB.png" in match:
                scrapedtitle += "[VOSE]"
            if "LA.png" in match:
                scrapedtitle += "[LAT]"
            if "EN.png" in match:
                scrapedtitle += "[V.O]"
            itemlist.append(
                item.clone(action="findvideos",
                           title=scrapedtitle,
                           url=scrapedurl,
                           thumbnail=scrapedthumbnail,
                           fulltitle=titleinfo,
                           show=titleinfo,
                           contentTitle=contentTitle,
                           context=["buscar_trailer"],
                           contentType="tvshow"))

    if item.extra != "newest":
        # Enrich with TMDb metadata; best-effort only.
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    # Pagination
    next_page = scrapertools.find_single_match(
        data, '<a class="nextpostslink".*?href="([^"]+)">')
    if next_page != "":
        itemlist.append(
            item.clone(title=">> Siguiente", url=next_page, text_color=color3))

    return itemlist