# Example #1 (score: 0)
def get_video_url(page_url, user="", password="", video_password=""):
    """Build the list of playable stream URLs for a Google video page.

    Returns a list of ``[quality_label, url]`` pairs sorted by vertical
    resolution, ascending (unknown qualities sort first).
    """
    video_urls = []
    urls = []
    streams = []
    logger.debug('page_url: %s' % page_url)

    if 'googleusercontent' in page_url:
        url = page_url
        headers_string = httptools.get_url_headers(page_url, forced=True)

        # The itag query parameter identifies the stream quality.
        quality = scrapertools.find_single_match(url, r'.itag=(\d+).')
        if not quality:
            quality = '59'
        streams.append((quality, url))

    else:
        # NOTE(review): `page` is not defined in this function -- it is
        # presumably a module-level response object set by the caller;
        # confirm before relying on this branch.
        data = page.data
        bloque = scrapertools.find_single_match(
            data, 'url_encoded_fmt_stream_map(.*)')

        if bloque:
            data = bloque

        data = data.decode('unicode-escape', errors='replace')
        data = urllib.unquote_plus(urllib.unquote_plus(data))

        headers_string = httptools.get_url_headers(page_url, forced=True)
        streams = scrapertools.find_multiple_matches(
            data,
            'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))'
        )

    # Known itag -> quality label mapping.
    itags = {
        '18': '360p',
        '22': '720p',
        '34': '360p',
        '35': '480p',
        '37': '1080p',
        '43': '360p',
        '59': '480p'
    }
    for itag, video_url in streams:
        # Track the *raw* URL for de-duplication; the original appended the
        # header suffix before recording it, so duplicates were never caught.
        if video_url in urls:
            continue
        urls.append(video_url)
        video_urls.append([itags.get(itag, ''), video_url + headers_string])

    # Sort once after collecting all streams (the original re-sorted inside
    # the loop).  Unknown itags yield an empty label; treat them as lowest
    # quality instead of crashing on int('').
    video_urls.sort(
        key=lambda entry: int(entry[0].replace("p", "")) if entry[0] else 0)

    return video_urls
# Example #2 (score: 0)
def peliculas_topnew(item):
    """List the "I migliori film" carousel entries as movie items."""
    logger.info("streamondemand-pureita altadefinizione01_wiki peliculas_new")

    results = []

    # Fetch the page and narrow it down to the carousel block.
    html = httptools.downloadpage(item.url).data
    section = scrapertools.get_match(
        html, 'I migliori film.*?</div>(.*?)</div>\s*</div>')

    pattern = ('<div class="slide img-box">\s*<img data-src="([^"]+)" alt[^>]+>\s*'
               '<div class="rel-title">([^<]+)</div>\s*'
               '<a class="carou-inner" href="([^"]+)"><i class[^>]+></i></a>')

    for thumb, title, url in re.compile(pattern, re.DOTALL).findall(section):
        title = scrapertools.decodeHtmlentities(title)
        thumb = httptools.get_url_headers(thumb)

        results.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=title,
                         url=url,
                         thumbnail=thumb,
                         fulltitle=title,
                         show=title),
                    tipo="movie"))

    return results
def top_10(item):
    """Build movie items from the site's "top posts" widget."""
    logger.info("[pureita filmstream_biz] top_10")

    entries = []

    # Download the listing page.
    html = httptools.downloadpage(item.url, headers=headers).data

    pattern = '<a href="([^"]+)"\s*class="tptn_link"><img src="([^"]+)" alt="[^>]+" title="([^<]+)" width[^>]+class=[^>]+>'

    for url, thumb, title in re.compile(pattern, re.DOTALL).findall(html):
        title = scrapertools.decodeHtmlentities(title)
        thumb = httptools.get_url_headers(thumb)

        entries.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=title,
                         url=url,
                         thumbnail=thumb,
                         fulltitle=title,
                         show=title),
                    tipo='movie'))

    return entries
def peliculas_top(item):
    """Top-films carousel entries as movie items."""
    logger.info("streamondemand-pureita.altadefinizioneone peliculas_top")

    results = []

    # Download the page.
    html = httptools.downloadpage(item.url, headers=headers).data

    pattern = ('<a class="carou img-box" href="([^"]+)">\s*'
               '<img data-src="([^"]+)" alt[^>]+>\s*<div class="rel-title">([^<]+)</div>')

    for url, thumb, title in re.compile(pattern, re.DOTALL).findall(html):
        title = scrapertools.decodeHtmlentities(title)
        thumb = httptools.get_url_headers(thumb)

        results.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=title,
                         url=url,
                         thumbnail=thumb,
                         fulltitle=title,
                         show=title),
                    tipo="movie"))

    return results
# Example #5 (score: 0)
def peliculas_search(item):
    """Parse search-result cards into movie items."""
    logger.info("streamondemand-pureita altadefinizione01_wiki peliculas_new")

    results = []

    # Download the results page.
    html = httptools.downloadpage(item.url, headers=headers).data

    pattern = ('<a class="sres-wrap clearfix" href="([^"]+)">\s*'
               '<div class="sres-img"><img src="([^"]+)" alt="([^<]+)" />')

    for url, thumb, title in re.compile(pattern, re.DOTALL).findall(html):
        title = scrapertools.decodeHtmlentities(title)
        thumb = httptools.get_url_headers(thumb)

        results.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=title,
                         url=url,
                         thumbnail=thumb,
                         fulltitle=title,
                         show=title),
                    tipo="movie"))

    return results
# Example #6 (score: 0)
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Extract the direct video URL from the already-downloaded page.

    Relies on the module-level ``data`` variable holding the page HTML;
    p.a.c.k.e.r-obfuscated script blocks are unpacked in place.
    Returns a list of ``[label, url]`` pairs.
    """
    logger.debug("page_url=" + page_url)

    video_urls = []

    global data

    # Unpack eval(function(p,a,c,k,e,...)) obfuscated blocks, if present.
    data_pack = scrapertools.find_single_match(data, r"(eval.function.p,a,c,k,e,.*?)\s*</script>")
    if data_pack:
        from lib import jsunpack
        data = jsunpack.unpack(data_pack)
    # Fixed: the original logged this as "page_url=" although it prints the
    # page data, which made debugging output misleading.
    logger.debug("data=" + data)

    # The stream URL appears either as a "src" value or as a jwplayer file.
    url = scrapertools.find_single_match(data, r'"src"value="([^"]+)"')
    if not url:
        url = scrapertools.find_single_match(data, r'file\s*:\s*"([^"]+)"')
    logger.debug("URL=" + str(url))

    # Direct video URL entry.
    video_urls.append([".mp4" + " [backin]", url])

    for video_url in video_urls:
        logger.debug("%s - %s" % (video_url[0],  httptools.get_url_headers(video_url[1])))

    return video_urls
# Example #7 (score: 0)
def peliculas_search(item):
    """Search results: movies and TV shows, dispatched by genre tag."""
    logger.info("[thegroove360.ilgeniodellostreaming] peliculas_search")
    results = []

    # Download the results page (through the page cache).
    html = scrapertools.cache_page(item.url)

    # Each entry carries url, thumbnail, title and a genre badge.
    pattern = ('<a href="([^"]+)"><img src="([^"]+)"\s*alt="([^"]+)".*?>'
               '<span class[^>]+>([^<]+)<\/span>.*?')

    for url, thumb, title, genre in re.compile(pattern, re.DOTALL).findall(html):
        thumb = httptools.get_url_headers(thumb)
        title = scrapertools.decodeHtmlentities(title)
        is_tv = "TV" in genre
        results.append(
            infoSod(Item(
                channel=__channel__,
                action="episodios" if is_tv else "findvideos",
                fulltitle=title,
                show=title,
                title="[COLOR azure]" + title + "[/COLOR]",
                url=url,
                thumbnail=thumb,
                plot="",
                folder=True),
                    tipo="tv" if is_tv else 'movie'))

    return results
# Example #8 (score: 0)
def peliculas_search(item):
    """Search results; movie or TV behaviour depends on ``item.extra``."""
    logger.info("[streaminghd] peliculas_update")

    results = []

    # Download the results page.
    html = httptools.downloadpage(item.url).data

    pattern = ('<a href="([^"]+)">\s*<img src="([^"]+)"\s*alt="([^<]+)"\s*\/>'
               '.*?"meta">[^>]+>\s*([^<]+)<\/span>.*?<div class="contenido"><p>(.*?)<\/p>')

    is_movie = item.extra == "movie"
    for url, thumb, title, date, plot in re.compile(pattern, re.DOTALL).findall(html):
        thumb = httptools.get_url_headers(thumb)
        # Clean up curly quotes and a couple of entities in the title.
        title = title.replace("’", "'").replace(
            " &amp; ", " ").replace("&#8217;", "")
        results.append(
            Item(channel=__channel__,
                 action="findvideos" if is_movie else "episodios",
                 contentType="movie" if is_movie else "tv",
                 fulltitle=title,
                 show=title,
                 title='[COLOR azure]' + title + ' ([COLOR yellow]' +
                 date + "[/COLOR])",
                 url=url,
                 thumbnail=thumb,
                 plot=plot,
                 folder=True))

    return results
def fichas(item):
    """List TV-show cards and append a pagination item when present."""
    logger.info("[seriehd.py] fichas")
    results = []

    # Fetch the page through the Cloudflare bypass.
    html = scrapertools.anti_cloudflare(item.url, headers)

    pattern = ('<h2>(.*?)</h2>\s*'
               '<img src="([^"]+)" alt="[^"]*" />\s*'
               '<A HREF="([^"]+)">')

    for title, thumb, url in re.compile(pattern, re.DOTALL).findall(html):
        thumb = httptools.get_url_headers(thumb)
        title = scrapertools.decodeHtmlentities(title).strip()
        results.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 fulltitle=title,
                 url=url,
                 show=title,
                 thumbnail=thumb), tipo='tv'))

    # "Next page" link, if any.
    next_page = scrapertools.find_single_match(
        html,
        "<span class='current'>\d+</span><a rel='nofollow' class='page larger' href='([^']+)'>\d+</a>")
    if next_page:
        results.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page))

    return results
# Example #10 (score: 0)
def serietv_new(item):
    """Latest TV-series updates from the home-page block."""
    logger.info("[streamondemand-pureita.eurostreaming_video] serietv_new")
    results = []

    # Download the page and isolate the "Ultimi aggiornamenti Serie TV" block.
    html = httptools.downloadpage(item.url, headers=headers).data
    section = scrapertools.get_match(html, 'Ultimi aggiornamenti Serie TV([^+]+)<a\s*href="[^"]+"> Tutte le Serie TV >> </a>')

    # Each entry: link, alt-text title, thumbnail.
    pattern = '<a\s*[^>]+[^>][^>]href="([^"]+)">\s*[^>]+ alt="([^>]+)"\s*src="([^>]+)" />'

    for url, title, thumb in re.compile(pattern, re.DOTALL).findall(section):
        title = title.replace("locandina", "").replace("serie tv", "")
        title = scrapertools.decodeHtmlentities(title)
        thumb = httptools.get_url_headers(thumb)
        results.append(infoSod(
            Item(channel=__channel__,
                 action="cat_ep",
                 fulltitle=title,
                 show=title,
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=url,
                 thumbnail=thumb,
                 plot="",
                 extra=item.extra,
                 folder=True), tipo='tv'))

    return results
# Example #11 (score: 0)
def peliculas_search(item):
    """Search results; routes series to cat_ep and films to findvideos."""
    logger.info("[streamondemand-pureita.eurostreaming_video] peliculas_search")
    results = []

    # Download the results page (through the page cache).
    html = scrapertools.cache_page(item.url)

    pattern = ('<a\s*href="([^>]+)"> <img\s*class="img-responsive "\s*'
               'title="([^>]+)"\s*alt="[^>]+" src="([^"]+)" />')

    for url, title, thumb in re.compile(pattern, re.DOTALL).findall(html):
        thumb = httptools.get_url_headers(thumb)
        title = scrapertools.decodeHtmlentities(title.replace("locandina", ""))
        # The decoded title still contains "serie tv" for series entries.
        is_series = "serie tv" in title
        results.append(infoSod(
            Item(channel=__channel__,
                 action="cat_ep" if is_series else "findvideos",
                 fulltitle=title,
                 show=title,
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=url,
                 thumbnail=thumb,
                 plot="",
                 extra=item.extra,
                 folder=True), tipo='tv' if is_series else "movie"))

    return results
# Example #12 (score: 0)
def peliculas(item):
    """List films (and series) from a cb01-style index page.

    Parses each result card for thumbnail, URL, title and plot, normalises
    HTML entities in the title, and appends a pagination item when a
    "next" link exists.
    """
    logger.info("[thegroove360.cb01io] peliculas")
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the entries (cards); the thumbnail and plot groups are optional.
    patronvideos = '<div class="span4">\s*<a href="[^"]+">\s*(?:<p><img src="([^"]+)[^>]+></p>|).*?'
    patronvideos += '<div class="span8">\s*<a href="([^"]+)">\s*<h1>([^<]+)<\/h1>'
    patronvideos += '.*?</strong>\s*<br>\s*(?:<p[^>]+>|)(.*?)<'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)

    for match in matches:
        scrapedplot = scrapertools.unescape(match.group(4))
        scrapedtitle = scrapertools.unescape(match.group(3))
        # Resolve relative links against the listing URL.
        # NOTE(review): match.group(1) is None when the optional thumbnail
        # group is absent; urljoin would then raise -- confirm pages always
        # include the thumbnail.
        scrapedurl = urlparse.urljoin(item.url, match.group(2))
        scrapedthumbnail = urlparse.urljoin(item.url, match.group(1))
        scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
        # scrapedplot = "" #scrapertools.unescape("[COLOR orange]" + match.group(4) + "[/COLOR]\n" + match.group(5).strip())
        # scrapedplot = "" #scrapertools.htmlclean(scrapedplot).strip()
        # Normalise common entities / punctuation in the title.
        scrapedtitle = scrapedtitle.replace("&#8211;", "-").replace(
            "&#215;", "x").replace("[Sub-ITA]", "(Sub Ita)")
        scrapedtitle = scrapedtitle.replace("/", " - ").replace(
            "&#8217;", "'").replace("&#8230;", "...").replace("ò", "o")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        # URLs containing "serie" are TV shows; everything else is a movie.
        if "serie" in match.group(2):
            type = "tv"
        else:
            type = "movie"
        itemlist.append(
            infoSod(Item(
                channel=__channel__,
                action="findvid" if not "tv" in type else "season_serietv",
                contentType="movie",
                fulltitle=scrapedtitle,
                show=scrapedtitle,
                title=scrapedtitle,
                url=scrapedurl,
                thumbnail=scrapedthumbnail,
                plot=scrapedplot,
                extra=item.extra,
                folder=True),
                    tipo='movie' if not "tv" in type else "tv"))
    # Pagination
    next_page = scrapertools.find_single_match(
        data, '<a class="nextpostslink" rel="next" href="([^"]+)">&raquo;</a>')
    if next_page != "":
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=next_page,
                thumbnail=
                "https://raw.githubusercontent.com/stesev1/channels/master/images/channels_icon/next_1.png"
            ))

    return itemlist
# Example #13 (score: 0)
def list_titles(item):
    """Build the anime title listing for cb01anime, skipping blacklisted
    entries, enriching with TMDB info and appending a next-page item.
    """
    logger.info("[cb01anime.py] mainlist")
    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data

    # Extract the contents
    patronvideos = r'<div class="span4">\s*<a href="([^"]+)">'
    patronvideos += r'<img src="([^"]+)"[^>]+><\/a>[^>]+>[^>]+>'
    patronvideos += r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(.*?)<\/a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapertools.htmlclean(scrapedtitle).strip()
        # NOTE(review): `blacklist` is a module-level collection of titles
        # to hide -- defined elsewhere in this file.
        if not scrapedtitle in blacklist:
            if 'lista richieste' in scrapedtitle.lower(): continue

            # Strip "[FULL ITA]" / "[SUB ITA]" tags (any capitalisation).
            patron = r'(?:\[[Ff][Uu][Ll]{2}\s*[Ii][Tt][Aa]\]|\[[Ss][Uu][Bb]\s*[Ii][Tt][Aa]\])'
            cleantitle = re.sub(patron, '', scrapedtitle).strip()

            ## ------------------------------------------------
            scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
            ## ------------------------------------------------

            # Add to the XBMC listing
            itemlist.append(
                Item(channel=item.channel,
                     action="listacompleta"
                     if "Lista Alfabetica Completa Anime/Cartoon"
                     in scrapedtitle else "episodios",
                     contentType="tvshow",
                     title=scrapedtitle,
                     fulltitle=cleantitle,
                     text_color="azure",
                     show=cleantitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     viewmode="movie_with_plot"))

    # Enrich items with TMDB metadata in place.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Put the next page mark
    # NOTE(review): the bare except silently drops the pagination item on
    # any failure -- deliberate best-effort, but it also hides real bugs.
    try:
        next_page = scrapertools.find_single_match(
            data, "<link rel='next' href='([^']+)'")
        itemlist.append(
            Item(
                channel=item.channel,
                action="list_titles",
                title="[COLOR lightgreen]" +
                config.get_localized_string(30992) + "[/COLOR]",
                url=next_page,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
            ))
    except:
        pass

    return itemlist
# Example #14 (score: 0)
def peliculas_requested(item):
    """Most-viewed series of the month, from the home-page block."""
    logger.info("[streamondemand-pureita.eurostreaming_video] peliculas_requested")
    results = []

    # Download the page and isolate the "most viewed" block.
    html = httptools.downloadpage(item.url, headers=headers).data
    section = scrapertools.get_match(html, 'Le serie più viste di questo mese:</h3>(.*?)<div\s*class="container-fluid wrap-cont-footer">')

    # Each entry: link, alt-text title, thumbnail.
    pattern = '<a\s*[^>]+[^>][^>]href="([^"]+)">\s*[^>]+ alt="([^>]+)"\s*src="([^>]+)" />'

    for url, title, thumb in re.compile(pattern, re.DOTALL).findall(section):
        title = title.replace("locandina", "").replace("serie tv", "")
        thumb = httptools.get_url_headers(thumb)
        title = scrapertools.decodeHtmlentities(title)
        results.append(infoSod(
            Item(channel=__channel__,
                 action="cat_ep",
                 fulltitle=title,
                 show=title,
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=url,
                 thumbnail=thumb,
                 plot="",
                 extra=item.extra,
                 folder=True), tipo='tv'))

    return results
# Example #15 (score: 0)
def fichas(item):
    """List TV-show cards and append a pagination item when present."""
    logger.info("[seriehd.py] fichas")
    results = []

    html = httptools.downloadpage(item.url, headers=headers).data

    pattern = ('<h2>(.*?)</h2>\s*'
               '<img src="([^"]+)" alt="[^"]*" />\s*'
               '<A HREF="([^"]+)">')

    for title, thumb, url in re.compile(pattern, re.DOTALL).findall(html):
        thumb = httptools.get_url_headers(thumb)
        title = scrapertools.decodeHtmlentities(title).strip()
        results.append(
            infoSod(Item(channel=__channel__,
                         action="episodios",
                         title="[COLOR azure]" + title + "[/COLOR]",
                         fulltitle=title,
                         url=url,
                         show=title,
                         thumbnail=thumb),
                    tipo='tv'))

    # "Next page" link, if any.
    next_page = scrapertools.find_single_match(
        html,
        "<span class='current'>\d+</span><a rel='nofollow' class='page larger' href='([^']+)'>\d+</a>")
    if next_page:
        results.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page))

    return results
# Example #16 (score: 0)
def peliculas_new(item):
    """Latest films from the "Ultimi Film inseriti" home-page block."""
    logger.info("[streamondemand-pureita.eurostreaming_video] peliculas_new")
    results = []

    # Download the page.
    html = httptools.downloadpage(item.url, headers=headers).data

    # Narrow search by selecting only the latest-films section.
    section = scrapertools.get_match(html, '<h2>Ultimi Film inseriti</h2>([^+]+)<div\s*class="w-sidebar-container">')

    pattern = ('<a\s*[^>]+ href="([^"]+)">\s*<img\s*class="img-responsive " title="([^"]+)" alt=".*?" src="([^<]+)" />'
               '<div\s*class="boxinfolocand"><h2>[^<]+</h2>')

    for url, title, thumb in re.compile(pattern, re.DOTALL).findall(section):
        title = title.replace("locandina film", "")
        thumb = httptools.get_url_headers(thumb)
        title = scrapertools.decodeHtmlentities(title)
        results.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=title,
                 show=title,
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=url,
                 thumbnail=thumb,
                 plot="",
                 extra=item.extra,
                 folder=True), tipo='movie'))

    return results
# Example #17 (score: 0)
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve dailymotion stream URLs, one entry per quality in the
    master m3u8, each as ``[label, url, 0, subtitle]``.

    NOTE(review): relies on a module-level ``response`` object whose
    ``.json`` payload holds the video metadata -- confirm it is set by
    the caller before this runs.
    """
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    subtitle = ""
    data = response.json
    #logger.error(data['subtitles'])
    sub_data = data['subtitles'].get('data', '')

    # Prefer Spanish subtitles, fall back to English; on any failure
    # (missing keys, empty url list) leave subtitle empty.
    try:

        sub_es = sub_data.get('es') or sub_data.get('en')
        subtitle = sub_es.get('urls', [])[0]
    except:
        pass
    # for s in sub_data:
    #     surl = sub_data[s].get('urls', [])[0]
    #     subtitle.append(surl)
    #subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')
    #qualities = scrapertools.find_multiple_matches(data, '"([^"]+)":(\[\{"type":".*?\}\])')
    # The "auto" quality entry points at the master m3u8 playlist.
    stream_url = data['qualities']['auto'][0]['url']

    #logger.error(stream_url)
    data_m3u8 = httptools.downloadpage(stream_url).data.decode('utf-8')

    # Each progressive rendition advertises its name and direct URI.
    patron = 'NAME="([^"]+)",PROGRESSIVE-URI="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data_m3u8, patron)

    for calidad, url in matches:
        # Drop the "@60" fps suffix from quality labels like "720@60".
        calidad = calidad.replace("@60","")
        url = httptools.get_url_headers(url, forced=True, dom='dailymotion.com')
        video_urls.append(["%sp .mp4 [dailymotion]" % calidad, url, 0, subtitle])
    return video_urls
def peliculas_search(item):
    """Search results for filmhd as movie items."""
    logger.info("[pureita filmhd] peliculas_search")

    results = []

    # Download the results page.
    html = httptools.downloadpage(item.url, headers=headers).data

    pattern = ('<\/div>\s*<a href="([^"]+)">\s*<div class="movie-play">\s*'
               '<i class="icon-controller-play"></i>\s*</div>\s*<img src="([^"]+)">[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</h2>')

    for url, thumb, title in re.compile(pattern, re.DOTALL).findall(html):
        title = scrapertools.decodeHtmlentities(title)
        thumb = httptools.get_url_headers(thumb)
        results.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=title,
                         url=url,
                         thumbnail=thumb,
                         fulltitle=title,
                         show=title),
                    tipo='movie'))

    return results
# Example #19 (score: 0)
def peliculas_search(item):
    """Search results with a rating badge; TV entries route to episodios."""
    logger.info(
        "[streamondemand-pureita ilgeniodellostreaming] peliculas_search")
    results = []

    # Download the results page.
    html = httptools.downloadpage(item.url, headers=headers).data

    # Each entry: url, thumbnail, title, genre badge, rating value.
    pattern = ('<a href="([^"]+)"><img src="([^"]+)"\s*alt="([^"]+)".*?>'
               '<span class[^>]+>([^<]+)<\/span>.*?'
               '<span class="rating">(.*?)<\/span>')

    for url, thumb, title, genre, rating in re.compile(pattern, re.DOTALL).findall(html):
        thumb = httptools.get_url_headers(thumb)
        title = scrapertools.decodeHtmlentities(title)
        # Build the yellow rating suffix; an empty rating collapses to "".
        rating = "  [[COLOR yellow]" + rating + "[/COLOR]]"
        rating = rating.replace("  [[COLOR yellow]" + "" + "[/COLOR]]", "")
        is_tv = "TV" in genre
        results.append(
            infoSod(Item(
                channel=__channel__,
                action="episodios" if is_tv else "findvideos",
                fulltitle=title,
                show=title,
                title="[COLOR azure]" + title + "[/COLOR]" + rating,
                url=url,
                thumbnail=thumb,
                plot="",
                folder=True),
                    tipo="tv" if is_tv else 'movie'))

    return results
# Example #20 (score: 0)
def peliculas_new_tv(item):
    """List new TV episodes (thumbnail, rating, quality, link, title, date)
    and append a "next page" item when the paginator is present.
    """
    logger.info("[streaminghd] peliculas")
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries (folders); six positional groups per match:
    # 1 thumbnail, 2 rating/episode tag, 3 quality, 4 url, 5 title, 6 date.
    patron = '<img src="([^"]+)" alt="[^>]+">.*?<div class="rating"><span class="icon-star2">'
    patron += '<\/span>\s*([^<]+)<\/div>.*?[^>]+>.*?<span class="quality">([^<]+).*?<a\s*href="([^"]+)">([^<]+)<\/a><\/h3>\s*<span>([^<]+)<\/span>'
    matches = re.compile(patron, re.DOTALL).finditer(data)

    for match in matches:
        scrapedplot = ""
        date = scrapertools.unescape(match.group(6))
        scrapedtitle = scrapertools.unescape(match.group(5))
        scrapedurl = scrapertools.unescape(match.group(4))
        quality = scrapertools.unescape(match.group(3))
        scrapedep = scrapertools.unescape(match.group(2))
        # Thumbnails may be relative; resolve against the listing URL.
        scrapedthumbnail = urlparse.urljoin(item.url, match.group(1))
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        # Normalise the episode/language tag, then wrap the decorations in
        # coloured brackets for the displayed title.
        scrapedep = scrapedep.replace("[sub-ita]",
                                      "- Sub").replace("[ita]", "")
        scrapedep = " ([COLOR yellow]" + scrapedep.strip() + "[/COLOR])"
        if quality:
            quality = " ([COLOR yellow]" + quality.strip() + "[/COLOR])"
        if date:
            date = " ([COLOR orange][I]" + date.strip() + "[/I][/COLOR] )"

        scrapedtitle = scrapedtitle.replace("’", "'").replace(
            " &amp; ", " ").replace("&#8217;", "")
        itemlist.append(
            Item(channel=__channel__,
                 action="episodios",
                 contentType="tv",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle + scrapedep + quality + date,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extract the paginator
    # NOTE(review): the next-page item uses action "peliculas_tv", not this
    # function's own name -- confirm that handler exists and is intended.
    patronvideos = '<a href="([^"]+)"><span class="icon-chevron-right">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas_tv",
                title="[COLOR orange]Successivi >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                folder=True))

    return itemlist
# Example #21 (score: 0)
def peliculas_top(item):
    """Top-films slider entries as movie items."""
    logger.info("streamondemand-pureita cb01_wiki peliculas_top")

    results = []

    # Download the page.
    html = httptools.downloadpage(item.url, headers=headers).data

    pattern = ('<div class="skoro-img img-box pseudo-link" data-link="([^"]+)">\s*'
               '<img src="([^"]+)" alt="(.*?)"\s*\/>')

    for url, thumb, title in re.compile(pattern, re.DOTALL).findall(html):
        title = scrapertools.decodeHtmlentities(title)
        thumb = httptools.get_url_headers(thumb)

        results.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=title,
                         url=url,
                         thumbnail=thumb,
                         fulltitle=title,
                         show=title),
                    tipo="movie"))

    return results
# Example #22 (score: 0)
def fichas_tv(item):
    """TV-show posters plus a "next page" item when available."""
    logger.info("[seriehd.py] fichas")
    results = []

    html = httptools.downloadpage(item.url, headers=headers).data

    pattern = ('<a class="poster" href="([^"]+)" title="(.*?)">\s*'
               '<img src="([^"]+)" alt=".*?" />')

    for url, title, thumb in re.compile(pattern, re.DOTALL).findall(html):
        thumb = httptools.get_url_headers(thumb)
        title = scrapertools.decodeHtmlentities(title).strip()
        results.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 fulltitle=title,
                 url=url,
                 show=title,
                 thumbnail=thumb), tipo='tv'))

    # "Next page" arrow, if any.
    next_page = scrapertools.find_single_match(
        html, '<a href="([^"]+)"\s*><span aria-hidden="true">&raquo;')
    if next_page:
        results.append(
            Item(channel=__channel__,
                 action="fichas_tv",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=next_page,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))

    return results
# Example #23 (score: 0)
def findvideos(item):
    """Return one playable Item per (video quality, subtitle language) pair."""
    logger.info()
    playable = []
    page = httptools.downloadpage(item.url, canonical=canonical).data

    # Subtitle tracks: (webvtt path, language label) pairs.
    subtitles = scrapertools.find_multiple_matches(
        page, 'file: "(/webvtt[^"]+)".*?label: "([^"]+)"')
    # Restrict the video-source search to the player block.
    player_block = scrapertools.find_single_match(page, 'title.*?track')
    sources = scrapertools.find_multiple_matches(
        player_block, 'file:\s*"([^"]+).*?label:\s*"([^"]+)"')

    for stream_path, quality in sources:
        stream_url = httptools.get_url_headers(host + stream_path, forced=True)
        for sub_path, language in subtitles:
            sub_url = host + urllib.quote(sub_path)
            label = "Ver video en [[COLOR %s]%s[/COLOR]] Sub %s" % (
                color3, quality, language)
            playable.append(
                item.clone(action="play",
                           server="directo",
                           title=label,
                           url=stream_url,
                           subtitle=sub_url,
                           extra=item.url,
                           quality=quality,
                           language=language))

    return playable
Exemple #24
0
def peliculas_update(item):
    """List one page (14 entries) of recently updated TV series.

    Pagination state is carried inside the URL itself as '<url>{}<page>'.
    """
    logger.info("[thegroove360.cb01io] peliculas_update")
    itemlist = []
    per_page = 14

    page_no = 1
    if '{}' in item.url:
        item.url, page_no = item.url.split('{}')
        page_no = int(page_no)

    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract entries: url, thumbnail, title, episode info
    patron = '<li><a href="([^"]+)"\s*><div style="background[^(]+\((.*?)\)">\s*<div class[^>]+>([^<]+)<\/div>[^=]+=[^>]+>([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Only render the slice belonging to the requested page.
    start = (page_no - 1) * per_page
    for scrapedurl, scrapedthumbnail, scrapedtitle, ep in matches[start:start + per_page]:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).replace(
            "Privato: ", "")
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        ep = ep.replace("||", "").replace("/", " - ").strip()
        ep = "  ([COLOR orange]" + ep + "[/COLOR])"
        title = scrapedtitle.strip()
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         extra=item.extra,
                         action="season_serietv",
                         contentType="tv",
                         title=title + ep,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail.strip(),
                         fulltitle=title,
                         show=title,
                         plot="",
                         folder=True),
                    tipo='tv'))

    # Offer a "next page" entry while more matches remain.
    if len(matches) >= page_no * per_page:
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="peliculas_update",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=item.url + '{}' + str(page_no + 1),
                 thumbnail="https://raw.githubusercontent.com/stesev1/channels/master/images/channels_icon/next_1.png",
                 folder=True))

    return itemlist
Exemple #25
0
def novita(item):
    """List the "new additions" entries of the channel.

    Scrapes thumbnail/url/title/plot blocks from item.url and returns one
    Item per entry, plus Home and next-page navigation items when available.
    """
    logger.info("[cb01anime.py] mainlist")
    itemlist = []

    # Download the page (through the Cloudflare bypass helper)
    data = scrapertools.anti_cloudflare(item.url, headers)

    # Extract the entries (folders): thumbnail, url, title, plot
    patronvideos = '<div class="span4"> <a.*?<img src="(.*?)".*?'
    patronvideos += '<div class="span8">.*?<a href="(.*?)">.*?'
    patronvideos += '<h1>(.*?)</h1></a>.*?<br />(.*?)<br>.*?'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)

    for match in matches:
        scrapedthumbnail = match.group(1)
        scrapedurl = match.group(2)
        scrapedtitle = scrapertools.unescape(match.group(3))
        scrapedplot = scrapertools.unescape(match.group(4))
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        # NOTE(review): startswith("") is always True, so the first 64
        # characters of every plot are stripped unconditionally — this
        # presumably once tested a specific boilerplate prefix; confirm intent.
        if scrapedplot.startswith(""):
            scrapedplot = scrapedplot[64:]
        if DEBUG: logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")

        ## ------------------------------------------------
        # Attach request headers so protected thumbnails load correctly
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        ## ------------------------------------------------

        # Add to the XBMC listing; the full alphabetical list gets its own action
        itemlist.append(
            Item(channel=__channel__,
                 action="listacompleta" if scrapedtitle == "Lista Alfabetica Completa Anime/Cartoon" else "episodios",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 viewmode="movie_with_plot",
                 plot=scrapedplot))

    # Put the next page mark
    try:
        next_page = scrapertools.get_match(data, "<link rel='next' href='([^']+)'")
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True)),
        itemlist.append(
            Item(channel=__channel__,
                 action="novita",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
    except:
        pass

    return itemlist
def peliculas_tv(item):
    """List TV series with their rating from the current results page."""
    logger.info("streamondemand-pureita streaminghd peliculas")
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Entries: thumbnail, rating, url, title
    patron = '<img src="([^"]+)" alt="[^>]+">.*?<div class="rating"><span class="icon-star2">'
    patron += '</span>\s*([^<]+)</div>.*?[^>]+>.*?<a\s*href="([^"]+)">([^<]+)</a>'

    for thumb, raw_votes, rel_url, raw_title in re.findall(patron, data, re.DOTALL):
        title = scrapertools.unescape(raw_title)
        url = urlparse.urljoin(item.url, rel_url)
        votes = scrapertools.unescape(raw_votes)
        thumb = urlparse.urljoin(item.url, thumb)

        thumb = httptools.get_url_headers(thumb)
        title = scrapertools.decodeHtmlentities(title)
        votes = votes.replace("[", "")
        votes = votes.replace("]", "")
        votes = " - [[COLOR yellow]" + votes + "[/COLOR]]"
        # NOTE(review): this substring test also matches ratings such as
        # "10" or "8.0", not only a literal zero — confirm intent.
        if "0" in votes:
            votes = "  [[COLOR yellow]" + "N/A" + "[/COLOR]]"
        title = title.replace("’", "'").replace(
            " &amp; ", " ").replace("&#8217;", "")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="episodios",
                         contentType="serie",
                         fulltitle=title,
                         show=title,
                         title=title + votes,
                         url=url,
                         thumbnail=thumb,
                         plot="",
                         folder=True),
                    tipo='tv'))

    # Pagination: "next" chevron link
    next_link = scrapertools.find_single_match(
        data, '<a href="([^"]+)"><span class="icon-chevron-right">')
    if next_link:
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas_tv",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=urlparse.urljoin(item.url, next_link),
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                 folder=True))

    return itemlist
Exemple #27
0
def fichas(item):
    """List movie cards from the current results page (italiafilmvideohd)."""
    logger.info("[italiafilmvideohd.py] fichas")

    entries = []

    # Download the page
    html = httptools.downloadpage(item.url, headers=headers).data

    patron = ('<li class="item">.*?'
              'href="([^"]+)".*?'
              'title="([^"]+)".*?'
              '<img src="([^"]+)".*?')

    for movie_url, raw_title, thumb in re.compile(patron, re.DOTALL).findall(html):
        clean_title = scrapertools.decodeHtmlentities(raw_title)

        # ------------------------------------------------
        # Attach headers required to load the thumbnail.
        thumb = httptools.get_url_headers(thumb)
        # ------------------------------------------------
        entries.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         contentType="movie",
                         title=clean_title,
                         url=movie_url,
                         thumbnail=thumb,
                         fulltitle=clean_title,
                         show=raw_title),
                    tipo='movie'))

    entries.append(
        Item(channel=__channel__,
             action="HomePage",
             title="[COLOR yellow]Torna Home[/COLOR]",
             folder=True))

    # Pagination
    next_url = scrapertools.find_single_match(
        html, '<a href="([^"]+)"\s*><span aria-hidden="true">&raquo;')
    if next_url:
        entries.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_url,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return entries
Exemple #28
0
def video(item):
    """List movie/TV entries scraped from item.url.

    Each match yields (url, thumbnail, title, year, quality); the target
    action depends on item.contentType ('tvshow' -> episodios, anything
    else -> findvideos). Appends a next-page Item when a pager link exists
    and enriches the list with TMDb metadata before returning it.
    """
    logger.info('[filmsenzalimiticc.py] video')
    itemlist = []

    # Load the page, flattened to a single line to simplify the regex.
    data = httptools.downloadpage(item.url).data.replace('\n', '').replace('\t', '')

    # Extract entries: url, thumbnail, title, year, quality
    patron = r'<div class="mediaWrap mediaWrapAlt">.*?<a href="([^"]+)".*?src="([^"]+)".*?<p>([^"]+) (\(.*?)streaming<\/p>.*?<p>\s*(\S+).*?<\/p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Choose the target action once (it only depends on item.contentType).
    # Bug fix: previously this name was bound inside the loop and only for
    # 'movie'/'tvshow', raising NameError for any other contentType; movie
    # behavior is now the default.
    if item.contentType == 'tvshow':
        azione = 'episodios'
    else:
        azione = 'findvideos'

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality in matches:
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedyear = scrapertools.decodeHtmlentities(scrapedyear)
        scrapedquality = scrapertools.decodeHtmlentities(scrapedquality)

        # The year is captured with its opening parenthesis; strip both.
        year = scrapedyear.replace('(', '').replace(')', '')
        infolabels = {}
        if year:
            infolabels['year'] = year

        title = scrapedtitle + ' ' + scrapedyear + ' [' + scrapedquality + ']'

        itemlist.append(
            Item(channel=item.channel,
                 action=azione,
                 contentType=item.contentType,
                 title=title,
                 fulltitle=scrapedtitle,
                 text_color='azure',
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 infoLabels=infolabels,
                 show=scrapedtitle))

    # Next page
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)">')

    if next_page != '':
        itemlist.append(
            Item(channel=item.channel,
                 action='film',
                 title='[COLOR lightgreen]' + config.get_localized_string(30992) + '[/COLOR]',
                 url=next_page,
                 contentType=item.contentType,
                 thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
Exemple #29
0
def peliculas_new(item):
    """List newly added movies, showing each title with its TMDb score."""
    logger.info("[pureita altadefinizione01_video] peliculas_new")

    itemlist = []

    # Download the page
    html = scrapertools.cache_page(item.url)

    # Narrow the search to the main results section only.
    section = scrapertools.get_match(
        html,
        '<section class="showpeliculas col-mt-8">\s*<h\d+>[^<]+</h\d+>(.*?)<aside'
    )

    card_re = ('<a href="([^"]+)" title="[^>]+">\s*<div class="poster">\s*<span class="rating">\s*'
               '<i class="glyphicon glyphicon-star"></i><span class="rating-number">([^"]+)</span>\s*</span>\s*<div class="poster-image-container">\s*'
               '<img src="([^"]+)" title="([^<]+)" />\s*</div>')

    for movie_url, score, thumb, raw_title in re.findall(card_re, section, re.DOTALL):
        # Normalise punctuation in the scraped title.
        clean = raw_title.replace(",", " ").replace(":", " ").replace("–", " - ")
        clean = clean.replace("&#8211;", " - ").replace("&#8217;", "'")
        clean = scrapertools.decodeHtmlentities(clean)
        thumb = httptools.get_url_headers(thumb)

        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title="[COLOR azure]" + clean +
                         " ([COLOR yellow]TMDb: " + score +
                         "[/COLOR])",
                         url=movie_url,
                         thumbnail=thumb,
                         fulltitle=clean,
                         show=clean),
                    tipo='movie'))

    # Pagination
    next_url = scrapertools.find_single_match(
        html,
        '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right" aria-hidden="true"></i></a></div>'
    )
    if next_url:
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas_new",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_url,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))

    return itemlist
Exemple #30
0
def peliculas(item):
    """List movies from the current page (Cloudflare-protected site)."""
    logger.info("streamondemand.altadefinizione01 peliculas")
    entries = []

    # Download the page through the anti-Cloudflare helper.
    html = scrapertools.anti_cloudflare(item.url, headers)

    # Entries: url, thumbnail, title
    patron = '<a\s+href="([^"]+)"\s+title="[^"]*">\s+<img\s+width="[^"]*"\s+height="[^"]*"\s+src="([^"]+)"\s+class="[^"]*"\s+alt="([^"]+)"'

    for movie_url, thumb, raw_title in re.findall(patron, html, re.DOTALL):
        title = scrapertools.decodeHtmlentities(raw_title.replace("Streaming", ""))
        if DEBUG:
            logger.info("title=[" + title + "], url=[" + movie_url +
                        "], thumbnail=[" + thumb + "]")
        ## ------------------------------------------------
        thumb = httptools.get_url_headers(thumb)
        ## ------------------------------------------------

        entries.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         contentType="movie",
                         title=title,
                         fulltitle=title,
                         url=movie_url,
                         thumbnail=thumb),
                    tipo="movie"))

    # Pagination
    pager = re.findall('class="nextpostslink" rel="next" href="([^"]+)">&raquo;', html, re.DOTALL)
    if pager:
        entries.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        entries.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=urlparse.urljoin(item.url, pager[0]),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return entries
def peliculas_update(item):
    """List the "Aggiornamenti Serie Tv" block of recently updated series."""
    logger.info("[streamondemand-pureita guarda_serie] peliculas_last")

    results = []

    # Download the page and narrow to the updates section.
    html = httptools.downloadpage(item.url, headers=headers).data
    section = scrapertools.get_match(
        html, '<h2>Aggiornamenti Serie Tv</h2><(.*?)Vedi tutte</a></span>')

    entry_re = ('<img src="([^"]+)" alt="([^"]+)"><div class="rating">[^>]+>'
                '</span>\s*([^<]+)</div><div class="featu">.*?</div>'
                '<a href="([^"]+)">[^>]+></div>')

    for thumb, raw_title, score, show_url in re.findall(entry_re, section, re.DOTALL):
        score_tag = " ([COLOR yellow]" + score + "[/COLOR])"
        # A perfect "10" is treated as a placeholder and hidden.
        if score_tag == " ([COLOR yellow]" + "10" + "[/COLOR])":
            score_tag = ""
        title = raw_title.replace(" Streaming HD", "").replace(" streaming", "")
        title = title.replace("-)", ")").replace("’", "'").replace("&#8217;", "'")
        thumb = httptools.get_url_headers(thumb)
        title = scrapertools.decodeHtmlentities(title)

        results.append(
            infoSod(Item(channel=__channel__,
                         action="episodios",
                         title="[COLOR azure]" + title + '[/COLOR]' + score_tag,
                         url=show_url,
                         thumbnail=thumb,
                         fulltitle=title,
                         show=title,
                         plot="",
                         folder=True),
                    tipo='tv'))

    # Pagination
    next_url = scrapertools.find_single_match(
        html, '<a href="([^"]+)" ><span class="icon-chevron-right">')
    if next_url:
        results.append(
            Item(channel=__channel__,
                 action="peliculas_update",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=next_url,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))

    return results
def searchfilm(item):
    """List movie search results (itastreaming), normalising the markup first."""
    logger.info("[itastreaming.py] fichas")

    found = []

    # Download the page through the anti-Cloudflare helper.
    html = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality: inject an SD marker where the site omits it
    html = re.sub(r'<div class="wrapperImage"[^<]+<a',
                  '<div class="wrapperImage"><fix>SD</fix><a',
                  html)
    # fix - IMDB: inject a zero rating where the site omits it
    html = re.sub(r'<h5> </div>',
                  '<fix>IMDB: 0.0</fix>',
                  html)

    card_re = ('<li class="s-item">.*?'
               'src="([^"]+)".*?'
               'alt="([^"]+)".*?'
               'href="([^"]+)".*?')

    for thumb, raw_title, movie_url in re.findall(card_re, html, re.DOTALL):
        title = scrapertools.decodeHtmlentities(raw_title)

        # ------------------------------------------------
        thumb = httptools.get_url_headers(thumb)
        # ------------------------------------------------
        found.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=title,
                 contentType="movie",
                 url=movie_url,
                 thumbnail=thumb,
                 fulltitle=title,
                 show=title), tipo='movie'))

    # Pagination
    next_url = scrapertools.find_single_match(html, "href='([^']+)'>Seguente &rsaquo;")
    if next_url:
        found.append(
            Item(channel=__channel__,
                 action="searchfilm",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_url,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return found
def fichas(item):
    """List movie cards from a browse page (italiafilmvideohd, Cloudflare)."""
    logger.info("[italiafilmvideohd.py] fichas")

    results = []

    # Download the page through the anti-Cloudflare helper.
    page = scrapertools.anti_cloudflare(item.url, headers)
    # fix - calidad

    card_re = ('<li class="item">.*?'
               'href="([^"]+)".*?'
               'title="([^"]+)".*?'
               '<img src="([^"]+)".*?')

    for film_url, raw_title, thumb in re.findall(card_re, page, re.DOTALL):
        nice_title = scrapertools.decodeHtmlentities(raw_title)

        # ------------------------------------------------
        thumb = httptools.get_url_headers(thumb)
        # ------------------------------------------------
        results.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 contentType="movie",
                 title=nice_title,
                 url=film_url,
                 thumbnail=thumb,
                 fulltitle=nice_title,
                 show=raw_title), tipo='movie'))

    results.append(
        Item(channel=__channel__,
             action="HomePage",
             title="[COLOR yellow]Torna Home[/COLOR]",
             folder=True))

    # Pagination
    next_url = scrapertools.find_single_match(page, '<a href="([^"]+)"\s*><span aria-hidden="true">&raquo;')
    if next_url:
        results.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_url,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return results
def peliculas(item):
    """List movies from the current page (altadefinizione01, Cloudflare)."""
    logger.info("streamondemand.altadefinizione01 peliculas")
    movies = []

    # Download the page through the anti-Cloudflare helper.
    page = scrapertools.anti_cloudflare(item.url, headers)

    # Entries: url, thumbnail, title
    card_re = '<a\s+href="([^"]+)"\s+title="[^"]*">\s+<img\s+width="[^"]*"\s+height="[^"]*"\s+src="([^"]+)"\s+class="[^"]*"\s+alt="([^"]+)"'

    for film_url, thumb, raw_title in re.findall(card_re, page, re.DOTALL):
        name = scrapertools.decodeHtmlentities(raw_title.replace("Streaming", ""))
        if DEBUG: logger.info(
            "title=[" + name + "], url=[" + film_url + "], thumbnail=[" + thumb + "]")
        ## ------------------------------------------------
        thumb = httptools.get_url_headers(thumb)
        ## ------------------------------------------------

        movies.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 contentType="movie",
                 title=name,
                 fulltitle=name,
                 url=film_url,
                 thumbnail=thumb), tipo="movie"))

    # Pagination
    pager = re.findall('class="nextpostslink" rel="next" href="([^"]+)">&raquo;', page, re.DOTALL)
    if pager:
        movies.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        movies.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=urlparse.urljoin(item.url, pager[0]),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return movies
def fichas(item):
    """List series cards (vediserie), stripping any leading year span."""
    logger.info("[vediserie.py] fichas")
    shows = []

    html = scrapertools.anti_cloudflare(item.url, headers)

    card_re = ('<h2>[^>]+>\s*'
               '<img[^=]+=[^=]+=[^=]+="([^"]+)"[^>]+>\s*'
               '<A HREF=([^>]+)>[^>]+>[^>]+>[^>]+>\s*'
               '[^>]+>[^>]+>(.*?)</[^>]+>[^>]+>')

    for thumb, href, raw_title in re.findall(card_re, html, re.DOTALL):
        thumb = httptools.get_url_headers(thumb)
        title = scrapertools.decodeHtmlentities(raw_title)
        # Drop a leading year <span> left over from the markup (19 chars).
        if title.startswith('<span class="year">'):
            title = title[19:]

        shows.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 title=title,
                 fulltitle=title,
                 url=href.replace('"', ''),
                 show=title,
                 thumbnail=thumb), tipo='tv'))

    # Pagination
    next_url = scrapertools.find_single_match(
        html, '<span class=\'current\'>[^<]+</span><a class="page larger" href="(.*?)">')
    if next_url:
        shows.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_url))

    return shows
Exemple #36
0
    def render_items(self, itemlist, parent_item):
        """
        Render *itemlist* to the web client as a JSON "EndItems" message.

        @type itemlist: list
        @param itemlist: list of elements to display

        @type parent_item: item
        @param parent_item: parent element the list came from
        """

        # If itemlist is not a list, just hide the loading indicator and bail out
        if not type(itemlist) == list:
            JsonData = {}
            JsonData["action"]="HideLoading"
            JsonData["data"] = {}
            self.send_message(JsonData)
            return

        # If there are no items at all, show a notice entry
        if not len(itemlist):
            itemlist.append(Item(title="No hay elementos que mostrar"))

        # Channel-selector screens get their own view modes; default is "list"
        if parent_item.channel == "channelselector" and not parent_item.action == "filterchannels":
          parent_item.viewmode = "banner"
        elif parent_item.channel == "channelselector" and  parent_item.action == "filterchannels":
          parent_item.viewmode = "channel"
        if not parent_item.viewmode:
          parent_item.viewmode = "list"

        # "Back" item (skipped on the root menu or when one is already present)
        if not (parent_item.channel=="channelselector" and parent_item.action=="mainlist") and not itemlist[0].action=="go_back":
          if parent_item.viewmode in ["banner", "channel"]:
            itemlist.insert(0,Item(title="Atrás", action="go_back",thumbnail=os.path.join(config.get_runtime_path(),"resources","images","bannermenu","thumb_atras.png")))
          else:
            itemlist.insert(0,Item(title="Atrás", action="go_back",thumbnail=os.path.join(config.get_runtime_path(),"resources","images","squares","thumb_atras.png")))

        JsonData = {}
        JsonData["action"] = "EndItems"
        JsonData["data"] = {}
        JsonData["data"]["itemlist"] = []
        JsonData["data"]["viewmode"] = parent_item.viewmode
        JsonData["data"]["category"] = parent_item.category.capitalize()
        JsonData["data"]["host"] = self.controller.host

        # Walk the itemlist building one JSON entry per item
        for item in itemlist:

            # Pick a default thumbnail when the item has none
            if not item.thumbnail and item.action == "search": item.thumbnail = channelselector.get_thumbnail_path() + "thumb_buscar.png"
            if not item.thumbnail and item.folder == True: item.thumbnail = "http://media.tvalacarta.info/pelisalacarta/thumb_folder.png"
            if not item.thumbnail and item.folder == False: item.thumbnail = "http://media.tvalacarta.info/pelisalacarta/thumb_nofolder.png"
            if "http://media.tvalacarta.info/" in item.thumbnail and not item.thumbnail.startswith("http://media.tvalacarta.info/pelisalacarta/thumb_"):

              if parent_item.viewmode in ["banner", "channel"]:
                item.thumbnail = channelselector.get_thumbnail_path("bannermenu") + os.path.basename(item.thumbnail)
              else:
                item.thumbnail = channelselector.get_thumbnail_path() + os.path.basename(item.thumbnail)

            # These images are not available in bannermenu; drop them so they don't render badly
            elif parent_item.viewmode in ["banner", "channel"] and item.thumbnail.startswith("http://media.tvalacarta.info/pelisalacarta/thumb_"):
              item.thumbnail = ""


            # If the item has no category, inherit the parent's
            if item.category == "":
                item.category = parent_item.category

            # If the item has no fanart, inherit the parent's
            if item.fanart == "":
                item.fanart = parent_item.fanart

            # Preserve leading-whitespace indentation as &nbsp; for the web UI
            title = item.title.replace(item.title.lstrip(), ""). replace(" ", "&nbsp;") + item.title.lstrip()

            # Format the title
            if item.text_color:
                title = '[COLOR %s]%s[/COLOR]' % (item.text_color, title)
            if item.text_blod:
                title = '[B]%s[/B]' % title
            if item.text_italic:
                title = '[I]%s[/I]' % title

            title = self.kodi_labels_to_html(title)

            # Add headers to images hosted behind Cloudflare
            from core import httptools
            item.thumbnail = httptools.get_url_headers(item.thumbnail)
            item.fanart = httptools.get_url_headers(item.fanart)

            JsonItem = {}
            JsonItem["title"]=title
            JsonItem["thumbnail"]= item.thumbnail
            JsonItem["fanart"]=item.fanart
            JsonItem["plot"]=item.plot
            JsonItem["action"]=item.action
            JsonItem["url"]=item.tourl()
            JsonItem["context"]=[]
            if not item.action == "go_back":
              for Comando in self.set_context_commands(item, parent_item):
                JsonItem["context"].append({"title":Comando[0],"url": Comando[1]})

            JsonData["data"]["itemlist"].append(JsonItem)


        ID = self.send_message(JsonData)
        self.get_data(ID)
def render_items(itemlist, parent_item):
    """
    Render an itemlist in Kodi, adding each entry to the plugin directory.

    @type itemlist: list
    @param itemlist: list of elements to display
    @type parent_item: item
    @param parent_item: parent item the list originates from
    """
    # If itemlist is not a list, bail out (closing the directory on boxee
    # so the UI does not hang waiting for endOfDirectory)
    if not isinstance(itemlist, list):
        if config.get_platform() == "boxee":
            xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
        return

    # If there are no items, show a placeholder notice
    if not itemlist:
        itemlist.append(Item(title="No hay elementos que mostrar"))

    # Loop invariants hoisted: import once, parse the plugin handle once,
    # and build the default fanart path once instead of per item
    from core import httptools
    handle = int(sys.argv[1])
    default_fanart = os.path.join(config.get_runtime_path(), "fanart.jpg")

    for item in itemlist:
        # If the item has no category, inherit the parent's
        if item.category == "":
            item.category = parent_item.category

        # If the item has no fanart, inherit the parent's
        if item.fanart == "":
            item.fanart = parent_item.fanart

        # Format the title with Kodi label tags
        if item.text_color:
            item.title = '[COLOR %s]%s[/COLOR]' % (item.text_color, item.title)
        if item.text_blod:
            item.title = '[B]%s[/B]' % item.title
        if item.text_italic:
            item.title = '[I]%s[/I]' % item.title

        # Add headers to images hosted on servers behind cloudflare
        item.thumbnail = httptools.get_url_headers(item.thumbnail)
        item.fanart = httptools.get_url_headers(item.fanart)

        # Default icon depends on whether the item is a folder or a video
        icon_image = "DefaultFolder.png" if item.folder else "DefaultVideo.png"

        # Build the listitem
        listitem = xbmcgui.ListItem(item.title, iconImage=icon_image, thumbnailImage=item.thumbnail)

        # Set the fanart: the item's own, or the add-on default
        listitem.setProperty('fanart_image', item.fanart if item.fanart else default_fanart)

        # TODO: can this line be removed? No visible effect has been observed.
        xbmcplugin.setPluginFanart(handle, default_fanart)

        # Attach the infoLabels
        set_infolabels(listitem, item)

        # Build the context menu
        context_commands = set_context_commands(item, parent_item)

        # Add the item (boxee has no context-menu / totalItems support)
        if config.get_platform() == "boxee":
            xbmcplugin.addDirectoryItem(handle=handle, url='%s?%s' % (sys.argv[0], item.tourl()),
                                        listitem=listitem, isFolder=item.folder)
        else:
            listitem.addContextMenuItems(context_commands, replaceItems=True)

            if not item.totalItems:
                item.totalItems = 0
            xbmcplugin.addDirectoryItem(handle=handle, url='%s?%s' % (sys.argv[0], item.tourl()),
                                        listitem=listitem, isFolder=item.folder,
                                        totalItems=item.totalItems)

    # Set the view content type...
    if config.get_setting("forceview") == "true":
        # ...forced according to viewcontent
        xbmcplugin.setContent(handle, parent_item.viewcontent)
    elif parent_item.channel not in ["channelselector", ""]:
        # ...or according to the channel
        xbmcplugin.setContent(handle, "movies")

    # Set the "breadcrumb"
    xbmcplugin.setPluginCategory(handle=handle, category=parent_item.category.capitalize())

    # Do not sort items
    xbmcplugin.addSortMethod(handle=handle, sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # Close the directory
    xbmcplugin.endOfDirectory(handle=handle, succeeded=True)

    # Force the view mode if requested
    if config.get_setting("forceview") == "true":
        viewmode_id = get_viewmode_id(parent_item)
        xbmc.executebuiltin("Container.SetViewMode(%s)" % viewmode_id)