Code example #1
def getsearch(item):
    logger.info("[hdgratis.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.hdgratis.org.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<div class="col-xs-2">.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_2, scrapedtitle, scrapedthumbnail in matches:
        scrapedurl = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # title += " (" + scrapedcalidad + ")

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=scrapedtitle), tipo='movie'))

    # Pagination
    next_page = ""
    for page in re.compile('<link rel="next" href="(.+?)"/>', re.DOTALL).findall(data):
        next_page = page
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
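
The `scrapedthumbnail += "|" + _headers` line above relies on Kodi's convention of accepting urlencoded HTTP headers appended to a media URL after a pipe, so the thumbnail is fetched with the same cookies as the page itself. A minimal sketch of the idea in isolation (Python 2; the URL and header values are illustrative):

import urllib

def url_with_headers(url, headers):
    # headers is a list of [name, value] pairs, the same shape the channel code builds
    return url + "|" + urllib.urlencode(dict(headers))

print url_with_headers("http://example.org/poster.jpg",
                       [['Referer', 'http://example.org/'],
                        ['Cookie', 'cf_clearance=abc123']])
# -> http://example.org/poster.jpg|Referer=...&Cookie=... (parameter order may vary)
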
Code example #2
def episodios(item):
    logger.info("streamondemand.channels.dreamsub episodios")

    itemlist = []

    data = scrapertools.cache_page(item.url, headers=headers)
    bloque = scrapertools.get_match(data, '<div class="seasonEp">(.*?)<div class="footer">')

    patron = '<li><a href="([^"]+)"[^<]+<b>(.*?)<\/b>[^>]+>([^<]+)<\/i>(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(bloque)

    for scrapedurl, title1, title2, title3 in matches:
        scrapedurl = host + scrapedurl
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = title1 + " " + title2 + title3
        scrapedtitle = scrapedtitle.replace("Download", "")
        scrapedtitle = scrapedtitle.replace("Streaming", "")
        scrapedtitle = scrapedtitle.replace("& ", "")

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))

    return itemlist
Code example #3
def episodios(item):
    logger.info("fusionse.channels.altastreaming episodios")

    itemlist = []

    data = scrapertools.cache_page(item.url)

    patron = '<li id="serie-[^-]+-title="([^"]+)">\s*<span[^<]+<\/span>\s*<span[^<]+<\/span>\s*<a[^=]+=[^=]+=[^=]+=[^=]+=[^=]+="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedurl in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = scrapedtitle.replace('Stai guardando: ', '')

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos_tv",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))

    return itemlist
Code example #4
def elenco_ten(item):
    logger.info("filmhdstreaming elenco_ten")

    itemlist = []
    data = scrapertools.cache_page(item.url)
    patron = '<ul class="lista">(.*?)</ul>'

    filtro = scrapertools.find_single_match(data, patron)
    patron = '<li>.*?href="(.*?)">(.*?)</a>'
    matches = scrapertools.find_multiple_matches(filtro, patron)

    for scrapedurl, scrapedtitle in matches:
        logger.info("Url:" + scrapedurl + " title:" + scrapedtitle)
        itemlist.append(
            infoSod(
                Item(
                    channel=item.channel,
                    action="findvideos",
                    title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                    fulltitle=scrapedtitle,
                    url=scrapedurl,
                    thumbnail="",
                    fanart="",
                )
            )
        )

    return itemlist
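
`find_single_match` and `find_multiple_matches` used above are thin wrappers over `re`; if scrapertools is not at hand, rough equivalents would look like this (a sketch, assuming the usual semantics of returning "" / an empty list when the pattern misses):

import re

def find_single_match(data, patron):
    # First capture group of the first match, or "" when nothing matches.
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ""

def find_multiple_matches(data, patron):
    # Every match; tuples when the pattern has more than one capture group.
    return re.findall(patron, data, re.DOTALL)
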
Code example #5
def fichas(item):
    logger.info("[hdgratis.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.altadefinizione.black.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    if "/?s=" in item.url:
        patron = '<div class="col-lg-3 col-md-3 col-xs-3">\s*<a href="([^"]+")>\s*<div class="wrapperImage">[^i]+i[^s]+src="([^"]+)"[^>]+> <div class="info">\s*<h5[^>]+>(.*?)<'
    else:
        patron = '<span class="hd">HD</span>\s*<a href="([^"]+)"><img[^s]+src="([^"]+)"[^>]+></a> <div class="info">\s*<[^>]+>[^>]+>(.*?)</a>'


    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:

        title = scrapertools.decodeHtmlentities(scrapedtitle)

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=scrapedtitle), tipo='movie'))

    # Pagination
    next_page = ""
    for page in re.compile('<link rel="next" href="(.+?)"/>', re.DOTALL).findall(data):
        next_page = page

    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/vari/successivo_P.png"))

    return itemlist
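
The cookie block above indexes `split('\t')[5]` and `[6]` because the cookie jar uses the Netscape cookies.txt layout: seven tab-separated fields per line (domain, subdomains flag, path, secure, expiry, name, value). A demonstration with a made-up entry:

line = ".altadefinizione.black\tTRUE\t/\tFALSE\t1893456000\tcf_clearance\tabc123"
fields = line.split('\t')
print fields[5] + "=" + fields[6]   # -> cf_clearance=abc123
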
Code example #6
def peliculas_rss(item):
    logger.info("streamondemand.videotecadiclass peliculas_rss")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries (folders)
    patron = '<div class="fetch-rss-content ">\s*(.*?)<\/div>\s*<a\s*href="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedurl in matches:
        scrapedthumbnail = ""
        scrapedplot = ""
        scrapedurl = scrapertools.get_header_from_response(scrapedurl, header_to_get="Location")
        txt = "streaming"
        if txt not in scrapedtitle: continue
        old = "blogspot"
        if old in scrapedtitle: continue
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.split("(")[0]
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 contentType="movie",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='movie'))

    return itemlist
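
`get_header_from_response(..., header_to_get="Location")` resolves a redirecting link to its target without following it. Presumably it does something close to this Python 2 sketch, which stops urllib2 from chasing the redirect and reads the Location header off the raised error:

import urllib2

class _NoRedirect(urllib2.HTTPRedirectHandler):
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        return None  # returning None makes the 3xx surface as an HTTPError

def get_location(url):
    opener = urllib2.build_opener(_NoRedirect)
    try:
        opener.open(url)
    except urllib2.HTTPError as e:
        return e.headers.get('Location', '')
    return ""
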
Code example #7
def peliculas_tv(item):
    logger.info("streamondemand.misterstreaming peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)
    bloque = scrapertools.get_match(data, '<div id="text-2" class="widget widget_text">(.*?)</div></div><div class="widgetwrap">')

    # Extract the entries (folders)
    patron = '<a href="(.*?)">(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)

    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        if DEBUG: logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))


    return itemlist
Code example #8
def pelis_tv_src(item):
    logger.info("streamondemand.streamblog peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)

    # Extract the entries (folders)
    patron = '<a href="([^"]+)"><div class="form-group">[^>]+><img src="([^"]+)"[^=]+=[^=]+="(.*?)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = host + scrapedthumbnail
        if DEBUG: logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))

    return itemlist
Code example #9
def ultimiep(item):
    log("ultimiep", "ultimiep")
    itemlist = []

    data = scrapertools.cache_page(item.url, headers=headers)

    patron = r'<img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if 'Streaming' in scrapedtitle:
            # Clean up the title
            scrapedtitle = scrapedtitle.replace("Streaming", "").replace("&", "")
            scrapedtitle = scrapedtitle.replace("Download", "")
            scrapedtitle = scrapedtitle.replace("Sub Ita", "").strip()
            cleantitle = re.sub(r'Episodio?\s*\d+\s*(?:\(\d+\)|)', '', scrapedtitle).strip()
            # Build the URL
            episodio = scrapertools.find_single_match(scrapedtitle.lower(), r'episodio?\s*(\d+)')
            scrapedurl = re.sub(r'episodio?-?\d+-?(?:\d+-|)[oav]*', '', scrapedurl).replace('-fine', '')
            if 'download' not in scrapedurl:
                scrapedurl = scrapedurl.replace('-streaming', '-download-streaming')
            extra = "<tr>\s*<td[^>]+><strong>Episodio %s(?:[^>]+>[^>]+>|[^<]*)</strong></td>" % episodio
            print "EPISODIO: " + episodio + "\nTITLE: " + scrapedtitle + "\nExtra: " + extra + "\nURL: " + scrapedurl
            itemlist.append(infoSod(
                Item(channel=__channel__,
                    action="findvideos",
                    title=scrapedtitle,
                    url=scrapedurl,
                    fulltitle=cleantitle,
                    extra=extra,
                    show=re.sub(r'Episodio\s*', '', scrapedtitle),
                    thumbnail=scrapedthumbnail), tipo="tv"))

    return itemlist
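
A worked example (on a made-up title) of what the clean-up regexes above produce, since the same title string feeds both the display name and the episode lookup:

import re

scrapedtitle = "One Piece Episodio 790 Sub Ita Streaming"
scrapedtitle = scrapedtitle.replace("Streaming", "").replace("Sub Ita", "").strip()
cleantitle = re.sub(r'Episodio?\s*\d+\s*(?:\(\d+\)|)', '', scrapedtitle).strip()
episodio = re.search(r'episodio?\s*(\d+)', scrapedtitle.lower()).group(1)
print cleantitle + " / episode " + episodio   # -> One Piece / episode 790
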
Code example #10
def peliculas(item):
    logger.info("streamondemand.altadefinizioneclub peliculas")
    itemlist = []

    patron = '<li><a href="([^"]+)" data-thumbnail="([^"]+)"><div>\s*<div class="title">(.*?)</div>'
    for scrapedurl, scrapedthumbnail, scrapedtitle in scrapedAll(item.url, patron):
        logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        xbmc.log("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("[HD]", "")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 viewmode="movie"), tipo="movie"))

    # Pagination
    # ===========================================================================================================================
    matches = scrapedSingle(item.url, '<span class=\'pages\'>(.*?)class="clearfix"',"class='current'>.*?</span>.*?href=\"(.*?)\">.*?</a>")
    if len(matches) > 0:
        paginaurl = scrapertools.decodeHtmlentities(matches[0])
        itemlist.append(Item(channel=__channel__, action="peliculas", title=AvantiTxt, url=paginaurl, thumbnail=AvantiImg))
        itemlist.append(Item(channel=__channel__, action="HomePage", title=HomeTxt, thumbnail=ThumbnailHome, folder=True))
    else:
        itemlist.append(Item(channel=__channel__, action="mainlist", title=ListTxt, thumbnail=ThumbnailHome,folder=True))
    # ===========================================================================================================================
    return itemlist
Code example #11
def peliculas_src(item):
    logger.info("streamondemand.cinesuggestions peliculas_src")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers_src)

    # Extract the entries (folders)
    patron = '<h2 class="post-title entry-title">\s*<a href="([^"]+)">(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        scrapedthumbnail = ""
        scrapedplot = ""
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 contentType="movie",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='movie'))


    return itemlist
Code example #12
def search(item, texto):
    logger.info("[laserietv.py] " + item.url + " search " + texto)
    itemlist = []
    url = "%s/index.php?do=search" % host
    post = "do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=" + texto
    logger.debug(post)
    data = scrapertools.cachePagePost(url, post=post)

    patron = '<div class="video-item-cover"[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedthumbnail = host + scrapedthumbnail
        logger.info(scrapedurl + " " + scrapedtitle + scrapedthumbnail)
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodi",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle), tipo='tv'))

    return itemlist
Code example #13
def fichas(item):
    logger.info("streamondemand.channels.guardaserie fichas")

    itemlist = []

    # data = scrapertools.cache_page(item.url)

    ## Download the page
    data = re.sub(
        r'\t|\n|\r',
        '',
        scrapertools.anti_cloudflare(item.url, headers)
    )

    data = scrapertools.find_single_match(data, '<a[^>]+>Serie Tv</a><ul>(.*?)</ul>')

    patron = '<li><a href="([^"]+)[^>]+>([^<]+)</a></li>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 url=scrapedurl), tipo='tv'))

    return itemlist
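
Flattening `\t`, `\n` and `\r` out of the page before matching, as done above, lets a single-line pattern cope with markup the server splits across lines. A tiny demonstration:

import re

html = '<li>\n\t<a href="/serie/x"\n\t>Serie X</a>\n</li>'
flat = re.sub(r'\t|\n|\r', '', html)
print re.findall('<li><a href="([^"]+)"[^>]*>([^<]+)</a>', flat)
# -> [('/serie/x', 'Serie X')]
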
Code example #14
def peliculasx(item):
    logger.info("streamondemand.filmstreampw peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)

    # Extract the entries (folders)
    patron = '<div class="news2 float">.*?<div class="boxgrid2 caption2">.*?<a href="([^"]+)">.*?<img.*?src="([^"]+)"/>.*?<div class="cover2 boxcaption2">.*?<div class="boxgridtext">(.*?)</div>.*?<br>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        html = scrapertools.cache_page(scrapedurl, headers=headers)
        start = html.find("<li class=\"current\" style=\"font-size: 15px; line-height: 18px;\">")
        end = html.find("</div></li>", start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedtitle = scrapedtitle.strip()
        if (DEBUG): logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="episodios" if item.extra == "serie" else "findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='movie'))

    return itemlist
Code example #15
def cartoni(item):
    logger.info("streamondemand.channels.guardaserie fichas")

    itemlist = []

    ## Download the page
    data = re.sub(
        r'\t|\n|\r',
        '',
        scrapertools.anti_cloudflare(item.url, headers)
    )

    data = scrapertools.find_single_match(data, '<a[^>]+>Cartoni</a><ul>(.*?)</ul>')

    patron = '<li><a href="([^"]+)[^>]+>([^<]+)</a></li>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 show=scrapedtitle,
                 thumbnail="http://www.itrentenni.com/wp-content/uploads/2015/02/tv-series.jpg"), tipo='tv'))

    return itemlist
Code example #16
def cerca(item):
    logger.info("streamondemand.channels.guardaserie fichas")

    itemlist = []

    ## Download the page
    data = re.sub(
        r'\t|\n|\r',
        '',
        scrapertools.anti_cloudflare(item.url, headers)
    )

    patron = '<div class="search_thumbnail">.*?<a class="search_link" href="([^"]+)" rel="bookmark" title="([^"]+)">.*?<img src="([^"]+)" />.*?</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        if scrapedtitle.startswith("Guarda "):
            scrapedtitle = scrapedtitle[7:]

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 show=scrapedtitle,
                 thumbnail=scrapedthumbnail), tipo='tv'))

    return itemlist
Code example #17
def latestep(item):
    logger.info("[SerieTVU.py]==> latestep")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers=headers)

    patron = r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)" class="lazy inner">'
    patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<small>([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedimg, scrapedtitle, scrapedinfo in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
        episodio = re.compile(r'(\d+)x(\d+)', re.DOTALL).findall(scrapedinfo)
        title = "%s %s" % (scrapedtitle, scrapedinfo)
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findepisodevideo",
                 title=title,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 extra=episodio,
                 thumbnail=scrapedimg,
                 show=title,
                 folder=True), tipo="tv"))
    return itemlist
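
Note the shape of `extra` here: `findall` with two capture groups returns a list of (season, episode) string tuples, e.g.:

import re

print re.compile(r'(\d+)x(\d+)', re.DOTALL).findall("2x09")   # -> [('2', '09')]
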
Code example #18
def peliculasx_tv(item):
    logger.info("streamondemand.filmstreampw peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)

    # Extract the entries (folders)
    patron = '<div class="news2 float">.*?<div class="boxgrid2 caption2">.*?<a href="([^"]+)">.*?<img.*?src="([^"]+)"/>.*?<div class="cover2 boxcaption2">.*?<div class="boxgridtext">(.*?)</div>.*?<br>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle.strip()
        if (DEBUG): logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios" if item.extra == "serie" else "findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))

    return itemlist
Code example #19
def search(item, texto):
    logger.info("[filmissimi.py] init texto=[" + texto + "]")
    itemlist = []
    url = "http://www.filmissimi.net/?s=" + texto

    data = scrapertools.cache_page(url, headers=headers)

    patron = 'class="s-img">[^<]+<.*?src="(.*?)"[^<]+<[^<]+<[^<]+</div>[^<]+<[^<]+<[^<]+<[^<]+</span>[^<]+</span>[^<]+<h3><a href="(.*?)">(.*?)</a></h3>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        log("elenco", "title=[" + scrapedtitle + "] url=[" + scrapedurl + "] thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail), tipo="movie"))

    # Pagination
    # ===========================================================================================================================
    matches = scrapedSingle(url, '<div class="paginacion">(.*?)</div>',"current'>.*?<\/span><.*?href='(.*?)'>.*?</a>")

    if len(matches) > 0:
        paginaurl = matches[0]
        itemlist.append(Item(channel=__channel__, action="elenco", title=AvantiTxt, url=paginaurl, thumbnail=AvantiImg))
        itemlist.append(Item(channel=__channel__, action="HomePage", title=HomeTxt, folder=True))
    else:
        itemlist.append(Item(channel=__channel__, action="mainlist", title=ListTxt, folder=True))
    # ===========================================================================================================================
    return itemlist
Code example #20
def pelisrc(item):
    logger.info("streamondemand.eurostreaminginfo peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)

    # Extract the entries (folders)
    patron = '<div class="post-thumb">\s*<a href="([^"]+)" title="([^"]+)">\s*<img src="([^"]+)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            infoSod(
                Item(
                    channel=__channel__,
                    action="findvideos",
                    fulltitle=scrapedtitle,
                    show=scrapedtitle,
                    title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                    url=scrapedurl,
                    thumbnail=scrapedthumbnail,
                    plot=scrapedplot,
                    folder=True,
                ),
                tipo="movie",
            )
        )

    return itemlist
Code example #21
def search(item, texto):
    log("search", "init texto=[" + texto + "]")
    itemlist = []
    url = "http://www.filmissimi.net/?s=" + texto

    patron = '<img src="(.*?)"[^=]+=.*?[^=]+="Thumbnail[^>]+>[^>]+><h2><a.*?href="(.*?)"[^>]+>(.*?)</a></h2>'
    for scrapedthumbnail, scrapedurl, scrapedtitle in scrapedSingle(url, '<ul class="recent-posts">(.*?)</ul>', patron):
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        log("elenco", "title=[" + scrapedtitle + "] url=[" + scrapedurl + "] thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail), tipo="movie"))

    # Pagination
    # ===========================================================================================================================
    matches = scrapedSingle(url, '<div class="navigation">(.*?)</div>', "current'>.*?</span>.*?class='page-numbers'.*?href='(.*?)'>.*?</a>")

    if len(matches) > 0:
        paginaurl = matches[0]
        itemlist.append(Item(channel=__channel__, action="elenco", title=AvantiTxt, url=paginaurl, thumbnail=AvantiImg))
        itemlist.append(Item(channel=__channel__, action="HomePage", title=HomeTxt, folder=True))
    else:
        itemlist.append(Item(channel=__channel__, action="mainlist", title=ListTxt, folder=True))
    # ===========================================================================================================================
    return itemlist
Code example #22
def elenco(item):
    logger.info("[filmissimi.py] elenco")
    itemlist = []

    data = scrapertools.cache_page(item.url, headers=headers)

    elemento = scrapertools.find_single_match(data,'<div class="estre">(.*?)<div class="paginacion">')

    patron='<div class="item">[^<]+<a href="(.*?)"[^<]+<[^<]+<img.*?icon[^<]+<img src="(.*?)" alt="(.*?)"[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+</div>'
    matches = re.compile(patron, re.DOTALL).findall(elemento)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.split("(")[0]
        logger.info("title=[" + scrapedtitle + "] url=[" + scrapedurl + "] thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail), tipo="movie"))

    # Pagination
    # ===========================================================================================================================
    matches = scrapedSingle(item.url, '<div class="paginacion">(.*?)</div>',"current'>.*?<\/span><.*?href='(.*?)'>.*?</a>")
    if len(matches) > 0:
        paginaurl = matches[0]
        itemlist.append(
            Item(channel=__channel__, action="elenco", title=AvantiTxt, url=paginaurl, thumbnail=AvantiImg))
        itemlist.append(Item(channel=__channel__, action="HomePage", title=HomeTxt, folder=True))
    else:
        itemlist.append(Item(channel=__channel__, action="mainlist", title=ListTxt, folder=True))
    # ===========================================================================================================================
    return itemlist
Code example #23
def fichas(item):
    logger.info("[seriehd.py] fichas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    patron = '<h2>(.*?)</h2>\s*'
    patron += '<img src="([^"]+)" alt="[^"]*" />\s*'
    patron += '<A HREF="([^"]+)">'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedthumbnail, scrapedurl in matches:
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 show=scrapedtitle,
                 thumbnail=scrapedthumbnail), tipo='tv'))

    patron = "<span class='current'>\d+</span><a rel='nofollow' class='page larger' href='([^']+)'>\d+</a>"
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page))

    return itemlist
Code example #24
def elenco(item):
    log("elenco", "init")
    itemlist = []

    patron = 'class="bottom_line"></div>[^<]+<[^<]+<img.*?src="(.*?)"[^<]+</a>[^>]+<[^<]+<[^<]+<[^<]+<.*?class="movie_title"><a href="(.*?)">(.*?)</a>'
    for scrapedthumbnail, scrapedurl, scrapedtitle in scrapedSingle(item.url, 'div id="movie_post_content">(.*?)</ul>', patron):
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        log("elenco", "title=[" + scrapedtitle + "] url=[" + scrapedurl + "] thumbnail=[" + scrapedthumbnail + "]")

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail), tipo="movie"))

    # Pagination
    # ===========================================================================================================================
    matches = scrapedSingle(item.url, 'class="vh-pages-wrapper span12 body-bg">(.*?)</div>', 'class="current">.*?</span><.*?href="(.*?)"')
    if len(matches) > 0:
        paginaurl = matches[0]
        itemlist.append(Item(channel=__channel__, action="elenco", title=AvantiTxt, url=paginaurl, thumbnail=AvantiImg))
        itemlist.append(Item(channel=__channel__, action="HomePage", title=HomeTxt, folder=True))
    else:
        itemlist.append(Item(channel=__channel__, action="mainlist", title=ListTxt, folder=True))
    # ===========================================================================================================================
    return itemlist
Code example #25
def seasons(item):
    logger.info("streamondemand.channels.cinemasubito episodios")

    itemlist = []

    data = scrapertools.cache_page(item.url, headers=headers)

    patron = '<h3 dir="ltr"><a style=[^h]+href="([^"]+)" class=[^=]+="([^"]+)">(.*?)</a></h3>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedseason in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = scrapedtitle + scrapedseason

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))

    return itemlist
Code example #26
def episodios(item):
    logger.info("streamondemand.channels.cinemasubito episodios")

    itemlist = []

    data = scrapertools.cache_page(item.url, headers=headers)
    bloque = scrapertools.get_match(data, 'Lista Episodi(.*?)</ul>')

    patron = '<li>\s*<a href="(.*?)">\s*(.*?)\s*<\/a>\s*<\/li>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)

    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))

    return itemlist
Code example #27
def pelis_movie_src(item):
    logger.info("streamondemand.mondolunatico_new peliculas")

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)

    # Extract the entries (folders)
    patron = '<div class="thumbnail animation-2">\s*<a href="([^"]+)">\s*<img src="([^"]+)" alt="(.*?)" />'
    matches = re.compile(patron, re.DOTALL).findall(data)

    scrapedplot = ""
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="findvideos",
                 contentType="movie",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True), tipo='movie'))

    return itemlist
Code example #28
def elenco_film(item):
    logger.info("megafiletube elenco_film")
    itemlist = []

    patron = "<a onmouseover=\".*?img src=.'(.*?)'.*?href=\"(.*?)\".*?target='.*?'.*?><b>(.*?)</b></a>"
    for scrapedimg, scrapedurl, scrapedtitolo in scrapedAll(item.url, patron):
        scrapedimg = scrapedimg.replace('\\', '')
        base = scrapedtitolo.replace(".", "")
        base = base.replace("(", "")
        titolo = base.split("20")[0]
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="torrent",
                 title="[COLOR darkkhaki].torrent [/COLOR][COLOR azure]" + scrapedtitolo + "[/COLOR]",
                 fulltitle=scrapedtitolo,
                 url=host + "/" + scrapedurl,
                 thumbnail=scrapedimg,
                 fanart=scrapedimg), tipo="movie"))

    # Pagination
    # ===========================================================
    pagina = scrapedAll(item.url, '<td class="highlight">.*?class="pager"><a.*?href="(.*?)"')
    if len(pagina) > 0:
        pagina = scrapertools.decodeHtmlentities(pagina[0])
        itemlist.append(Item(channel=__channel__, action="elenco_film", title=AvantiTxt, url=pagina, thumbnail=AvantiImg, folder=True))
        itemlist.append(Item(channel=__channel__, action="HomePage", title=HomeTxt, thumbnail=ThumbnailHome, folder=True))
    return itemlist
Code example #29
def search(item, texto):
    logger.info("[laserietv.py] " + host + " search " + texto)

    itemlist = []

    post = "do=search&subaction=search&story=" + texto
    data = scrapertools.cache_page(host, post=post, headers=headers)

    patron = '<div class="video-item-cover"[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedthumbnail = host + scrapedthumbnail
        logger.info(scrapedurl + " " + scrapedtitle + scrapedthumbnail)
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle), tipo='tv'))

    return itemlist
Code example #30
def tvoggi(item):
    logger.info("streamondemand.filmontv tvoggi")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries (folders)
    patron = '<div class="col-xs-5 box-immagine">\s*<img src="([^"]+)"[^>]+>\s*</div>\s*[^>]+>[^>]+>\s*[^>]+>\s*[^>]+>(.*?)</div>\s*[^>]+>[^>]+>[^>]+>[^>]+>(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedtitle, scrapedtv in matches:
        scrapedurl = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        titolo = urllib.quote_plus(scrapedtitle)
        if (DEBUG): logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]")

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="do_search",
                 extra=titolo,
                 title=scrapedtitle + "[COLOR yellow]   " + scrapedtv + "[/COLOR]",
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 folder=True), tipo="movie"))

    return itemlist
Code example #31
def elenco(item):
    logger.info("filmhdstreaming elenco")

    itemlist = []
    data = scrapertools.cache_page(item.url)
    patron = 'id="box_movies">(.*?)id="containerpage"'
    filtro = scrapertools.find_single_match(data, patron)

    patron = 'class="movie">[^>]+><a href="(.*?)"><img src="(.*?)".*?<h2>(.*?)<\/h2>'

    matches = scrapertools.find_multiple_matches(filtro, patron)

    for scrapedurl, scrapedimg, scrapedtitle in matches:
        logger.info("Url:" + scrapedurl + " thumbnail:" + scrapedimg +
                    " title:" + scrapedtitle)
        title = scrapedtitle.split("(")[0]
        itemlist.append(
            infoSod(
                Item(channel=item.channel,
                     action="findvideos",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedimg,
                     fanart="")))
    # Pagination
    # ===========================================================
    patron = '<div class="pagination dark">(.*?)</div>'
    paginazione = scrapertools.find_single_match(data, patron)

    if item.extra == "genere":
        patron = 'class="page dark active">.*?</a>.*?href=\'(.*?)\'>'
    else:
        patron = 'class="page dark active">.*?</a>.*?href="(.*?)">'

    matches = re.compile(patron, re.DOTALL).findall(paginazione)
    scrapertools.printMatches(matches)
    # ===========================================================
    if len(matches) > 0:
        paginaurl = matches[0]
        if item.extra == "genere":
            url = scrapertools.find_single_match(
                item.url, "http://(.*?)/(.*?)/(.*?)\/.*")
            ind = "http://" + url[0] + "/" + url[1] + "/" + url[
                2] + "/" + paginaurl
            itemlist.append(
                Item(channel=item.channel,
                     action="elenco",
                     title=AvantiTxt,
                     url=ind,
                     thumbnail=AvantiImg,
                     folder=True))
        else:
            itemlist.append(
                Item(channel=item.channel,
                     action="elenco",
                     title=AvantiTxt,
                     url=host + paginaurl,
                     thumbnail=AvantiImg,
                     folder=True))

        itemlist.append(
            Item(channel=item.channel,
                 action="HomePage",
                 title=HomeTxt,
                 thumbnail=ThumbnailHome,
                 folder=True))

    return itemlist
Code example #32
def peliculas(item):
    logger.info("streamondemand.playcinema peliculas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.playcinema.org.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    # Extract the entries (folders)
    patron = '<div class="moviefilm">\s*'
    patron += '<a href="([^"]+)">\s*'
    patron += '<img src="([^"]+)" alt="([^"]+)"[^>]+></a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        # response = urllib2.urlopen(scrapedurl)
        # html = response.read()
        # start = html.find("<div class=\"filmicerik\">")
        # start = html.find("<p><span style=\"font-family: Arial, Helvetica, sans-serif;\">")
        # end = html.find("<span style=\"font-size: xx-small;\">+Info", start)
        # end = html.find("</font></a><br />", start)
        # scrapedplot = html[start:end]
        # scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        # scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(
            scrapedtitle.replace("Streaming", ""))
        scrapedthumbnail += '|' + _headers
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         folder=True),
                    tipo='movie'))

    # Extract the paginator
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">&raquo;</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png",
                folder=True))

    return itemlist
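
`urlparse.urljoin` (Python 2) resolves the paginator's possibly relative href against the current page URL, as used above:

import urlparse

print urlparse.urljoin("http://example.org/film/page/2/", "/film/page/3/")
# -> http://example.org/film/page/3/
print urlparse.urljoin("http://example.org/film/page/2/", "3/")
# -> http://example.org/film/page/2/3/
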
Code example #33
def peliculas(item):
    logger.info("[streamondemand-pureita cineblog01] peliculas")
    itemlist = []

    if item.url == "":
        item.url = sito

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries (folders)
    patronvideos = '<div class="span4".*?<a.*?<p><img src="([^"]+)".*?'
    patronvideos += '<div class="span8">.*?<a href="([^"]+)"> <h1>([^"]+)</h1></a>.*?'
    patronvideos += '<strong>([^<]*)</strong>.*?<br />([^<+]+)'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)

    for match in matches:
        scrapedtitle = scrapertools.unescape(match.group(3))
        scrapedurl = urlparse.urljoin(item.url, match.group(2))
        scrapedthumbnail = urlparse.urljoin(item.url, match.group(1))
        scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
        scrapedplot = scrapertools.unescape("[COLOR orange]" + match.group(4) +
                                            "[/COLOR]\n" +
                                            match.group(5).strip())
        scrapedplot = scrapertools.htmlclean(scrapedplot).strip()
        scrapedtitle = scrapedtitle.replace("&#8211;", "-").replace(
            "&#215;", "x").replace("[Sub-ITA]", "(Sub Ita)")
        scrapedtitle = scrapedtitle.replace("/", " - ").replace(
            "&#8217;", "'").replace("&#8230;", "...").replace("ò", "o")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvid_film",
                         contentType="movie",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title=scrapedtitle,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         extra=item.extra,
                         viewmode="movie_with_plot",
                         folder=True),
                    tipo='movie'))

    # Next page mark
    try:
        bloque = scrapertools.get_match(
            data, "<div id='wp_page_numbers'>(.*?)</div>")
        patronvideos = '<a href="([^"]+)">></a></li>'
        matches = re.compile(patronvideos, re.DOTALL).findall(bloque)
        scrapertools.printMatches(matches)

        if len(matches) > 0:
            scrapedtitle = "[COLOR orange]Successivi >>[/COLOR]"
            scrapedurl = matches[0]
            scrapedthumbnail = ""
            scrapedplot = ""

            itemlist.append(
                Item(
                    channel=__channel__,
                    action="peliculas",
                    title=scrapedtitle,
                    url=scrapedurl,
                    thumbnail=
                    "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                    extra=item.extra,
                    plot=scrapedplot))
    except:
        pass

    return itemlist
Code example #34
def fichas(item):
    logger.info("[streamondemand-pureita altadefinizione_pink ] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality
    data = re.sub(
        r'<div class="wrapperImage"[^<]+<a',
        '<div class="wrapperImage"><fix>SD</fix><a',
        data
    )
    # fix - IMDB
    data = re.sub(
        r'<h5> </div>',
        '<fix>IMDB: 0.0</fix>',
        data
    )

    if "/?s=" in item.url:
        patron = '<div class="col-lg-3 col-md-3 col-xs-3">.*?'
        patron += 'href="([^"]+)".*?'
        patron += '<div class="wrapperImage"[^<]+'
        patron += '<[^>]+>([^<]+)<.*?'
        patron += 'src="([^"]+)".*?'
        patron += 'class="titleFilm">([^<]+)<.*?'
        patron += 'IMDB: ([^<]+)<'
    else:
        patron = '<div class="wrapperImage"[^<]+\s*[^>]+>([^<]+).*?\s*<a href="([^"]+)">'
        patron += '<img width=".*?" height=".*?" src="([^"]+)" class="attachment[^>]+>'
        patron += '</a>\s*<div class="info">\s*<h2 class="titleFilm"><a href[^>]+>([^<]+)</a></h2>\s*[^>]+>[^>]+>\s*(.*?)<'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_1, scraped_2, scrapedthumbnail, scrapedtitle, scrapedpuntuacion in matches:

        scrapedurl = scraped_2
        scrapedcalidad = scraped_1
        if "/?s=" in item.url:
            scrapedurl = scraped_1
            scrapedcalidad = scraped_2
        if scrapedpuntuacion=="":
           scrapedpuntuacion="N/A"
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        title_f = scrapertools.decodeHtmlentities(scrapedtitle)
        title += " (" + scrapedcalidad + ") (" + scrapedpuntuacion + ")"
        scraped_calidad = " ([COLOR yellow]" + scrapedcalidad + "[/COLOR])"
        scraped_puntuacion = " ([COLOR yellow]" + scrapedpuntuacion + "[/COLOR])"
        title_f += scraped_calidad + scraped_puntuacion

        # ------------------------------------------------
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        # ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 contentType="movie",
                 title=title_f,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title), tipo='movie'))

    # Pagination
    next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))

    return itemlist
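
The two `re.sub` "fix" calls above normalise the page before matching: cards that lack a quality ribbon or an IMDB score get synthetic `<fix>` markers, so one pattern can capture those fields for every card. The idea in isolation (made-up markup):

import re

data = '<div class="wrapperImage"><a href="/film/x">'
data = re.sub(r'<div class="wrapperImage"[^<]+<a',
              '<div class="wrapperImage"><fix>SD</fix><a', data)
print data   # -> <div class="wrapperImage"><fix>SD</fix><a href="/film/x">
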
Code example #35
def peliculas(item):
    logger.info("streamondemand-pureita.filmstreamingita peliculas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers=headers)

    patron = r'<div class="home_tall_box">\s*'
    patron += r'<a href="([^"]+)".*?>\s*<img.*?alt="([^"]+)".*?src="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:

        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 contentType="movie",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 extra="movie",
                 thumbnail=scrapedthumbnail,
                 folder=True), tipo="movie"))


    patron = '<li><a href="([^"]+)" class="next">&raquo;</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = matches[0]
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/return_home_P.png",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png",
                 extra=item.extra,
                 folder=True))

    patron = '<a class="next page-numbers" href="([^"]+)">Successivo &raquo;</a></center>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = matches[0]
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/return_home_P.png",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png",
                 extra=item.extra,
                 folder=True))
    return itemlist
Code example #36
def fichas(item):
    logger.info("[altadefinizioneclick.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality
    data = re.sub(
        r'<div class="wrapperImage"[^<]+<a',
        '<div class="wrapperImage"><fix>SD</fix><a',
        data
    )
    # fix - IMDB
    data = re.sub(
        r'<h5> </div>',
        '<fix>IMDB: 0.0</fix>',
        data
    )
    # ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()[4:]
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    if "/?s=" in item.url:
        patron = '<div class="col-lg-3 col-md-3 col-xs-3">.*?'
        patron += 'href="([^"]+)".*?'
        patron += '<div class="wrapperImage"[^<]+'
        patron += '<[^>]+>([^<]+)<.*?'
        patron += 'src="([^"]+)".*?'
        patron += 'class="titleFilm">([^<]+)<.*?'
        patron += 'IMDB: ([^<]+)<'
    else:
        patron = '<div class="wrapperImage"[^<]+'
        patron += '<[^>]+>([^<]+)<.*?'
        patron += 'href="([^"]+)".*?'
        patron += 'src="([^"]+)".*?'
        patron += 'href[^>]+>([^<]+)</a>.*?'
        patron += 'IMDB: ([^<]+)<'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_1, scraped_2, scrapedthumbnail, scrapedtitle, scrapedpuntuacion in matches:

        scrapedurl = scraped_2
        scrapedcalidad = scraped_1
        if "/?s=" in item.url:
            scrapedurl = scraped_1
            scrapedcalidad = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        title += " (" + scrapedcalidad + ") (" + scrapedpuntuacion + ")"

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title), tipo='movie'))

    # Pagination
    next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
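
Note on the cookie block above: the session headers are urlencoded and appended to each thumbnail after a pipe, so the art request carries the Cloudflare cookies; the [4:] slice skips the comment header of the Netscape cookies.txt dump. A minimal sketch of the convention, assuming a Kodi-style client that honours "url|key=value&key=value" header suffixes (helper name illustrative):

import urllib

def attach_headers(thumbnail, cookie_lines, user_agent):
    # Netscape cookies.txt lines are tab-separated: name is field 5, value field 6.
    cookies = ";".join(
        line.split('\t')[5] + "=" + line.split('\t')[6]
        for line in cookie_lines if line.count('\t') >= 6)
    headers = [['User-Agent', user_agent], ['Cookie', cookies]]
    # Append the urlencoded headers after "|", as the channels above do.
    return thumbnail + "|" + urllib.urlencode(dict(headers))
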
コード例 #37
0
def fichas(item):
    logger.info("[italiafilmvideohd.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()[4:]
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<li class="item">.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_2, scrapedtitle, scrapedthumbnail in matches:
        scrapedurl = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # title += " (" + scrapedcalidad + ")"

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=title,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=title,
                         show=scrapedtitle),
                    tipo='movie'))

    itemlist.append(
        Item(channel=__channel__,
             action="HomePage",
             title="[COLOR yellow]Torna Home[/COLOR]",
             folder=True))

    # Pagination
    next_page = scrapertools.find_single_match(
        data, '<a href="([^"]+)"\s*><span aria-hidden="true">&raquo;')

    if next_page != "":
        itemlist.append(
            Item(
                channel=__channel__,
                action="fichas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=next_page,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
            ))

    return itemlist
コード例 #38
0
def serietv(item):
    logger.info("streamondemand.mondolunatico serietv")

    itemlist = []

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    # Carica la pagina
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(
        data, '<h1>Lista Alfabetica</h1>(.*?)</div>')

    # Estrae i contenuti
    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    scrapedplot = ""
    scrapedthumbnail = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         extra=item.extra,
                         action="episodios",
                         title=title,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=title,
                         show=title,
                         plot=scrapedplot,
                         folder=True),
                    tipo='tv'))

    if len(itemlist) > 0:
        itemlist.append(
            Item(
                channel=__channel__,
                action="HomePage",
                title="[COLOR yellow]Torna Home[/COLOR]",
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/return_home_P.png",
                folder=True)),

    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(
                channel=__channel__,
                extra=item.extra,
                action="serietv",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png",
                folder=True))

    return itemlist
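
The '{}' marker in item.url is how channels like this one paginate a single-page list: the page index travels inside the url and the full match list is sliced. The idiom as one sketch (names and the per-page default are illustrative):

def paginate(url, matches, per_page=14):
    p = 1
    if '{}' in url:
        # The page index rides behind a '{}' separator in the url itself.
        url, p = url.split('{}')
        p = int(p)
    # Equivalent to the continue/break checks in the loops above.
    page_items = matches[(p - 1) * per_page:p * per_page]
    # A full page suggests more results: advance the marker for the next call.
    next_url = url + '{}' + str(p + 1) if len(matches) >= p * per_page else None
    return page_items, next_url
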
コード例 #39
0
def peliculas_tv(item):
    logger.info("streamondemand-pureita.FilmZStreaming peliculas_tv")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the entries (folders)
    patron = '<img src="([^"]+)"\s*alt="([^"]+)"><div class="rating">[^>]+>'
    patron += '<\/span>\s*([^<]+)<\/div><div class="mepo">\s*<\/div>\s*'
    patron += '<a\s*href="([^"]+)">.*?<div class="texto">(.*?)<'
    matches = re.compile(patron, re.DOTALL).finditer(data)

    for match in matches:

        scrapedplot = scrapertools.unescape(match.group(5))
        scrapedurl = urlparse.urljoin(item.url, match.group(4))
        rating = scrapertools.unescape(match.group(3))
        scrapedtitle = scrapertools.unescape(match.group(2))
        scrapedthumbnail = scrapertools.unescape(match.group(1))
        #scrapedtitle = scrapedtitle.title()
        for old, new in (("&#8217;", "'"), ("&#8211;", "-"), ("’", "'"),
                         ("!", ""), ("Online", ""), (" ITA ", " "),
                         ("[", "("), ("]", ")"), (" HD", ""),
                         ("SerieTv", ""), ("SerieTV", ""), ("Serie-Tv", ""),
                         ("ONLINE", ""), ("STREAMING", ""), ("streaming", ""),
                         ("Streaming", ""), ("serie", ""), ("Serie TV", ""),
                         ("Serie-TV", "")):
            scrapedtitle = scrapedtitle.replace(old, new)
        if rating == "0":
            rating = ""
        else:
            rating = " ([COLOR yellow]" + rating + "[/COLOR])"

        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="episodios",
                         contentType="serie",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]" +
                         rating,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         folder=True),
                    tipo='tv'))

    # Extract the pager
    patronvideos = "<span class=\"current\">\d+</span><a href='([^']+)' class=\"inactive\">\d+</a>"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas_tv",
                title="[COLOR orange]Successivi >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                folder=True))

    return itemlist
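
The token scrubbing above is repeated, channel by channel, with slightly different lists. A shared helper that strips the junk tokens and collapses the leftover whitespace would centralize it; a sketch, assuming order does not matter for the given tokens:

import re

JUNK_TOKENS = ("Streaming", "streaming", "STREAMING", "Serie TV",
               "Serie-Tv", "SerieTV", "Online", "ONLINE")

def clean_title(raw):
    # Drop every junk token, then collapse runs of whitespace.
    for token in JUNK_TOKENS:
        raw = raw.replace(token, "")
    return re.sub(r"\s+", " ", raw).strip()
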
コード例 #40
0
def peliculas_tv(item):
    logger.info("[italiafilm.py] peliculas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)
    patron = '<article(.*?)</article>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for match in matches:
        title = scrapertools.find_single_match(
            match, '<h3[^<]+<a href="[^"]+"[^<]+>([^<]+)</a>')
        title = title.replace("Streaming", "")
        title = scrapertools.decodeHtmlentities(title).strip()
        show_title = re.sub('\(.*?\)', '', title.replace('Serie TV', ''))
        url = scrapertools.find_single_match(match,
                                             '<h3[^<]+<a href="([^"]+)"')
        plot = ""
        thumbnail = scrapertools.find_single_match(match,
                                                   'data-echo="([^"]+)"')

        if DEBUG:
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "]")

        itemlist.append(
            infoSod(Item(
                channel=__channel__,
                extra=item.extra,
                action='episodios' if item.extra == 'serie' else 'findvideos',
                fulltitle=title,
                show=show_title,
                title="[COLOR azure]" + title + "[/COLOR]",
                url=url,
                thumbnail=thumbnail,
                plot=plot,
                viewmode="movie_with_plot",
                folder=True),
                    tipo='tv'))

    # Next
    try:
        pagina_siguiente = scrapertools.get_match(
            data, '<a class="next page-numbers" href="([^"]+)"')
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas_tv",
                extra=item.extra,
                title="[COLOR orange]Successivo >> [/COLOR]",
                url=pagina_siguiente,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))
    except:
        pass

    return itemlist
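
This channel shows the two-stage scraping pattern: split the page into <article> blocks first, then run short single-match patterns inside each block, so one malformed card cannot desynchronize a long regex. A generic sketch with hypothetical field patterns:

import re

def scrape_articles(data):
    results = []
    # Stage 1: isolate one block per entry.
    for block in re.findall(r'<article(.*?)</article>', data, re.DOTALL):
        # Stage 2: cheap per-field lookups, scoped to the block.
        title = re.search(r'<h3[^<]+<a href="[^"]+"[^>]*>([^<]+)</a>', block)
        url = re.search(r'<h3[^<]+<a href="([^"]+)"', block)
        thumb = re.search(r'data-echo="([^"]+)"', block)
        if title and url:
            results.append((title.group(1).strip(), url.group(1),
                            thumb.group(1) if thumb else ""))
    return results
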
コード例 #41
0
def peliculas_tv(item):
    logger.info("[streamondemand-pureita mondolunatico_new] peliculas_tv")
    itemlist = []
    numpage = 14

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries
    patron = '<div class="poster">\s*<img src="([^"]+)" \s*'
    patron += 'alt="([^"]+)">\s*<div[^>]+>[^>]+><\/span>\s*([^<]+)<\/div>\s*'
    patron += '[^>]+>\s*<\/div>\s*<a href="([^"]+)">.*?'
    patron += '<\/h3>\s*<span>([^<]+)<\/span>.*?<div class="texto">(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for i, (scrapedthumbnail, scrapedtitle, rating, scrapedurl, year,
            scrapedplot) in enumerate(matches):
        if (p - 1) * numpage > i: continue
        if i >= p * numpage: break
        scrapedtitle = scrapedtitle.replace(
            "(Cliccate La Scheda Info per vedere i link)", "")
        scrapedtitle = scrapedtitle.replace("&#8217;", "'")
        scrapedtitle = scrapedtitle.replace("Flash", "The Flash").strip()

        title = scrapertools.decodeHtmlentities(scrapedtitle)

        if rating == "0":
            rating = ""
        if rating:
            rating = " ([COLOR yellow]" + rating + "[/COLOR])"
        if year:
            years = " ([COLOR yellow]" + year + "[/COLOR])"
            date = " (" + year + ")"
        else:
            years = ""
            date = ""
        if year in scrapedtitle:
            years = ""
            date = ""
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         extra=item.extra,
                         action="episodios",
                         contentType="tv",
                         title="[COLOR azure]" + title + "[/COLOR]" + years +
                         rating,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=title + date,
                         show=title,
                         plot=scrapedplot,
                         folder=True),
                    tipo='tv'))

    # Extract the pager
    if len(matches) >= p * numpage:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(
                channel=__channel__,
                extra=item.extra,
                action="peliculas_tv",
                title="[COLOR orange]Successivi >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                folder=True))

    else:
        next_page = scrapertools.find_single_match(
            data, '<a href="([^"]+)" ><span class="icon-chevron-right">')
        if next_page != "":
            itemlist.append(
                Item(
                    channel=__channel__,
                    action="peliculas_tv",
                    title="[COLOR orange]Successivi >>[/COLOR]",
                    url=next_page,
                    thumbnail=
                    "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"
                ))

    return itemlist
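
The year/rating decoration repeats the same branches in every list function: drop a "0" rating, wrap what remains in yellow, and skip the year when the title already contains it. The logic as one helper (a sketch; the [COLOR] tags are the Kodi label markup used throughout):

def decorate_title(title, year="", rating=""):
    extra = ""
    if year and year not in title:
        extra += " ([COLOR yellow]" + year + "[/COLOR])"
    if rating and rating != "0":
        extra += " ([COLOR yellow]" + rating + "[/COLOR])"
    return "[COLOR azure]" + title + "[/COLOR]" + extra
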
コード例 #42
0
def updateserietv(item):
    logger.info("streamondemand.solostreaming update serietv")

    itemlist = []

    # Download the page
    data = cache_jsonpage(item.url)

    for singledata in data['results']:

        type = normalize_unicode(singledata['type'])
        uri = normalize_unicode(singledata['uri'])
        if item.extra == 'serietv':
            ep_num = normalize_unicode(singledata['ep_num'])
            serie = scrapertools.decodeHtmlentities(normalize_unicode(singledata['serieNome'])).strip()
            titolo = scrapertools.decodeHtmlentities(normalize_unicode(singledata['ep_title'])).strip()

            apisingle = host + "/sod/api.php?get=serietv&type=episodi&uri=" + uri + "&ep_num=" + ep_num + "&sub=" + urllib.quote_plus(
                type)

            fulltitle = serie + ' | ' + ep_num + ' ' + titolo
            frm_title = "[COLOR white](%s)[/COLOR] [B][COLOR royalblue]%s[/COLOR][/B] [B][COLOR deepskyblue]- %s %s[/COLOR][/B]" % (
                type.upper(), serie, ep_num, titolo)
        else:
            e_num = normalize_unicode(singledata['e_num'])
            s_num = normalize_unicode(singledata['s_num'])
            serie = scrapertools.decodeHtmlentities(normalize_unicode(singledata['serie'])).strip()

            apisingle = host + "/sod/api.php?get=anime&type=episodi&uri=" + uri + "&e_num=" + e_num + "&s_num=" + s_num + "&sub=" + urllib.quote_plus(
                type)

            fulltitle = serie + ' | ' + s_num + 'x' + e_num
            frm_title = "[COLOR white](%s)[/COLOR] [B][COLOR royalblue]%s[/COLOR][/B] [B][COLOR deepskyblue]- %sx%s[/COLOR][/B]" % (
                type.upper(), serie, s_num, e_num)

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvid_serie",
                 fulltitle=fulltitle,
                 show=serie,
                 title=frm_title,
                 url=apisingle,
                 thumbnail=singledata['fileName']), tipo='tv'))

    itemlist.append(
        Item(channel=__channel__,
             action="HomePage",
             title="[COLOR yellow]Torna Home[/COLOR]"))

    if len(data['results']) == result_per_page:
        end = int(scrapertools.find_single_match(item.url, r"&end=(\d+)"))
        next_page = item.url.split('&start=')[0] + "&start=%d&end=%d" % (end, end + result_per_page)

        itemlist.append(
            Item(channel=__channel__,
                 action="updateserietv",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page,
                 extra=item.extra,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
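
Paging here rides on the API's own &start=/&end= window: when a full page of result_per_page entries comes back, the window shifts forward by one page length. The same arithmetic as a standalone sketch (parameter names mirror the code above):

import re

def next_window(url, result_count, result_per_page):
    # Fewer results than a full page: nothing more to fetch.
    if result_count < result_per_page:
        return None
    end = int(re.search(r"&end=(\d+)", url).group(1))
    # Shift the start/end window forward by one page.
    return url.split('&start=')[0] + "&start=%d&end=%d" % (
        end, end + result_per_page)
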
コード例 #43
0
def peliculas(item):
    logger.info("[streamondemand-pureita altadefinizione01_zone] peliculas")
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the entries (folders)
    patron = '<h2>\s*<a href="([^"]+)">([^"]+)<\/a>\s*<\/h2>\s*[^>]+>[^>]+.*?\s*'
    patron += '</div>\s*<a href[^>]+>[^>]+src="([^"]+)"[^>]+>\s*</a>\s*'
    patron += '<div class="trdublaj">\s*(.*?)</div>\s*[^>]+>(.*?)\s*<'
    patron += '.*?<li>\s*<span class="ml[^"]+">(.*?)<\/.*?span>\s*<\/li>\s*' 
    patron += '<li><span class="ml-label">([^<]+)</span></li>.*?<p>(.*?)</p>'
    matches = re.compile(patron, re.DOTALL).finditer(data)

    for match in matches:
        scrapedplot = scrapertools.unescape(match.group(8))
        year = scrapertools.unescape(match.group(7))
        rating = scrapertools.unescape(match.group(6))
        sub = scrapertools.unescape(match.group(5))
        quality = scrapertools.unescape(match.group(4))
        scrapedthumbnail = urlparse.urljoin(item.url, match.group(3))
        #rating = scrapertools.unescape(match.group(3))
        scrapedtitle = scrapertools.unescape(match.group(2))
        scrapedurl = scrapertools.unescape(match.group(1))
        if year:
            scrapetitle = scrapedtitle.strip() + " (" + year + ")"
        else:
            scrapetitle = scrapedtitle
        if sub:
            sub = " ([COLOR yellow]" + sub + "[/COLOR])"
        if quality:
            quality = " ([COLOR yellow]" + quality + "[/COLOR])"
        if year:
            year = " ([COLOR yellow]" + year + "[/COLOR])"

        if rating:
            rating = rating.replace("<b>", "")
            rating = " ([COLOR yellow]" + rating + "[/COLOR])"
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 contentType="movie",
                 fulltitle=scrapetitle,
                 show=scrapetitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR] " + sub + year + quality + rating,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='movie'))

    # Extract the pager
    patronvideos = 'href="([^"]+)">&raquo;</a></i>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                 folder=True))

    return itemlist
コード例 #44
0
def pelis_src(item):
    logger.info("[streamondemand-pureita mondolunatico_new] pelis_src")
    itemlist = []
    numpage = 14

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract the contents
    patron = '<div class="thumbnail animation-2">\s*<a href="([^"]+)">\s*<img src="([^"]+)" alt="(.*?)" />.*?'
    patron += '<span class="rating">([^<]+)<\/span>.*?'
    patron += '<span class="year">([^<]+)</span>.*?'
    patron += '<div class="contenido">\s*<p>([^"]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for i, (scrapedurl, scrapedthumbnail, scrapedtitle, rating, year,
            scrapedplot) in enumerate(matches):
        if (p - 1) * numpage > i: continue
        if i >= p * numpage: break
        title = scrapertools.decodeHtmlentities(scrapedtitle)

        rating = rating.replace("IMDb ", "")
        rating = " ([COLOR yellow]" + rating + "[/COLOR])"
        if year:
            date = " (" + year + ")"
            years = " ([COLOR yellow]" + year + "[/COLOR])"
        if year in scrapedtitle:
            years = ""
            date = ""
        if "tvshows" in scrapedurl:
            type = " ([COLOR yellow]Serie TV[/COLOR])"
        else:
            type = ""
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         extra=item.extra,
                         action="episodios"
                         if "tvshows" in scrapedurl else "findvideos",
                         title="[COLOR azure]" + title + "[/COLOR]" + type +
                         years + rating,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         fulltitle=title + date,
                         show=title,
                         folder=True),
                    tipo="tv" if "tvshows" in scrapedurl else "movie"))

    # Extract the pager
    if len(matches) >= p * numpage:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(
                channel=__channel__,
                extra=item.extra,
                action="pelis_src",
                title="[COLOR orange]Successivi >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                folder=True))

    else:
        next_page = scrapertools.find_single_match(
            data, '<a href="([^"]+)" ><span class="icon-chevron-right">')
        if next_page != "":
            itemlist.append(
                Item(
                    channel=__channel__,
                    action="pelis_src",
                    title="[COLOR orange]Successivi >>[/COLOR]",
                    url=next_page,
                    thumbnail=
                    "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"
                ))

    return itemlist
コード例 #45
0
def peliculas(item):
    logger.info("streamondemand.streamblog peliculas")
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the contents
    patron = '<div class="blvideo">\s*<div class="poster"><a href="([^"]+)"><img src="([^"]+)" alt="(.*?)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedthumbnail = host + scrapedthumbnail
        scrapedplot = ""
        scrapetrailar = " Trailer"

        # Bypass fake links
        html = httptools.downloadpage(scrapedurl).data

        patron = '<div class="video-player-plugin">([\s\S]*)<div class="wrapper-plugin-video">'
        matches = re.compile(patron, re.DOTALL).findall(html)

        for url in matches:
            if "scrolling" in url:
                scrapedurl = scrapedurl
            else:
                scrapedtitle = scrapedtitle + scrapetrailar

            itemlist.append(
                infoSod(Item(channel=__channel__,
                             action="findvideos",
                             contentType="movie",
                             fulltitle=scrapedtitle,
                             show=scrapedtitle,
                             title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                             url=scrapedurl,
                             thumbnail=scrapedthumbnail,
                             plot=scrapedplot,
                             folder=True),
                        tipo='movie'))

    # Pagination
    patronvideos = '<div class="navigation">[^n]+n>[^<]+<\/span>[^<]+<a href="(.*?)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(
                channel=__channel__,
                action="HomePage",
                title="[COLOR yellow]Torna Home[/COLOR]",
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/return_home_P.png",
                folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png",
                folder=True))

    return itemlist
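
The "bypass fake links" probe fetches each detail page once and inspects the player block for an iframe: the "scrolling" attribute only shows up when a real stream is embedded, otherwise the page holds just a trailer. The check in isolation (a sketch):

import re

def has_real_player(html):
    # The player sits between these two divs; an iframe (with its
    # "scrolling" attribute) inside it marks an actual embedded stream.
    block = re.search(r'<div class="video-player-plugin">([\s\S]*)'
                      r'<div class="wrapper-plugin-video">', html)
    return bool(block) and "scrolling" in block.group(1)
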
コード例 #46
0
def peliculas(item):
    logger.info()
    itemlist = []

    while True:
        data = httptools.downloadpage(item.url).data
        patron = r'<div class="short-story">\s*<a href="([^"]+)".*?>\s*'
        patron += r'<img.*?style="background:url\(([^\)]+)\).*?">'
        patron += r'\s*<div class="custom-title">([^<]+)</div>'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
            year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
            if year:
                scrapedtitle = scrapedtitle.replace(year, color(year, "red"))

            # Bypass fake links
            html = httptools.downloadpage(scrapedurl).data

            patron = '<div class="video-player-plugin">([\s\S]*)<div class="wrapper-plugin-video">'
            matches = re.compile(patron, re.DOTALL).findall(html)
            for url in matches:
                if "scrolling" not in url: continue

                itemlist.append(infoSod(
                    Item(channel=__channel__,
                         action="findvideos",
                         contentType="movie",
                         title=scrapedtitle,
                         fulltitle=scrapedtitle,
                         url=scrapedurl,
                         extra="movie",
                         thumbnail=scrapedthumbnail,
                         folder=True), tipo="movie"))

        # Pages
        patronvideos = r'<a href="([^"]+)">Avanti</a>'
        next_page = scrapertools.find_single_match(data, patronvideos)

        if not next_page:
            break
        else:
            item.url = next_page
            if itemlist:
                itemlist.append(
                    Item(
                        channel=__channel__,
                        action="HomePage",
                        title="[COLOR yellow]Torna Home[/COLOR]",
                        folder=True))
                itemlist.append(
                    Item(
                        channel=__channel__,
                        action="peliculas",
                        title="[COLOR orange]Successivo >>[/COLOR]",
                        url=item.url,
                        thumbnail= "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                        folder=True))
                break

    return itemlist
コード例 #47
0
def latest(item):
    logger.info("streamondemand.tantifilm peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)

    # Extract the entries (folders)
    patron = '<div class="mediaWrap mediaWrapAlt">\s*'
    patron += '<a href="([^"]+)" title="([^"]+)" rel="bookmark">\s*'
    patron += '<img[^s]+src="([^"]+)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        html = scrapertools.cache_page(scrapedurl, headers=headers)
        start = html.find("<div class=\"content-left-film\">")
        end = html.find("</div>", start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("Permalink to ", "")
        scrapedtitle = scrapedtitle.replace("streaming", "")
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         folder=True),
                    tipo='movie'))

    # Extract the pager
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(
                channel=__channel__,
                action="HomePage",
                title="[COLOR yellow]Torna Home[/COLOR]",
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/return_home_P.png",
                folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="latest",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/vari/successivo_P.png",
                folder=True))

    return itemlist
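
Pager hrefs on these sites are often relative, which is why every paginator resolves them against the current item.url with urlparse.urljoin before queueing the next page. The pattern as one helper (Python 2 urlparse, as used above; the name is illustrative):

import re
import urlparse

def resolve_next_page(current_url, data, pattern):
    # Resolve the first pager match against the page it came from,
    # so relative and absolute hrefs both work.
    matches = re.findall(pattern, data, re.DOTALL)
    return urlparse.urljoin(current_url, matches[0]) if matches else None
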
コード例 #48
0
def peliculas(item):
    logger.info("streamondemand.altadefinizione01 peliculas")
    itemlist = []

    # Download the page
    # data = scrapertools.cache_page(item.url)

    data = scrapertools.anti_cloudflare(item.url, headers)

    ## ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()[4:]
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patron = '<a\s+href="([^"]+)"\s+title="[^"]*">\s+<img\s+width="[^"]*"\s+height="[^"]*"\s+src="([^"]+)"\s+class="[^"]*"\s+alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("Streaming", ""))
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        ## ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        ## ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail), tipo="movie"))

    # Extract the pager
    patronvideos = 'class="nextpostslink" rel="next" href="([^"]+)">&raquo;'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
コード例 #49
0
def peliculas_tv(item):
    logger.info("streamondemand.filmstream peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries (folders)
    patron = '<div class="galleryitem".*?>\s*'
    patron += '<a href="?([^>"]+)"?.*?title="?([^>"]+)"?.*?<img.*?src="([^>"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        html = scrapertools.cache_page(scrapedurl)
        start = html.find("</strong></p>")
        end = html.find("<p>&nbsp;</p>", start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedtitle = scrapedtitle.replace("Streaming", "")
        scrapedtitle = scrapedtitle.replace("(Serie Tv)", "{Serie Tv}")
        scrapedtitle = scrapedtitle.replace("(Serie TV)", "{Serie Tv}")
        scrapedtitle = scrapedtitle.replace("(Tv)", "{Tv}")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("(Miniserie Tv)", "{Miniserie Tv}"))
        if scrapedtitle.startswith("Permanent Link to "):
            scrapedtitle = scrapedtitle[18:]
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 extra=item.extra,
                 folder=True), tipo='tv'))

    # Extract the pager
    patronvideos = '<li><a href="([^"]+)">&gt;</a></li>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Home[/COLOR]",
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/return_home_P.png",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas_tv",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png",
                 extra=item.extra,
                 folder=True))

    return itemlist
コード例 #50
0
def peliculas(item):
    logger.info("streamondemand.streaming01 peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries (folders)
    patron = '<a class="short-img" href="([^"]+)"[^>]+>\s*'
    patron += '<img src="([^"]+)"[^>]+>\s*'
    patron += '</a>\s*'
    patron += '<div[^>]+>\s*'
    patron += '<h3>[^>]+>(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        # Plot lines commented out because the channel is too slow
        #       html = scrapertools.cache_page(scrapedurl)
        #       start = html.find("<div class=\"full-text clearfix desc-text\">")
        #       end = html.find("<table>", start)
        #       scrapedplot = html[start:end]
        #       scrapedplot = re.sub(r'<.*?>', '', scrapedplot)
        #       scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        for junk in ("Streaming ", " e download", "gratis", "streaming",
                     "ita", "ITA", "download", "GRATIS", "[", "]"):
            scrapedtitle = scrapedtitle.replace(junk, "")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         contentType="movie",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         folder=True),
                    tipo='movie'))

    # Extract the pager
    patronvideos = '<span class="pnext"><a href="([^"]+)">Avanti</a></span>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))

    return itemlist
コード例 #51
0
def peliculas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = r'<div class="main-news-image">\s*<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)".*?/></a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        # Bypass fake links: probe the detail page for a real embedded player
        html = httptools.downloadpage(scrapedurl).data

        patron = '<div class="video-player-plugin">([\s\S]*)<div class="wrapper-plugin-video">'
        matches = re.compile(patron, re.DOTALL).findall(html)
        for url in matches:
            # No embedded iframe ("scrolling") means the page only holds a trailer
            if "scrolling" not in url:
                scrapedtitle = scrapedtitle + " Trailer"

            itemlist.append(
                infoSod(Item(channel=__channel__,
                             action="findvideos",
                             contentType="movie",
                             title=scrapedtitle,
                             fulltitle=scrapedtitle,
                             url=scrapedurl,
                             extra="movie",
                             thumbnail=scrapedthumbnail,
                             folder=True),
                        tipo="movie"))

    # Pages
    patronvideos = r'<a href="([^"]+)">>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(
                channel=__channel__,
                action="HomePage",
                title="[COLOR yellow]Torna Home[/COLOR]",
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/return_home_P.png",
                folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png",
                extra=item.extra,
                folder=True))

    return itemlist
コード例 #52
0
def peliculas_update(item):
    logger.info("streamondemand-pureita.filmpertutti peliculas")
    itemlist = []

    numpage = 14

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries (folders)
    patron = '<li><a\s*href="([^\/]+\/\/[^\/]+\/([^"]+))" data-\s*thumbnail="([^"]+)">'
    patron += '<div>\s*<div class="title">(.*?)<\/div>\s*<div class="episode"[^>]+>(.*?)<\/div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for i, (scrapedurl, titolo, scrapedthumbnail, scrapedtitle,
            episode) in enumerate(matches):

        if (p - 1) * numpage > i: continue
        if i >= p * numpage: break

        if scrapedtitle == "":
            scrapedtitle = titolo.title()

        episode = episode.replace("<br>", " ")

        for old, new in (("<br>", " "), ("&amp;", "e"), ("-", " "),
                         ("6", ""), ("/", " "), ("Serie Tv", ""),
                         ("serie tv", ""), ("Serie TV", ""),
                         ("SERIE TV", "")):
            scrapedtitle = scrapedtitle.replace(old, new)

        scrapedtitle = scrapedtitle.strip()
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="episodios",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title="[COLOR azure]" + scrapedtitle +
                         "[COLOR yellow] (" + episode + ")[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot="",
                         extra=item.extra,
                         folder=True),
                    tipo='tv'))

    # Extract the pager
    if len(matches) >= p * numpage:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(
                channel=__channel__,
                extra=item.extra,
                action="peliculas_update",
                title="[COLOR orange]Successivi >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                folder=True))

    return itemlist
コード例 #53
0
def peliculas(item):
    logger.info("streamondemand.portalehd peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.24hd.online.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    # Extract the entries (folders)
    patron = '<li><img src=".*?src="([^"]+)".*?<a href="([^"]+)".*?class="title">([^<]+)</a>.*?</li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedplot = ""
        #scrapedthumbnail = ""
        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         folder=True),
                    tipo='movie'))

    # Extract the pager
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">Avanti »</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))

    return itemlist
コード例 #54
0
def peliculas(item):
    logger.info("streamondemand-pureita majintoon lista_animation")
    itemlist = []
    minpage = 14

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    data = httptools.downloadpage(item.url, headers=headers).data

    patron = r'<a\s*href="((http[^"]+))".*?>\s*<img\s*src="([^"]+)"\s*style[^>]+>\s*<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for i, (scrapedurl, scrapedtitle, scrapedthumbnail) in enumerate(matches):
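        # Note: the patron captures the href twice, so scrapedtitle starts out
        # as the URL itself; the replaces below carve a title from the slug.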
        if (p - 1) * minpage > i: continue
        if i >= p * minpage: break
        scrapedplot = ""
        scrapedtitle = scrapedtitle.replace(
            "https://www.videotecaproject.eu/news/", "")

        scrapedtitle = scrapedtitle.replace("-", " ").replace("1/", "")
        scrapedtitle = scrapedtitle.replace(":", " - ").replace("2/", "")
        scrapedtitle = scrapedtitle.replace("3/", "").replace("/", "")
        scrapedtitle = re.sub(r"([0-9])", r" \1", scrapedtitle)
        scrapedtitle = re.sub('(?<=\d) (?=\d)', '', scrapedtitle)

        scrapedtitle = scrapedtitle.title()
        scrapedtitle = scrapedtitle.strip()
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("A ", "")
        scrapedtitle = scrapedtitle.replace("Sub Ita", "(Sub Ita)")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         contentType="movie",
                         title=scrapedtitle,
                         fulltitle=scrapedtitle,
                         url=scrapedurl,
                         show=scrapedtitle,
                         extra="tv",
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         folder=True),
                    tipo="movie"))

    # Extract the pager
    if len(matches) >= p * minpage:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(
                channel=__channel__,
                extra=item.extra,
                action="peliculas",
                title="[COLOR orange]Successivi >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                folder=True))

    return itemlist
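
The two re.sub calls above turn a URL slug such as "dragonball2018" into "Dragonball 2018": the first inserts a space before every digit, the second deletes only the spaces it created between consecutive digits. In isolation:

import re

def space_out_digits(slug):
    # "movie2018" -> "movie 2 0 1 8"
    text = re.sub(r"([0-9])", r" \1", slug)
    # Drop the spaces between consecutive digits -> "movie 2018".
    return re.sub(r"(?<=\d) (?=\d)", "", text)
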
コード例 #55
0
def peliculas(item):
    logger.info("[cineblog01.py] mainlist")
    itemlist = []

    if item.url == "":
        item.url = sito

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)

    # Extract the entries (folders)
    patronvideos = '<div class="span4".*?<a.*?<p><img src="([^"]+)".*?'
    patronvideos += '<div class="span8">.*?<a href="([^"]+)"> <h1>([^"]+)</h1></a>.*?'
    patronvideos += '<strong>([^<]*)</strong>.*?<br />([^<+]+)'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)

    for match in matches:
        scrapedtitle = scrapertools.unescape(match.group(3))
        scrapedurl = urlparse.urljoin(item.url, match.group(2))
        scrapedthumbnail = urlparse.urljoin(item.url, match.group(1))
        scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
        scrapedplot = scrapertools.unescape("[COLOR orange]" + match.group(4) +
                                            "[/COLOR]\n" +
                                            match.group(5).strip())
        scrapedplot = scrapertools.htmlclean(scrapedplot).strip()
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title=scrapedtitle,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         extra=item.extra,
                         viewmode="movie_with_plot"),
                    tipo='movie'))

    # Next page mark
    try:
        bloque = scrapertools.get_match(
            data, "<div id='wp_page_numbers'>(.*?)</div>")
        patronvideos = '<a href="([^"]+)">></a></li>'
        matches = re.compile(patronvideos, re.DOTALL).findall(bloque)
        scrapertools.printMatches(matches)

        if len(matches) > 0:
            scrapedtitle = "[COLOR orange]Successivo>>[/COLOR]"
            scrapedurl = matches[0]
            scrapedthumbnail = ""
            scrapedplot = ""
            if DEBUG:
                logger.info("title=[" + scrapedtitle + "], url=[" +
                            scrapedurl + "], thumbnail=[" + scrapedthumbnail +
                            "]")
            itemlist.append(
                Item(channel=__channel__,
                     action="HomePage",
                     title="[COLOR yellow]Torna Home[/COLOR]",
                     folder=True))
            itemlist.append(
                Item(
                    channel=__channel__,
                    action="peliculas",
                    title=scrapedtitle,
                    url=scrapedurl,
                    thumbnail=
                    "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                    extra=item.extra,
                    plot=scrapedplot))
    except:
        pass

    return itemlist
コード例 #56
0
def peliculas(item):
    logger.info("streamondemand-pureita.cineblogrun peliculas")
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the entries (folders)
    patron = r'<a href="([^"]+)">\s*<div class="Image">\s*<figure clas[^>]+><img[^>]+src="([^"]+)"\s*'
    patron += r'class[^>]+><\/figure>\s*<\/div>\s*<h3 class="Title">(.*?)<\/h3>.*?'
    patron += r'<span[^>]+>([^<]+)</span><span class="Qlty">([^<]+)</span>.*?'
    patron += r'.*?<p>(.*?)</p>'
    matches = re.compile(patron, re.DOTALL).finditer(data)

    for match in matches:
        scrapedplot = scrapertools.unescape(match.group(6))
        quality = scrapertools.unescape(match.group(5))
        year = scrapertools.unescape(match.group(4))
        scrapedtitle = scrapertools.unescape(match.group(3))
        scrapedthumbnail = scrapertools.unescape(match.group(2))
        scrapedurl = urlparse.urljoin(item.url, match.group(1))
        scrapedtitle = scrapedtitle.replace("&", "e")
        if "." in year or "h" in year:
            year = ""
        else:
            year = " ([COLOR yellow]" + year + "[/COLOR])"
        if "1080" in quality or "720" in quality:
            quality = " ([COLOR yellow]HD[/COLOR])"
        else:
            if "Unknown" in quality:
                quality = " ([COLOR yellow]NA[/COLOR])"
            else:
                quality = " ([COLOR yellow]LQ[/COLOR])"
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         contentType="movie",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]" +
                         year + quality,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         folder=True),
                    tipo='movie'))

    patronvideos = '<a class="next page-numbers" href="([^"]+)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivi >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                folder=True))

    return itemlist
コード例 #57
0
def fichas(item):
    logger.info("[itastreaming.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality
    data = re.sub(r'<div class="wrapperImage"[^<]+<a',
                  '<div class="wrapperImage"><fix>SD</fix><a', data)
    # fix - IMDB
    data = re.sub(r'<h5> </div>', '<fix>IMDB: 0.0</fix>', data)
    # ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()[4:]  # skip the cookies.txt header lines, as in the other channels
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<div class="item">.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=scrapedtitle,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=scrapedtitle,
                         show=scrapedtitle),
                    tipo='movie'))

    # Pagination
    next_page = scrapertools.find_single_match(
        data, "href='([^']+)'>Seguente &rsaquo;")
    if next_page != "":
        itemlist.append(
            Item(
                channel=__channel__,
                action="fichas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=next_page,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
            ))

    return itemlist
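
The two re.sub fixes at the top of this fichas() are a normalization trick: cards missing a quality badge or an IMDB score get sentinel markup (<fix>SD</fix>, <fix>IMDB: 0.0</fix>) injected, so a single patron with quality and rating groups matches every card layout. The idea in isolation:

import re

def normalize_cards(data):
    # Default quality badge for cards that lack one...
    data = re.sub(r'<div class="wrapperImage"[^<]+<a',
                  '<div class="wrapperImage"><fix>SD</fix><a', data)
    # ...and a default score for cards without one, so one regex
    # with an "IMDB: ([^<]+)<" group fits every card.
    return re.sub(r'<h5> </div>', '<fix>IMDB: 0.0</fix>', data)
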
コード例 #58
0
def fichas(item):
    logger.info("[hdgratis.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()[4:]
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    if "/?s=" in item.url:
        patron = '<div class="col-lg-3 col-md-3 col-xs-3">\s*<a href="([^"]+")>\s*<div class="wrapperImage">[^i]+i[^s]+src="([^"]+)"[^>]+> <div class="info">\s*<h5[^>]+>(.*?)<'
    else:
        patron = '<span class="hd">HD</span>\s*<a href="([^"]+)"><img[^s]+src="([^"]+)"[^>]+></a> <div class="info">\s*<[^>]+>[^>]+>(.*?)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:

        title = scrapertools.decodeHtmlentities(scrapedtitle)

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=title,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=title,
                         show=scrapedtitle),
                    tipo='movie'))

    # Pagination: take the last <link rel="next"> match, or "" if none
    next_pages = re.compile('<link rel="next" href="(.+?)"/>',
                            re.DOTALL).findall(data)
    next_page = next_pages[-1] if next_pages else ""

    if next_page != "":
        itemlist.append(
            Item(
                channel=__channel__,
                action="fichas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=next_page,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
            ))

    return itemlist
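# Side note (not part of the original channel): the "|" + _headers suffix used
# above relies on Kodi's convention of accepting HTTP headers appended to a
# media/image URL after a pipe, urlencoded as key=value pairs, so artwork
# behind Cloudflare loads with the same Cookie/User-Agent as the page.
# A sketch with hypothetical values:
import urllib

def with_headers(url, headers):
    # headers: list of [name, value] pairs, as used by the channels above
    return url + "|" + urllib.urlencode(dict(headers))

# with_headers("http://example.com/poster.jpg", [['User-Agent', 'Mozilla/5.0']])
# -> "http://example.com/poster.jpg|User-Agent=Mozilla%2F5.0"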
コード例 #59
0
def anime(item):
    logger.info("[italiafilm.py] anime")
    itemlist = []

    data = scrapertools.cache_page(item.url)
    patron = '<li class="cat_19(.*?)</li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for match in matches:
        title = scrapertools.find_single_match(
            match, '<span class="tvseries_name">(.*?)</span>')
        t = scrapertools.find_single_match(match, '</i>(.*?)</a>')
        t = scrapertools.decodeHtmlentities(t).strip()
        title = title.replace("Streaming", "")
        title = scrapertools.decodeHtmlentities(title).strip()
        title = title + " - " + t
        url = scrapertools.find_single_match(match, '<a href="([^"]+)"')
        plot = ""
        thumbnail = scrapertools.find_single_match(match,
                                                   'data-echo="([^"]+)"')

        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "]")

        itemlist.append(
            infoSod(Item(
                channel=__channel__,
                action='episodios' if item.extra == 'serie' else 'findvideos',
                fulltitle=title,
                show=title,
                title="[COLOR azure]" + title + "[/COLOR]",
                url=url,
                thumbnail=thumbnail,
                plot=plot,
                viewmode="movie_with_plot",
                folder=True),
                    tipo='tv'))

    # Next page
    try:
        pagina_siguiente = scrapertools.get_match(
            data, '<a class="next page-numbers" href="([^"]+)"')
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="anime",
                extra=item.extra,
                title="[COLOR orange]Successivo >> [/COLOR]",
                url=pagina_siguiente,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))
    except:
        pass  # no next page link found

    return itemlist
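# Side note (not part of the original channel): a pure-re equivalent of the
# scrapertools.find_single_match helper used above, assuming the usual
# streamondemand behavior of returning the first captured group of the first
# match, or "" when nothing matches.
import re

def find_single_match(data, patron):
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ""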
コード例 #60
0
def peliculas(item):
    logger.info("streamondemand-pureita.mondolunatico_hd peliculas")
    itemlist = []
    numpage = 14

    p = 1
    if '{}' in item.url:
        # the current page number travels after a "{}" marker in the URL
        item.url, p = item.url.split('{}')
        p = int(p)

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries
    patron = '<a href="([^"]+)" data-url="" class="ml-mask jt" data-hasqtip=".*?" oldtitle="(.*?)" title="">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    scrapedplot = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # client-side paging: keep only the numpage entries of page p
        if (p - 1) * numpage > i: continue
        if i >= p * numpage: break
        scrapedtitle = scrapedtitle.replace("&#8217;", "'")
        if "Fichier" in scrapedtitle:
            continue
        if "http" not in scrapedurl:
            scrapedurl = host + scrapedurl
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedthumbnail = ""
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         extra=item.extra,
                         action="findvideos",
                         contentType="movie",
                         title="[COLOR azure]" + title + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=title,
                         show=title,
                         plot=scrapedplot,
                         folder=True),
                    tipo='movie'))

    # Extract the paginator
    if len(matches) >= p * numpage:
        # more entries remain: re-append the "{}" page marker for the next page
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(
                channel=__channel__,
                extra=item.extra,
                action="peliculas",
                title="[COLOR orange]Successivi >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                folder=True))

    else:
        next_page = scrapertools.find_single_match(
            data,
            "</div>\s*<div id='pagination.*?>\d+</a></li><li><a rel='nofollow' class='page\s*larger' href='([^']+)'>\d+<\/a></li><li><a"
        )
        if next_page != "":
            itemlist.append(
                Item(
                    channel=__channel__,
                    action="peliculas",
                    title="[COLOR orange]Successivi >>[/COLOR]",
                    url=next_page,
                    thumbnail=
                    "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"
                ))

    return itemlist
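# Side note (not part of the original channel): a minimal sketch of the "{}"
# paging trick used in peliculas() above; the current page number is carried
# after a "{}" marker inside item.url, so one string holds both the page
# address and the client-side slice offset.
def split_page_marker(url, default_page=1):
    if '{}' in url:
        base, page = url.split('{}')
        return base, int(page)
    return url, default_page

# split_page_marker("http://example.com/film/{}3") -> ("http://example.com/film/", 3)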