Пример #1
0
def findvideos(item):
    """Scrape the item's page and build a list of playable links.

    Extracts (language, quality, server, url) tuples from the page,
    resolves each link's redirect chain and URL shortener, then appends
    the usual trailer-search / add-to-videolibrary entries.

    :param item: channel Item whose .url points at the detail page
    :return: list of playable Items (plus helper entries)
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.unescape(data)
    # Captures: language icon id, quality, server image name, raw href.
    # Raw string avoids invalid-escape warnings; the value is unchanged.
    patron = r'#div_\d_\D.+?<img id="([^"]+).*?<span>.*?</span>.*?<span>(.*?)</span>.*?imgdes.*?imgdes/([^\.]+).*?<a href=([^\s]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches:
        scrapedurl = scrapedurl.replace('"', '')
        # Follow the redirect chain manually, capped so a redirect cycle
        # cannot hang the scraper forever (was an unbounded `while True`).
        for _ in range(10):
            loc = httptools.downloadpage(scrapedurl, follow_redirects=False).headers.get("location", "")
            if not loc or "/ad/locked" in loc or not loc.startswith("http"):
                break
            scrapedurl = loc
        scrapedurl, c = unshortenit.unshorten_only(scrapedurl)
        # Some shorteners wrap the destination in a dest=/dp_href= query arg.
        if "dest=" in scrapedurl or "dp_href=" in scrapedurl:
            scrapedurl = scrapertools.find_single_match(urllib.unquote(scrapedurl), '(?:dest|dp_href)=(.*)')
        title = item.title + "_" + scrapedidioma + "_" + scrapedserver + "_" + scrapedcalidad
        itemlist.append(item.clone(action="play",
                                    title=title,
                                    url=scrapedurl))
    itemlist = servertools.get_servers_itemlist(itemlist)
    tmdb.set_infoLabels(itemlist)
    if itemlist:
        itemlist.append(Item(channel=item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
        # "Add this movie to the KODI video library" option
        if item.contentChannel != "videolibrary" and config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
                                 contentTitle=item.contentTitle
                                 ))
    return itemlist
Пример #2
0
def findvideos(item):
    """Collect playable links from a cinemastreaming page.

    'trdownload' links are followed through their redirect chain,
    de-shortened (adfly) and merged with the raw page content before
    being handed to servertools.

    :param item: channel Item whose .url points at the detail page
    :return: list of playable Items
    """
    logger.info("[cinemastreaming.py] findvideos")

    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data

    urls = set()
    if 'trdownload' in data:
        data = data.replace("&#038;", "&")
        for url in scrapertools.find_multiple_matches(
                data,
                r'href="(https://www\.cinemastreaming\.pw/\?trdownload[^"]+?)"'
        ):
            # Follow redirects manually, capped so a redirect cycle
            # cannot hang the scraper (was an unbounded `while True`).
            for _ in range(10):
                loc = httptools.downloadpage(
                    url, follow_redirects=False).headers.get("location", "")
                if not loc:
                    break
                url = loc

            url, c = unshorten_only(url, 'adfly')
            urls.add(url)

    # Feed both the resolved URLs and the raw page to the server matcher.
    itemlist = servertools.find_video_items(data=str(urls) + data)

    for videoitem in itemlist:
        videoitem.title = "".join([
            item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'
        ])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
Пример #3
0
def findvideos(item):
    """Build playable video items from the download/streaming links on the page."""
    logger.info("[thegroove360.downloadme] findvideos")

    # Fetch the page and pull every anchor preceding the link label.
    page = httptools.downloadpage(item.url, headers=headers).data
    anchors = re.compile(r'<a\s?href=\"([^\"]+)\">LINK DOWNLOAD E STREAMING',
                         re.IGNORECASE).findall(page)

    from lib.unshortenit import unshorten_only
    resolved = ""
    for anchor in anchors:
        # Each anchor points at a redirector: take its Location header.
        redirect = httptools.downloadpage(anchor, follow_redirects=False)
        target = redirect.headers.get("location", "")

        final_url, http_status = unshorten_only(target, "adfly")

        # Use the unshortened URL on success, the raw redirect target otherwise.
        picked = final_url if http_status < 400 else target
        resolved += picked.encode('utf8') + "\n"

    itemlist = servertools.find_video_items(data=resolved)

    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__

    return itemlist
Пример #4
0
def findvideos(item):
    """Gather candidate links for the item and hand them to support.server."""
    if item.contentType == 'movie':
        # Movies: search only inside the content column, stopping at the ads.
        source = item.data if item.data else item
        links = support.match(
            source,
            patron=r'(?:SRC|href)="([^"]+)"',
            patronBlock=r'<div class="col-md-10">(.+?)<div class="ads">'
        ).matches
    else:
        links = support.match(item.url, patron=r'href="([^"]+)"').matches

    from lib.unshortenit import unshorten_only
    collected = ''
    for link in links:
        support.log('URL=', link)
        # Replace the '#' shorthand with the speedvideo.net host, then unshorten.
        resolved, _status = unshorten_only(link.replace('#', 'speedvideo.net'))
        collected += resolved + '\n'
    return support.server(item, collected)
Пример #5
0
def findvideos(item):
    """Classify the page (anime/series go to episodios) or collect movie links."""
    log()
    listurl = set()
    support.log("ITEMLIST: ", item)
    data = httptools.downloadpage(item.url, headers=headers).data

    # Flatten whitespace so the category regex matches reliably.
    data = re.sub('\n|\t', ' ', data)
    data = re.sub(r'>\s+<', '> <', data)

    check = scrapertools.find_single_match(
        data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>')
    if 'sub' in check.lower():
        item.contentLanguage = 'Sub-ITA'
    support.log("CHECK : ", check)

    # Anime and series pages are episode listings, not direct videos.
    if 'anime' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        support.log('select = ### è una anime ###')
        return episodios(item)
    elif 'serie' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        return episodios(item)

    if 'protectlink' in data:
        encoded_urls = scrapertools.find_multiple_matches(
            data, r'<iframe src="[^=]+=(.*?)"')
        support.log("SONO QUI: ", encoded_urls)
        for encoded in encoded_urls:
            url = encoded.decode('base64')
            url, _status = unshorten_only(url)
            if 'nodmca' in url:
                # nodmca pages hide the real URL in an og:url meta tag.
                page = httptools.downloadpage(url, headers=headers).data
                url = '\t' + scrapertools.find_single_match(
                    page, '<meta name="og:url" content="([^=]+)">')
            if url:
                listurl.add(url)
    data += '\n'.join(listurl)
    return support.server(item, data)
Пример #6
0
def findvideos(item):
    """Dispatch anime/series pages to episodios() or collect movie links.

    :param item: channel Item whose .url points at the detail page
    :return: list of playable Items from support.server (or episodios)
    """
    log()
    listurl = set()
    itemlist = []
    support.log("ITEMLIST: ", item)
    data = support.match(item.url, headers=headers).data
    check = support.match(
        data, patron=r'<div class="category-film">(.*?)</div>').match
    if 'sub' in check.lower():
        item.contentLanguage = 'Sub-ITA'
    support.log("CHECK : ", check)
    if 'anime' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        support.log('select = ### è una anime ###')
        # Narrowed from a bare `except:` (which would also swallow
        # SystemExit/KeyboardInterrupt). On failure fall through and try
        # the movie path below.
        try:
            return episodios(item)
        except Exception:
            pass
    elif 'serie' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        return episodios(item)

    if 'protectlink' in data:
        urls = scrapertools.find_multiple_matches(
            data, r'<iframe src="[^=]+=(.*?)"')
        support.log("SONO QUI: ", urls)
        for url in urls:
            # The iframe arg is a base64-encoded URL.
            url = url.decode('base64')
            url, c = unshorten_only(url)
            if 'nodmca' in url:
                # nodmca pages hide the real URL in an og:url meta tag.
                page = httptools.downloadpage(url, headers=headers).data
                url = '\t' + scrapertools.find_single_match(
                    page, '<meta name="og:url" content="([^=]+)">')
            if url:
                listurl.add(url)
    data += '\n'.join(listurl)

    itemlist = support.server(item,
                              data + item.otherLinks,
                              patronTag='Keywords:\s*<span>([^<]+)')
    return itemlist
Пример #7
0
def findvideos(item):
    """Scrape (language, quality, server, url) options and build play items.

    :param item: channel Item whose .url points at the detail page
    :return: list of playable Items
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.unescape(data)
    # Captures: language icon id, quality, server image name, raw href.
    # Raw string avoids invalid-escape warnings; the value is unchanged.
    patron = r'#div_\d_\D.+?<img id="([^"]+).*?<span>.*?</span>.*?<span>(.*?)</span>.*?imgdes.*?imgdes/([^\.]+).*?<a href=([^\s]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches:
        # Strip the wrapping quotes BEFORE the first request; previously the
        # redirect probe below was issued against a '"'-wrapped URL.
        scrapedurl = scrapedurl.replace('"', '')
        # Follow redirects manually, capped so a redirect cycle cannot hang
        # the scraper (was an unbounded `while True`).
        for _ in range(10):
            loc = httptools.downloadpage(scrapedurl,
                                         follow_redirects=False).headers.get(
                                             "location", "")
            if not loc or "/ad/locked" in loc:
                break
            scrapedurl = loc
        scrapedurl, c = unshortenit.unshorten_only(scrapedurl)
        title = item.title + "_" + scrapedidioma + "_" + scrapedserver + "_" + scrapedcalidad
        itemlist.append(item.clone(action="play", title=title, url=scrapedurl))
    tmdb.set_infoLabels(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist)
    return itemlist
Пример #8
0
def expand_url(url):
    """Return *url* with any known shortener resolved to its target."""
    expanded, _status = unshortenit.unshorten_only(url)
    return expanded
Пример #9
0
def findvideos(item):
    """Build play items for every player option on the page.

    Resolves the site's two-level iframe embedding, labels each link with
    server/quality/language, rewrites google links found behind shorteners,
    then applies filtertools/autoplay and the optional videolibrary entry.

    :param item: channel Item whose .url points at the detail page
    :return: sorted, filtered list of playable Items
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = 'data-tplayernv="Opt(.*?)"><span>(.*?)</span>(.*?)</li>'  # option, server, lang - quality
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Constant label table, hoisted out of the loop.
    languages = {
        'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
        'español': '[COLOR green](CAST)[/COLOR]',
        'subespañol': '[COLOR red](VOS)[/COLOR]',
        'sub': '[COLOR red](VOS)[/COLOR]'
    }

    for option, servername, quote in matches:
        patron = '<span>(.*?) -([^<]+)</span'
        match = re.compile(patron, re.DOTALL).findall(quote)
        lang, quality = match[0]
        quality = quality.strip()
        headers = {'Referer': item.url}
        # First-level iframe for this player option.
        url_1 = scrapertools.find_single_match(
            data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' %
            option)
        new_data = httptools.downloadpage(url_1, headers=headers).data
        new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
        new_data = scrapertools.decodeHtmlentities(new_data)
        # Second-level iframe holds the actual hoster URL.
        url2 = scrapertools.find_single_match(
            new_data, '<iframe width="560" height="315" src="([^"]+)"')
        url = url2 + '|%s' % url_1
        if 'rapidvideo' in url2 or "verystream" in url2:
            url = url2

        lang = lang.lower().strip()
        if lang in languages:
            lang = languages[lang]

        servername = servertools.get_server_from_url(url)

        title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
            servername.title(), quality, lang)

        itemlist.append(
            item.clone(action='play',
                       url=url,
                       title=title,
                       language=lang,
                       quality=quality,
                       text_color=color3))

    patron1 = 'href="([^>]+)" class="Button STPb">.*?<img src="([^>]+)".*?alt="Imagen (.*?)">.*?<span>(\d+)'  # option, server, lang - quality
    matches1 = re.compile(patron1, re.DOTALL).findall(data)
    for url, img, lang, quality in matches1:
        if "cine24h" in url or "short." in url:
            continue
        else:
            url, c = unshortenit.unshorten_only(url)
            if "short." in url:
                continue
            elif "google." in url:
                # BUGFIX: this loop variable used to be named `item`,
                # shadowing the function parameter and corrupting the
                # videolibrary block below after the loop finished.
                for existing in itemlist:
                    if "google." in existing.url:
                        existing.url = url
    itemlist = servertools.get_servers_itemlist(itemlist)

    itemlist.sort(key=lambda it: it.language, reverse=False)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(
            Item(channel=__channel__,
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 thumbnail=thumbnail_host,
                 contentTitle=item.contentTitle))

    return itemlist
Пример #10
0
def findvideos(item):
    """Extract hoster links (and inline jwplayer sources) for a tantifilm item."""
    logger.info("kod.tantifilm findvideos")

    # Episodes carry their page content in item.url; movies need a download.
    if item.contentType == "episode":
        data = item.url
    else:
        data = httptools.downloadpage(item.url, headers=headers).data

    if 'protectlink' in data:
        encoded_urls = scrapertools.find_multiple_matches(
            data, r'<iframe src="[^=]+=(.*?)"')
        for encoded in encoded_urls:
            # Append both the decoded URL and its unshortened form so the
            # server matcher below can pick up either.
            decoded = encoded.decode('base64')
            data += '\t' + decoded
            unshortened, _status = unshorten_only(decoded)
            data += '\t' + unshortened

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
        videoitem.language = IDIOMAS['Italiano']

    # Optional link availability check
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if item.contentType != 'episode':
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel,
                     title=
                     '[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]',
                     url=item.url,
                     action="add_pelicula_to_library",
                     extra="findvideos",
                     contentTitle=item.contentTitle))

    # Inline jwplayer sources embedded in the page JSON.
    source_re = re.compile(
        r'\{"file":"([^"]+)","type":"[^"]+","label":"([^"]+)"\}', re.DOTALL)
    for scrapedurl, scrapedtitle in source_re.findall(data):
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=item.title + " " + scrapedtitle + " quality",
                 url=scrapedurl.replace(r'\/', '/').replace('%3B', ';'),
                 thumbnail=item.thumbnail,
                 fulltitle=item.title,
                 show=item.title,
                 server='',
                 contentType=item.contentType,
                 folder=False))

    return itemlist
Пример #11
0
def expand_url(url):
    """Resolve a shortened URL; return "" when unshortening changed nothing."""
    resolved, _status = unshortenit.unshorten_only(url)
    if resolved != url:
        return resolved
    return ""