# Ejemplo n.º 1 (votos: 0)
def findvideos(item):
    """Collect playable links from a Cuevana-style "reproductor" page.

    Scrapes every numbered player iframe from ``item.url``, resolves each
    link through the site's redirect scripts (ir.php / irgoto.php /
    irgotogd.php / gkplugins), and appends one play Item per resolved link.
    Also adds the "add to video library" entry for movies when enabled.

    :param item: channel Item whose ``url`` points at the detail page.
    :return: list of play Items (may be empty).
    """
    logger.info()
    itemlist = []

    data = load_data(item.url)

    # TODO: review the patterns in this section
    '''if item.extra:
        getContentShow(data, item)
    else:
        getContentMovie(data, item)'''

    pattern = '<div id="reproductor(\d+)".*?src="([^"]+)"'
    subtitles = ""

    title = "[COLOR yellowgreen]Servidor [%s][/COLOR]"
    server = ""
    #itemlist.append(Item(channel = item.channel, title=item.url))
    for rep, link in scrapertools.find_multiple_matches(data, pattern):

        # Keep the first subtitle url found in any link's &sub= parameter;
        # it is reused for every Item appended afterwards.
        if not subtitles:
            subtitles = scrapertools.find_single_match(link, '&sub=(.*)')

        if 'player' in link:
            # In case both methods are present; either way this short-circuits
            if r'ir.php' in link:
                link = scrapertools.find_single_match(
                    link, 'php\?url=(.*?)&').replace('%3A',
                                                     ':').replace('%2F', '/')
                #logger.info("CUEVANA IR %s" % link)
                server = servertools.get_server_from_url(link)
            # other conventional links (fembed, rapidvideo, etc.)
            elif r'irgoto.php' in link:
                link = scrapertools.find_single_match(
                    link, 'php\?url=(.*?)&').replace('%3A',
                                                     ':').replace('%2F', '/')
                # 'aHR0' is base64 for 'http' -- try to decode, otherwise
                # fall back to following the redirect.
                if link.startswith('aHR0'):
                    try:
                        link = base64.b64decode(link.strip() + '==')
                    except:
                        link = RedirectLink(link)
                else:
                    link = RedirectLink(link)
                if link:
                    server = servertools.get_server_from_url(link)
                #logger.info("CUEVANA IRGOTO %s" % link)
            # vanlong (google drive)
            elif r'irgotogd.php' in link:
                link = redirect_url('https:' + link, scr=True)
                server = "directo"
            # openloadpremium does not work on the site; it is fixed here
            elif r'irgotogp.php' in link:
                link = scrapertools.find_single_match(
                    data, r'irgotogd.php\?url=(\w+)')
                #link = redirect_url('https:'+link, "", True)
                link = GKPluginLink(link)
                server = "directo"
            elif r'gdv.php' in link:
                # google drive slows down the link search, is not a good
                # option anyway, and is the first one to get taken down
                continue

            elif r'irgotoolp.php' in link:
                continue
            else:
                link = scrapertools.find_single_match(link, 'php.*?=(\w+)&')
                link = GKPluginLink(link)
                server = "directo"

        elif 'openload' in link:
            link = scrapertools.find_single_match(link, '\?h=(\w+)')
            #logger.info("CUEVANA OL HASH %s" % link)
            link = OpenloadLink(link)
            #logger.info("CUEVANA OL %s" % link)
            server = "openload"

        elif 'youtube' in link:
            title = "[COLOR yellow]Ver Trailer (%s)[/COLOR]"
            server = "youtube"
        else:  # Something not implemented: report it if a movie does not show up
            continue

        if not link:
            continue

        # GKplugin may return multiple links with different qualities; if a
        # custom option list for "Directo" becomes available, use it -- for
        # now only the first link found is returned.
        if type(link) is list:
            link = link[0]['link']
        if r'chomikuj.pl' in link:
            # For some users the CH option returns error 401
            link += "|Referer=https://player.%s/plugins/gkpluginsphp.php" % domain
        elif r'vidcache.net' in link:
            # Avoids error 500
            link += '|Referer=https://player.%s/yourupload.com.php' % domain

        itemlist.append(
            item.clone(channel=item.channel,
                       title=title % server.capitalize(),
                       url=link,
                       action='play',
                       subtitle=subtitles,
                       server=server))

    autoplay.start(itemlist, item)

    # Offer "add to video library" only for movies (no contentSerieName set).
    if config.get_videolibrary_support() and len(
            itemlist) and not item.contentSerieName:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir a la videoteca",
                 text_color="green",
                 action="add_pelicula_to_library",
                 url=item.url,
                 thumbnail=item.thumbnail,
                 contentTitle=item.contentTitle))
    return itemlist
# Ejemplo n.º 2 (votos: 0)
def findvideos(item):
    """Resolve playable video urls from a dooplay-based page (animekao).

    For each player option on the page, POSTs the ``doo_player_ajax``
    request, loads the returned iframe, decodes every base64 ``data-r``
    link and maps known hosts to their final playable urls.

    Fixes over the previous version:
    - the inner player soup no longer clobbers the outer ``soup``/``matches``
      variables;
    - a missing ``SLD_A`` element no longer raises TypeError (language then
      falls back to IDIOMAS' default, "VOSE").

    :param item: channel Item whose ``url`` points at the episode/movie page.
    :return: list of play Items (may be empty).
    """
    logger.info()

    itemlist = list()
    sub = ""
    soup = create_soup(item.url)
    matches = soup.find("div", class_="navEP2")
    if not matches:
        return itemlist

    for elem in matches.find_all("li", class_="dooplay_player_option"):

        post = {
            "action": "doo_player_ajax",
            "post": elem["data-post"],
            "nume": elem["data-nume"],
            "type": elem["data-type"]
        }
        headers = {"Referer": item.url}
        doo_url = "%swp-admin/admin-ajax.php" % host

        data = httptools.downloadpage(doo_url, post=post, headers=headers).data

        if not data:
            continue
        player_url = BeautifulSoup(data, "html5lib").find("iframe")["src"]

        player = httptools.downloadpage(player_url,
                                        headers={
                                            "referer": item.url
                                        }).data
        # Dedicated names: previously these reassigned the outer soup/matches.
        player_soup = BeautifulSoup(player, "html5lib")
        sources = player_soup.find_all("li", {"onclick": True})
        # Guard: when "SLD_A" is absent, indexing None raised TypeError and
        # aborted the whole listing. Fall back to IDIOMAS' default instead.
        lang_elem = player_soup.find("li", class_="SLD_A")
        lang = lang_elem["data-lang"] if lang_elem else ""

        for source in sources:
            url = base64.b64decode(source["data-r"]).decode('utf-8')

            if "animekao.club/player.php" in url:
                url = url.replace("animekao.club/player.php?x",
                                  "player.premiumstream.live/player.php?id")
            elif "animekao.club/play.php" in url:
                url = url.replace("animekao.club/play.php?x",
                                  "hls.playerd.xyz/player.php?id")
            elif "https://animekao.club/playmp4" in url:
                file_id = scrapertools.find_single_match(
                    url, "link=([A-z0-9]+)")
                post = {'link': file_id}
                hidden_url = 'https://animekao.club/playmp4/plugins/gkpluginsphp.php'
                dict_vip_url = httptools.downloadpage(hidden_url,
                                                      post=post).json
                url = dict_vip_url['link']
            elif "animekao.club/reproductores" in url:
                v_id = scrapertools.find_single_match(url, "v=([A-z0-9_-]+)")
                url = "https://drive.google.com/file/d/%s/preview" % v_id
            elif "kaodrive" in url:
                new_data = httptools.downloadpage(url).data
                v_id = scrapertools.find_single_match(
                    new_data, 'var shareId = "([^"]+)"')
                url = "https://www.amazon.com/drive/v1/shares/%s" % v_id
            elif "playhydrax.com" in url:
                slug = scrapertools.find_single_match(url, 'v=(\w+)')
                post = "slug=%s&dataType=mp4" % slug
                ping = httptools.downloadpage("https://ping.iamcdn.net/",
                                              post=post).json
                url = ping.get("url", '')
                if url:
                    # The API rotates the last char of the base64 id to the
                    # front; undo that before decoding.
                    url = "https://www.%s" % base64.b64decode(
                        url[-1:] + url[:-1]).decode('utf-8')
                    url += '|Referer=https://playhydrax.com/?v=%s&verifypeer=false' % slug
            elif "kplayer" in url:
                kplayer_data = httptools.downloadpage(url).data
                src = scrapertools.find_single_match(kplayer_data,
                                                     "source: '([^']+)'")
                sgn = scrapertools.find_single_match(kplayer_data,
                                                     "signature: '([^']+)'")
                sub = scrapertools.find_single_match(kplayer_data,
                                                     'file: "([^"]+)"')
                url = "%s%s" % (src, sgn)

            itemlist.append(
                Item(channel=item.channel,
                     title='%s',
                     action='play',
                     url=url,
                     language=IDIOMAS.get(lang, "VOSE"),
                     infoLabels=item.infoLabels,
                     subtitle=sub))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())

    # Required by FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
# Ejemplo n.º 3 (votos: 0)
def findvideos(item):
    """Extract hoster and torrent links from a cinecalidad-family page.

    Scrapes the encoded service links, decodes each video id with the
    page-supplied character offset, and emits one play Item per supported
    server, plus a torrent Item when a protected magnet link is present.

    Fixes over the previous version:
    - ``lang`` now has a default ('latino'), so an unrecognised domain no
      longer raises NameError at ``IDIOMAS[lang]``;
    - ``urlencode`` works on both Python 2 and 3;
    - unknown / never-emitted service ids are skipped up front, so
      ``video_id``/``url``/``server`` can no longer be stale or undefined;
    - an unused ``servidor`` mapping was removed.

    :param item: channel Item whose ``url`` points at the movie page.
    :return: list of play Items (may be empty).
    """
    logger.info()
    itemlist = []
    duplicados = []  # urls already emitted, to avoid duplicates

    # Language is inferred from the domain; default to 'latino' so a new
    # mirror never leaves `lang` unbound.
    lang = 'latino'
    if 'cinemaqualidade' in item.url:
        lang = 'portugues'
    elif 'espana' in item.url:
        lang = 'castellano'

    data = httptools.downloadpage(item.url).data
    patron = 'target=_blank.*? service=.*? data="(.*?)"><li>(.*?)<\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Embed-url prefix for each supported service id.
    server_url = {'YourUpload': 'https://www.yourupload.com/embed/',
                  'Openload': 'https://openload.co/embed/',
                  'TVM': 'https://thevideo.me/embed-',
                  'Streamango': 'https://streamango.com/embed/',
                  'RapidVideo': 'https://www.rapidvideo.com/embed/',
                  'Trailer': '',
                  'BitTorrent': '',
                  'Mega': '',
                  'MediaFire': ''}
    # Character offset used by the page's fromCharCode decoder.
    dec_value = scrapertools.find_single_match(data, 'String\.fromCharCode\(parseInt\(str\[i\]\)-(\d+)\)')
    torrent_link = scrapertools.find_single_match(data, '<a href=".*?/protect/v\.php\?i=([^"]+)"')
    if torrent_link != '':
        # urlencode moved in Python 3; keep both interpreters working.
        try:
            from urllib import urlencode        # Python 2
        except ImportError:
            from urllib.parse import urlencode  # Python 3
        base_url = '%s/protect/v.php' % host
        post = {'i': torrent_link, 'title': item.title}
        post = urlencode(post)
        headers = {'Referer': item.url}
        protect = httptools.downloadpage(base_url + '?' + post, headers=headers).data
        url = scrapertools.find_single_match(protect, 'value="(magnet.*?)"')
        server = 'torrent'

        title = item.contentTitle + ' (%s)' % server
        quality = 'default'
        language = IDIOMAS[lang]

        new_item = Item(channel=item.channel,
                        action='play',
                        title=title,
                        fulltitle=item.contentTitle,
                        url=url,
                        language=language,
                        thumbnail=item.thumbnail,
                        quality=quality,
                        server=server
                        )
        itemlist.append(new_item)

    for video_cod, server_id in matches:
        # Mega/MediaFire/Trailer entries were never emitted, and unknown ids
        # previously reused stale url/server values from earlier iterations.
        if server_id in ('Mega', 'MediaFire', 'Trailer', ''):
            continue
        if server_id not in server_url:
            continue

        video_id = dec(video_cod, dec_value)
        server = server_id.lower()
        thumbnail = item.thumbnail
        if server_id == 'TVM':
            server = 'thevideome'
            url = server_url[server_id] + video_id + '.html'
        else:
            url = server_url[server_id] + video_id

        title = item.contentTitle + ' (%s)' % server
        quality = 'default'
        language = [IDIOMAS[lang], 'vose']
        if url not in duplicados:
            new_item = Item(channel=item.channel,
                            action='play',
                            title=title,
                            fulltitle=item.contentTitle,
                            url=url,
                            language=language,
                            thumbnail=thumbnail,
                            quality=quality,
                            server=server
                            )
            itemlist.append(new_item)
            duplicados.append(url)

    # Required by FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay

    autoplay.start(itemlist, item)

    # itemlist.append(trailer_item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle,
                 ))

    return itemlist
# Ejemplo n.º 4 (votos: 0)
def findvideos(item):
    """List .torrent links for a movie/episode, with emergency-url support.

    Downloads the detail page, extracts quality/audio/year/size/language
    metadata and the .torrent links (single-file or multi-file layout) plus
    subtitle urls. When the page is unreachable it falls back to the
    ``item.emergency_urls`` saved in the video library; when called during
    video-library creation (``item.videolibray_emergency_urls``) it saves
    those urls onto the Item and returns it instead of an itemlist.

    :param item: channel Item; ``extra == 'episodios'`` means the url is
        already the direct torrent link (series flow).
    :return: list of play Items, or the updated Item in lookup mode.
    """
    logger.info()
    itemlist = []
    itemlist_t = []  # Full itemlist of links
    itemlist_f = []  # Itemlist of filtered links
    if not item.language:
        item.language = ['CAST']  # Castilian Spanish by default
    matches = []
    subtitles = []
    item.category = categoria

    #logger.debug(item)

    if item.extra != 'episodios':
        # Download the page data
        data = ''
        patron = '<div class="secciones"><h1>[^<]+<\/h1><br\s*\/><br\s*\/><div class="fichimagen">\s*<img class="carat" src="([^"]+)"'
        try:
            data = re.sub(
                r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                httptools.downloadpage(item.url, timeout=timeout).data)
            #data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
        except:
            pass

        if not data:
            logger.error(
                "ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: "
                + item.url)
            itemlist.append(
                item.clone(
                    action='',
                    title=item.channel.capitalize() +
                    ': ERROR 01: FINDVIDEOS:.  La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log',
                    folder=False))

            if item.emergency_urls and not item.videolibray_emergency_urls:  # Are there emergency urls?
                matches = item.emergency_urls[
                    1]  # Restore the video matches
                subtitles = item.emergency_urls[
                    2]  # Restore the subtitle matches
                item.armagedon = True  # Flag the situation as catastrophic
            else:
                if item.videolibray_emergency_urls:  # Called from video-library creation...
                    return item  # Return the calling Item
                else:
                    return itemlist  # no more data: something is broken, render what we have

        if not item.armagedon:
            # Extract the thumbnail
            if not item.thumbnail:
                item.thumbnail = scrapertools.find_single_match(
                    data, patron)  # save the thumb if missing

            # Extract quality, audio, year, country, size, scrapedlanguage
            patron = '<\/script><\/div><ul>(?:<li><label>Fecha de estreno <\/label>[^<]+<\/li>)?(?:<li><label>Genero <\/label>[^<]+<\/li>)?(?:<li><label>Calidad <\/label>([^<]+)<\/li>)?(?:<li><label>Audio <\/label>([^<]+)<\/li>)?(?:<li><label>Fecha <\/label>.*?(\d+)<\/li>)?(?:<li><label>Pais de Origen <\/label>([^<]+)<\/li>)?(?:<li><label>Tama&ntilde;o <\/label>([^<]+)<\/li>)?(<li> Idioma[^<]+<img src=.*?<br \/><\/li>)?'
            try:
                quality = ''
                audio = ''
                year = ''
                country = ''
                size = ''
                scrapedlanguage = ''
                quality, audio, year, country, size, scrapedlanguage = scrapertools.find_single_match(
                    data, patron)
            except:
                pass
            if quality: item.quality = quality
            if audio: item.quality += ' %s' % audio.strip()
            if not item.infoLabels['year'] and year:
                item.infoLabels['year'] = year
            # Normalise size units (middle dot keeps them from being parsed as numbers)
            size = size.replace('GB', 'G·B').replace('Gb', 'G·b').replace('MB', 'M·B')\
                        .replace('Mb', 'M·b').replace('.', ',').replace('G B', 'G·B').replace('M B', 'M·B')
            if size: item.quality += ' [%s]' % size
            if size:
                item.title = re.sub(
                    r'\s*\[\d+,?\d*?\s\w\s*[b|B]\]', '',
                    item.title)  # Strip size from the title if present

            # Flag images map to languages: 1.png = CAST, 512.png = LAT, otherwise VOSE
            language = []
            matches_lang = re.compile('(\d+.png)',
                                      re.DOTALL).findall(scrapedlanguage)
            for lang in matches_lang:
                if "1.png" in lang and not 'CAST' in language:
                    language += ['CAST']
                if "512.png" in lang and not 'LAT' in language:
                    language += ['LAT']
                if ("1.png" not in lang
                        and "512.png" not in lang) and not 'VOSE' in language:
                    language += ['VOSE']
            if language: item.language = language

            # Extract the .torrent links
            # Multi-file layout
            patron = '<div class="fichadescargat"><\/div><div class="table-responsive"[^>]+>.*?<\/thead><tbody>(.*?)<\/tbody><\/table><\/div>'
            if scrapertools.find_single_match(data, patron):
                data_torrents = scrapertools.find_single_match(data, patron)
                patron = '<tr><td>.*?<\/td><td><a href="([^"]+)"[^>]+><[^>]+><\/a><\/td><\/tr>'
            # Single-file layout
            else:
                data_torrents = data
                patron = '<div class="fichasubtitulos">.*?<\/div><\/li><\/ul>.*?<a href="([^"]+)"'
            matches = re.compile(patron, re.DOTALL).findall(data_torrents)
            if not matches:  # error
                logger.error(
                    "ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web "
                    + " / PATRON: " + patron + data)
                itemlist.append(
                    item.clone(
                        action='',
                        title=item.channel.capitalize() +
                        ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web.  Verificar en la Web esto último y reportar el error con el log',
                        folder=False))

                if item.emergency_urls and not item.videolibray_emergency_urls:  # Are there emergency urls?
                    matches = item.emergency_urls[
                        1]  # Restore the video matches
                    subtitles = item.emergency_urls[
                        2]  # Restore the subtitle matches
                    item.armagedon = True  # Flag the situation as catastrophic
                else:
                    if item.videolibray_emergency_urls:  # Called from video-library creation...
                        return item  # Return the calling Item
                    else:
                        return itemlist  # no more data: something is broken, render what we have

    else:  # SERIES: the urls are already provided
        data = item.url  # initialise data for compatibility
        matches = [item.url]  # initialise matches for compatibility

    # Extract the subtitle urls (Platformtools uses item.subtitle as the default subtitle)
    patron = '<div class="fichasubtitulos">\s*<label class="fichsub">\s*<a href="([^"]+)">Subtitulos\s*<\/a>\s*<\/label>'
    if scrapertools.find_single_match(data, patron) or item.subtitle:
        if item.extra == 'episodios':  # Coming from Series: first url already known
            subtitle = item.subtitle
            del item.subtitle
        else:
            subtitle = scrapertools.find_single_match(data, patron).replace(
                '&#038;', '&').replace('.io/', sufix).replace('.com/', sufix)

        try:
            data_subtitle = re.sub(
                r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                httptools.downloadpage(subtitle, timeout=timeout).data)
        except:
            pass

        if not data_subtitle:
            if item.emergency_urls and not item.videolibray_emergency_urls:  # Are there emergency urls?
                matches = item.emergency_urls[
                    1]  # Restore the video matches
                subtitles = item.emergency_urls[
                    2]  # Restore the subtitle matches
                item.armagedon = True  # Flag the situation as catastrophic
        else:
            patron = '<tbody>(<tr class="fichserietabla_b">.*?<\/tr>)<\/tbody>'  # save the block
            data_subtitle = scrapertools.find_single_match(
                data_subtitle, patron)
            patron = '<tr class="fichserietabla_b">.*?<a href="([^"]+)"'
            subtitles = re.compile(patron, re.DOTALL).findall(
                data_subtitle)  # Build a list with every subtitle
        if subtitles:
            item.subtitle = []
            for subtitle in subtitles:
                subtitle = subtitle.replace('&#038;', '&').replace(
                    '.io/', sufix).replace('.com/', sufix)
                item.subtitle.append(subtitle)

    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)
    #logger.debug(subtitles)
    #logger.debug(data)

    # Lookup to load the emergency urls into the video library...
    if item.videolibray_emergency_urls:
        item.emergency_urls = []  # Initialise emergency_urls
        item.emergency_urls.append(
            [])  # Reserve the slot for local .torrents
        item.emergency_urls.append(
            matches)  # Save the video matches...
        item.emergency_urls.append(
            subtitles)  # Save the subtitle matches

    # Build the video's general title with all the information obtained from TMDB
    if not item.videolibray_emergency_urls:
        item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)

    # Now process the .torrent links
    for scrapedurl in matches:  # read the torrents with the different qualities
        # Work on a clone of the Item
        item_local = item.clone()

        item_local.url = scrapedurl.replace('&#038;', '&').replace(
            '.io/', sufix).replace('.com/', sufix)
        if item.videolibray_emergency_urls:
            item.emergency_urls[0].append(
                scrapedurl)  # save the url and move on to the next one
            continue
        if item.emergency_urls and not item.videolibray_emergency_urls:
            item_local.torrent_alt = item.emergency_urls[0][
                0]  # Save the ALTERNATIVE .torrent url
            if item.armagedon:
                item_local.url = item.emergency_urls[0][
                    0]  # ... promote the emergency url to primary
            del item.emergency_urls[0][0]  # Clean it up once processed

        # Use the size if already present; otherwise read it from the .torrent file
        size = scrapertools.find_single_match(
            item_local.quality, '\s*\[(\d+,?\d*?\s\w\s*[b|B])\]')
        if not size and not item.armagedon:
            size = generictools.get_torrent_size(
                scrapedurl)  # Look up the size in the .torrent
        if size:
            logger.error(size)
            size = size.replace('GB', 'G·B').replace('Gb', 'G·b').replace('MB', 'M·B')\
                        .replace('Mb', 'M·b').replace('.', ',').replace('G B', 'G·B').replace('M B', 'M·B')
            item_local.title = re.sub(
                r'\s*\[\d+,?\d*?\s\w\s*[b|B]\]', '',
                item_local.title)  # Strip size from the title if present
            item_local.quality = re.sub(
                r'\s*\[\d+,?\d*?\s\w\s*[b|B]\]', '',
                item_local.quality)  # Strip size from quality if present
            item_local.torrent_info += '%s' % size  # Append size
            if not item.unify:
                item_local.torrent_info = '[%s]' % item_local.torrent_info.strip(
                ).strip(',')

        if item.armagedon:  # Mark it if catastrophic
            item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality

        # Now render the torrent link
        item_local.title = '[[COLOR yellow]?[/COLOR]] [COLOR yellow][Torrent][/COLOR] ' \
                        + '[COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] %s' % \
                        (item_local.quality, str(item_local.language),  \
                        item_local.torrent_info)                                # Prepare the Torrent title

        # Tidy up title and quality: remove empty tags
        item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]',
                                  '', item_local.title)
        item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '',
                                  item_local.title)
        item_local.title = item_local.title.replace("--", "").replace(
            "[]", "").replace("()", "").replace("(/)", "").replace("[/]",
                                                                   "").strip()
        item_local.quality = re.sub(
            r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '',
            item_local.quality)
        item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '',
                                    item_local.quality)
        item_local.quality = item_local.quality.replace("--", "").replace(
            "[]", "").replace("()",
                              "").replace("(/)",
                                          "").replace("[/]",
                                                      "").replace(".",
                                                                  ",").strip()

        item_local.alive = "??"  # Link quality not verified
        item_local.action = "play"  # Play the video
        item_local.server = "torrent"  # Torrent server

        itemlist_t.append(
            item_local.clone())  # Rendered if languages are not filtered

        # Required by FilterTools
        if config.get_setting(
                'filter_languages',
                channel) > 0:  # Filter when a language is selected
            itemlist_f = filtertools.get_link(
                itemlist_f, item_local,
                list_language)  # Rendered if not empty

        #logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
        #logger.debug(item_local)

    if item.videolibray_emergency_urls:  # All urls saved...
        return item  # ... we are done

    if len(itemlist_f) > 0:  # If there are filtered entries...
        itemlist.extend(itemlist_f)  # Render the filtered list
    else:
        if config.get_setting('filter_languages', channel) > 0 and len(
                itemlist_t) > 0:  # No filtered entries ...
            thumb_separador = get_thumb(
                "next.png")  # ... render everything with a warning
            itemlist.append(
                Item(
                    channel=item.channel,
                    url=host,
                    title=
                    "[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]",
                    thumbnail=thumb_separador,
                    folder=False))
        itemlist.extend(
            itemlist_t)  # Render everything when nothing was filtered

    # Required by AutoPlay
    autoplay.start(itemlist, item)  # Launch Autoplay

    return itemlist
# Ejemplo n.º 5 (votos: 0)
def findvideos(item):
    """List the playable options found in the page's tab blocks.

    Each numbered "tab" div holds one embedded player; the matching anchor
    text carries the quality and, optionally, the language ("QUALITY - LANG").
    """
    logger.info()
    itemlist = []

    data = get_source(item.url)
    tab_links = re.findall('<div id="tab(\d+)".*?<iframe.*?src="([^"]+)"',
                           data, re.DOTALL)

    for tab_id, video_url in tab_links:
        # The tab label is the anchor text pointing at this tab's div.
        label = scrapertools.find_single_match(
            data, '<a href="#tab%s">(.*?)<' % tab_id)
        if '-' in label:
            quality, language = scrapertools.find_single_match(
                label, '(.*?) - (.*)')
            if " / " in language:
                language = language.split(" / ")[1]
        else:
            quality = label
            language = ''

        if 'https:' not in video_url:
            video_url = 'https:' + video_url

        suffix = ''
        if not config.get_setting('unify'):
            if language != '':
                try:
                    suffix += ' [%s]' % IDIOMAS.get(language.capitalize(),
                                                    'Latino')
                except:
                    pass
            if quality != '':
                suffix += ' [%s]' % quality

        video_url = "%s|%s" % (video_url, host)
        entry = Item(channel=item.channel,
                     url=video_url,
                     title='%s' + suffix,
                     contentTitle=item.title,
                     action='play',
                     infoLabels=item.infoLabels)
        if language != '':
            entry.language = IDIOMAS.get(language.capitalize(), 'Latino')
        if quality != '':
            entry.quality = quality

        itemlist.append(entry)

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda it: it.title % it.server.capitalize())

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if (config.get_videolibrary_support() and len(itemlist) > 0
            and item.extra != 'findvideos' and "/episode/" not in item.url):
        itemlist.append(
            Item(
                channel=item.channel,
                title=
                '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                url=item.url,
                action="add_pelicula_to_library",
                extra="findvideos",
                contentTitle=item.contentTitle,
            ))

    return itemlist
# Ejemplo n.º 6 (votos: 0)
def findvideos(item):
    """Build the list of playable video links for a movie page.

    Episodes are delegated to findvid_serie().  Otherwise the page is
    downloaded once and the Streaming / Streaming HD / Streaming 3D /
    Download / Download HD tables are scraped into `itemlist`, then the
    list is link-checked, language-filtered and handed to AutoPlay.
    """
    findhost()

    if item.contentType == "episode":
        return findvid_serie(item)

    def load_links(itemlist, re_txt, color, desc_txt, quality=""):
        # Extract one <table> section of links and append a play Item per row.
        streaming = scrapertoolsV2.find_single_match(data,
                                                     re_txt).replace('"', '')
        support.log('STREAMING=', streaming)
        patron = '<td><a.*?href=(.*?) target[^>]+>([^<]+)<'
        matches = re.compile(patron, re.DOTALL).findall(streaming)
        for scrapedurl, scrapedtitle in matches:
            logger.debug("##### findvideos %s ## %s ## %s ##" %
                         (desc_txt, scrapedurl, scrapedtitle))
            title = "[COLOR " + color + "]" + desc_txt + ":[/COLOR] " + item.fulltitle + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     url=scrapedurl,
                     server=scrapedtitle,
                     fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     quality=quality,
                     contentType=item.contentType,
                     folder=False))

    support.log()

    itemlist = []

    # Download the page once; every section below is scraped from this buffer.
    data = httptools.downloadpage(item.url).data
    data = re.sub('\n|\t', '', data)

    # Extract the quality label shown next to the title (e.g. "[HD]").
    patronvideos = '>([^<]+)</strong></div>'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)
    QualityStr = ""
    for match in matches:
        QualityStr = scrapertoolsV2.decodeHtmlentities(match.group(1))[6:]

    # Streaming (SD)
    load_links(
        itemlist,
        '<strong>Streaming:</strong>(.*?)<tableclass=cbtable height=30>',
        "orange", "Streaming", "SD")

    # Streaming HD
    load_links(
        itemlist,
        '<strong>Streaming HD[^<]+</strong>(.*?)<tableclass=cbtable height=30>',
        "yellow", "Streaming HD", "HD")

    # Streaming 3D
    load_links(
        itemlist,
        '<strong>Streaming 3D[^<]+</strong>(.*?)<tableclass=cbtable height=30>',
        "pink", "Streaming 3D")

    # Download
    load_links(
        itemlist,
        '<strong>Download:</strong>(.*?)<tableclass=cbtable height=30>',
        "aqua", "Download")

    # Download HD
    load_links(
        itemlist,
        '<strong>Download HD[^<]+</strong>(.*?)<tableclass=cbtable width=100% height=20>',
        "azure", "Download HD")

    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(item=item)

    # Required to filter links
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay.  BUGFIX: autoplay.start() used to be called a
    # second time halfway through the extraction (before the 3D/Download
    # sections, the link check and the language filter had run); the
    # premature duplicate call has been removed so AutoPlay sees the final
    # list exactly once.
    autoplay.start(itemlist, item)

    support.videolibrary(itemlist, item)

    return itemlist
Ejemplo n.º 7
0
def findvideos(item):
    """Scrape the links table of a movie page into playable Items.

    Each table row yields (url, language code, quality code); unknown codes
    fall back to IDIOMAS['6'] / CALIDADES['3'].  An optional embedded player
    iframe is appended as an extra link reusing the first row's metadata.
    """
    logger.info()

    itemlist = []

    data = get_source(item.url)

    bloq = scrapertools.find_single_match(data, '</th>(.*?)</table>')

    patron = '<a href="([^"]+)".*?<td>(.*?)</td><td class="hidden-xs">(.*?)</td>'
    matches = re.compile(patron, re.DOTALL).findall(bloq)
    for scrapedurl, lang_value, quality_value in matches:
        server = ""
        # Unknown codes map to the generic fallback entries.
        if lang_value not in IDIOMAS:
            lang_value = '6'
        if quality_value not in CALIDADES:
            quality_value = '3'
        language = IDIOMAS[lang_value]
        quality = CALIDADES[quality_value]
        if not config.get_setting("unify"):
            title = ' [%s] [%s]' % (quality, language)
        else:
            title = ''
        if scrapedurl.startswith("magnet:"):
            server = "torrent"
        itemlist.append(
            Item(channel=item.channel,
                 url=scrapedurl,
                 title='%s' + title,
                 action='play',
                 language=language,
                 quality=quality,
                 infoLabels=item.infoLabels,
                 server=server))

    embed = scrapertools.find_single_match(
        data, 'movie-online-iframe" src="([^"]+)"')
    # BUGFIX: the original indexed itemlist[1] (IndexError with fewer than
    # two scraped links) and built the title from the loop variables of the
    # *last* table row (NameError when the table was empty).  Reuse the first
    # scraped link's metadata and only add the embed when links exist.
    if embed and itemlist:
        fquality = itemlist[0].quality
        flanguage = itemlist[0].language
        title = ' [%s] [%s]' % (fquality, flanguage)
        itemlist.append(
            item.clone(title="%s" + title,
                       url=embed,
                       language=flanguage,
                       quality=fquality,
                       infoLabels=item.infoLabels,
                       action="play"))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    # Required to filter links
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 8
0
def findvideos(item):
    """Turn the (url, server) pairs in item.matches into playable links.

    Torrent links get their size probed from the .torrent file; non-torrent
    links are resolved and liveness-checked through servertools.  The result
    is optionally filtered by the user's selected languages (FilterTools)
    and finally handed to AutoPlay.
    """
    logger.info()

    itemlist = []
    itemlist_t = []  # full, unfiltered list of links
    itemlist_f = []  # list of links after language filtering

    # If "clean_plot" is missing, derive it from "plot".
    if not item.clean_plot and item.infoLabels['plot']:
        item.clean_plot = item.infoLabels['plot']

    # Process the .torrent links with their different qualities.
    for scrapedurl, scrapedserver in item.matches:
        # Work on a clone so the incoming Item stays untouched.
        item_local = item.clone()

        item_local.url = scrapedurl
        item_local.server = scrapedserver.lower()
        item_local.action = "play"

        # Look up the download size inside the .torrent file (magnets have
        # no .torrent to probe).
        size = ''
        if item_local.server == 'torrent' and not size and not item_local.url.startswith(
                'magnet:'):
            size = generictools.get_torrent_size(
                item_local.url
            )  #              fetch the size from the .torrent on the site

        if size:
            # Insert middle dots / swap decimal separator for display.
            size = size.replace('GB', 'G·B').replace('Gb', 'G·b').replace('MB', 'M·B')\
                        .replace('Mb', 'M·b').replace('.', ',')
            item_local.torrent_info = '%s, ' % size  # prepend the size
        if item_local.url.startswith(
                'magnet:') and not 'Magnet' in item_local.torrent_info:
            item_local.torrent_info += ' Magnet'
        if item_local.torrent_info:
            item_local.torrent_info = item_local.torrent_info.strip().strip(
                ',')
            if not item.unify:
                item_local.torrent_info = '[%s]' % item_local.torrent_info

        # Render the link title: [?] [Server] [quality] languages info.
        item_local.title = '[[COLOR yellow]?[/COLOR]] [COLOR yellow][%s][/COLOR] ' %item_local.server.capitalize() \
                        + '[COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] %s' % \
                        (item_local.quality, str(item_local.language), \
                        item_local.torrent_info)

        # Verify the links.
        if item_local.server != 'torrent':
            if config.get_setting(
                    "hidepremium"
            ):  # premium servers are skipped when not accepted
                if not servertools.is_server_enabled(item_local.server):
                    continue
            devuelve = servertools.findvideosbyserver(
                item_local.url, item_local.server)  # does the link exist?
            if not devuelve:
                continue
            item_local.url = devuelve[0][1]
            item_local.alive = servertools.check_video_link(
                item_local.url, item_local.server,
                timeout=timeout)  # is the link alive?
            if 'NO' in item_local.alive:
                continue
        else:
            if not size or 'Magnet' in size:
                item_local.alive = "??"  # link quality not verified
            elif 'ERROR' in size:
                item_local.alive = "no"  # link quality errored out
                continue
            else:
                item_local.alive = "ok"  # link quality verified

        itemlist_t.append(
            item_local.clone())  # shown when no language filter applies

        # Required for FilterTools
        if config.get_setting(
                'filter_languages',
                channel) > 0:  # a language is selected -> filter
            itemlist_f = filtertools.get_link(
                itemlist_f, item_local,
                list_language)  # shown when the filtered list is non-empty

    if len(itemlist_f) > 0:  # there are filtered entries...
        itemlist.extend(itemlist_f)  # ...show the filtered list
    else:
        if config.get_setting('filter_languages', channel) > 0 and len(
                itemlist_t) > 0:  # nothing matched the filter...
            thumb_separador = get_thumb(
                "next.png")  # ...show everything, preceded by a warning
            itemlist.append(
                Item(
                    channel=item.channel,
                    url=host,
                    title=
                    "[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]",
                    thumbnail=thumb_separador,
                    folder=False))
        itemlist.extend(
            itemlist_t)  # show everything when no filtering applies

    # Required for AutoPlay
    autoplay.start(itemlist, item)  # launch AutoPlay

    return itemlist
Ejemplo n.º 9
0
def findvideos(item):
    """Extract playable video links from the site's JSON API response.

    Movies read json_data['title']['videos']; episodes read the episode's
    video list selected by item.ep_info.  Each entry's "name" field carries
    the language label, normalised through TMP_IDIOMAS and then IDIOMAS.
    """

    logger.info()

    itemlist = []
    # Site language labels / short codes -> the channel's short codes.
    TMP_IDIOMAS = {
        'Latino': 'la',
        'Castellano': 'es',
        'Subtitulado': 'sub',
        'VO': 'en',
        'Ingles': 'en',
        'zc': 'es',
        'zl': 'la',
        'zs': 'sub'
    }

    data = get_source(item.url)
    json_data = jsontools.load(data)
    if len(json_data) > 0:
        if item.contentType != 'episode':
            videos_info = json_data['title']['videos']
        else:
            videos_info = json_data['title']['season']['episodes'][
                item.ep_info]['videos']

        for elem in videos_info:
            quality = elem['quality']
            url = elem['url']

            # The language is embedded in "name" in one of three layouts:
            # before an <img> tag, after a '-', or as the whole field.
            if '<img' in elem['name']:
                full_language = scrapertools.find_single_match(
                    elem['name'], '-(.*?)<img').strip()
            elif '-' in elem['name']:
                full_language = elem['name'].partition('-')[2].strip()
            else:
                full_language = elem['name'].strip()

            # Unknown (or empty) labels fall back to English.  Simplified
            # from the original, which re-tested lang != '' although the
            # fallback guaranteed it could never be empty.
            lang = IDIOMAS[TMP_IDIOMAS.get(full_language, 'en')]

            if not config.get_setting('unify'):
                title = ' [%s] [%s]' % (lang, quality)
            else:
                title = ''

            itemlist.append(
                Item(channel=item.channel,
                     title='%s' + title,
                     action='play',
                     url=url,
                     language=lang,
                     infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if item.contentType != 'episode':
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 10
0
def findvideos(item):
    """Resolve every player link on the page into a playable Item.

    Links go either through the "fast" player (file URL embedded in the
    player page) or through the site's redirector (target packed inside
    the Location header).  Search hits with no links are treated as a
    series page and re-dispatched to episodios().
    """
    logger.info()

    itemlist = []

    data = get_source(item.url)
    domain = scrapertools.find_single_match(data,
                                            "var web = { domain: '(.*?)'")

    links = re.findall('link="([^"]+)"', data, re.DOTALL)

    # The page flags subtitle-less episodes with an explicit notice.
    no_subs = '</strong> ¡Este capítulo no tiene subtítulos, solo audio original! </div>' in data
    language = IDIOMAS['vo'] if no_subs else IDIOMAS['sub']

    for link in links:
        request_headers = {'referer': link}
        token = scrapertools.find_single_match(link, 'token=(.*)')
        if 'fast.php' in link:
            # "fast" player: the final file URL is inside the player page.
            player = 'https://player.rldev.in/fast.php?token=%s' % token
            body = httptools.downloadpage(player,
                                          headers=request_headers).data
            final_url = scrapertools.find_single_match(body,
                                                       "'file':'([^']+)'")
        else:
            # Generic redirector: the target URL is packed inside the
            # Location header as ...NNN@@@<url>@@@...
            player = domain + 'api/redirect.php?token=%s' % token
            resp_headers = httptools.downloadpage(
                player, headers=request_headers,
                follow_redirects=False).headers
            final_url = scrapertools.find_single_match(
                resp_headers['location'], '\d+@@@(.*?)@@@')

        itemlist.append(
            Item(channel=item.channel,
                 title='[%s] [%s]',
                 url=final_url,
                 action='play',
                 language=language))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % (x.server.capitalize(), x.language))

    # A search hit with no direct links is actually a series page.
    if not itemlist and item.type == 'search':
        item.contentSerieName = item.contentTitle
        item.contentTitle = ''
        return episodios(item)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
Ejemplo n.º 11
0
def findvideos(item):
    """Collect the playable sources of an episode/movie page.

    The site numbers the first episode either 0 or 1: when "-episode-0"
    404s, the URL is retried with "-episode-1".  Subtitles (when present)
    arrive base64-encoded in the "&sub=" query parameter.
    """
    logger.info()
    itemlist = []
    if "-episode-0" in item.url:
        data1 = httptools.downloadpage(item.url).data
        if "Page not found</h1>" in data1:
            item.url = item.url.replace("-episode-0", "-episode-1")

    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "",
                  httptools.downloadpage(item.url).data)
    matches = scrapertools.find_multiple_matches(data, 'data-video="(.*?)"')
    url = ''
    urlsub = scrapertools.find_single_match(data, "&sub=(.*?)&cover")
    if urlsub != '':
        urlsub = base64.b64decode(urlsub)
        urlsub = 'https://sub.movie-series.net' + urlsub
    for source in matches:
        if '/streaming.php' in source:
            new_data = httptools.downloadpage("https:" + source).data
            url = scrapertools.find_single_match(
                new_data, "file: '(https://redirector.*?)'")
            thumbnail = "https://martechforum.com/wp-content/uploads/2015/07/drive-300x300.png"
            if url == "":
                # No redirector file: fall through via the load.php player.
                source = source.replace("streaming.php", "load.php")
        elif '/load.php' in source:
            new_data = httptools.downloadpage("https:" + source).data
            # BUGFIX: the original class [A-z0-9] also matches the
            # characters between 'Z' and 'a' ("[\]^_`"); [A-Za-z0-9]
            # matches only alphanumeric subdomain labels.
            url = scrapertools.find_single_match(
                new_data, "file: '(https://[A-Za-z0-9]+.cdnfile.info/.*?)'")
            thumbnail = "https://vidcloud.icu/img/logo_vid.png"
        else:
            url = source
            thumbnail = ""
        if "https://redirector." in url or "cdnfile.info" in url:
            # These hosts only serve the file with the player referer set.
            url = url + "|referer=https://vidcloud.icu/"

        if url != "":
            itemlist.append(
                Item(channel=item.channel,
                     url=url,
                     title='%s',
                     action='play',
                     plot=item.plot,
                     thumbnail=thumbnail,
                     subtitle=urlsub))

    itemlist = servertools.get_servers_itemlist(itemlist,
                                                lambda i: i.title % i.server)
    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra == 'film':
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir a la Videoteca",
                 text_color="yellow",
                 action="add_pelicula_to_library",
                 url=item.url,
                 thumbnail=item.thumbnail,
                 contentTitle=item.contentTitle))
    # Required for AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
Ejemplo n.º 12
0
def findvideos(item):
    """Resolve the Cuevana player iframes of a page into playable links.

    Each iframe link is decoded according to its player type (ir.php /
    irgoto.php redirects, GKPlugin hashes, Openload hashes, YouTube
    trailers); unknown players are skipped.
    """
    logger.info()
    itemlist = []

    data = load_data(item.url)
    if item.extra:
        getContentShow(data, item)
    else:
        getContentMovie(data, item)
    pattern = '<iframe width="650" height="450" scrolling="no" src="([^"]+)'
    subtitles = scrapertools.find_single_match(data, '<iframe width="650" height="450" scrolling="no" src=".*?sub=([^"]+)"')

    for link in scrapertools.find_multiple_matches(data, pattern):
        # BUGFIX: the title template used to be shared across iterations, so
        # once a YouTube link set it to "Ver Trailer", every later server
        # link was mislabelled as a trailer too.  It is now reset per link.
        title = "[COLOR blue]Servidor [%s][/COLOR]"
        if 'player4' in link:
            # Both redirect flavours may appear; this chain short-circuits.
            if r'ir.php' in link:
                link = scrapertools.find_single_match(link, 'php\?url=(.*?)&').replace('%3A', ':').replace('%2F', '/')
                logger.info("CUEVANA IR %s" % link)
            elif r'irgoto.php' in link:
                link = scrapertools.find_single_match(link, 'php\?url=(.*?)&').replace('%3A', ':').replace('%2F', '/')
                link = RedirectLink(link)
                logger.info("CUEVANA IRGOTO %s" % link)
            elif r'gdv.php' in link:
                # Google Drive slows the lookup, is unreliable, and is the
                # first host to get taken down -> skip it.
                continue
            else:
                link = scrapertools.find_single_match(link, 'php.*?=(\w+)&')
                link = GKPluginLink(link)

        elif 'openload' in link:
            link = scrapertools.find_single_match(link, '\?h=(\w+)&')
            logger.info("CUEVANA OL HASH %s" % link)
            link = OpenloadLink(link)
            logger.info("CUEVANA OL %s" % link)

        elif 'youtube' in link:
            title = "[COLOR yellow]Ver Trailer (%s)[/COLOR]"
        else:
            # Unknown player type: skip (report if a movie goes missing).
            continue

        if not link:
            continue

        # GKPluginLink may return several quality options; only the first
        # one is used for now.
        if type(link) is list:
            link = link[0]['link']
        if r'chomikuj.pl' in link:
            # Some users get HTTP 401 on this host without the referer.
            link += "|Referer=https://player4.cuevana2.com/plugins/gkpluginsphp.php"
        elif r'vidcache.net' in link:
            # Avoids an HTTP 500 from this host.
            link += '|Referer=https://player4.cuevana2.com/yourupload.com.php'

        itemlist.append(
            item.clone(
                channel=item.channel,
                title=title,
                url=link, action='play',
                subtitle=subtitles))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist):
        itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                             action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
                             fulltitle=item.fulltitle))
    return itemlist
Ejemplo n.º 13
0
def findvideos(item):
    """Unpack the page's packed JS blocks and extract the video file URLs.

    Multiple eval-packed blocks each carry a direct file URL; a single
    block either lists episode "slug"s (resolved via api_donghua.php) or
    a single file URL.
    """
    logger.info()
    from lib import jsunpack
    import base64

    itemlist = list()

    def _add_video(url):
        # Normalise protocol-relative URLs and append a playable Item.
        if not url.startswith('http'):
            url = 'http:' + url
        itemlist.append(
            Item(channel=item.channel,
                 title='%s',
                 action='play',
                 url=url,
                 language=IDIOMAS['vose'],
                 infoLabels=item.infoLabels))

    data = httptools.downloadpage(item.url).data
    matches = scrapertools.find_multiple_matches(data, "(eval.*?)\n")
    if len(matches) > 1:
        for pack in matches:
            unpack = jsunpack.unpack(pack)
            _add_video(
                scrapertools.find_single_match(unpack,
                                               'file(?:"|):"([^"]+)'))
    else:
        unpack = jsunpack.unpack(matches[0])
        slugs = scrapertools.find_multiple_matches(unpack, '"slug":"([^"]+)')
        if slugs:
            for slug in slugs:
                url = '%sapi_donghua.php?slug=%s' % (host, slug)
                resp = httptools.downloadpage(url,
                                              headers={
                                                  'Referer': item.url
                                              }).json[0]
                if resp.get('url', ''):
                    # Dailymotion video ids come base64-encoded.
                    url = 'https://www.dailymotion.com/video/' + base64.b64decode(
                        resp['url'])
                elif resp.get('source', ''):
                    url = resp['source'][0].get('file', '')
                _add_video(url)
        else:
            _add_video(
                scrapertools.find_single_match(unpack,
                                               'file(?:"|):"([^"]+)'))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    # BUGFIX: the original tested `itemlist > 0` (list vs int) — always
    # truthy on Python 2 and a TypeError on Python 3; test the length.
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.typo:
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 14
0
def findvideos(item):
    """Scrape both the direct option anchors and the AJAX player options.

    The first block lists ready links (server, quality, language); the
    second lists options that the play action later resolves through a
    doo_player_ajax POST (passed along in `spost`).
    """
    import urllib
    logger.info()
    itemlist = []
    qual_fix = ''
    data = httptools.downloadpage(item.url).data

    # First block: direct option anchors.
    patron = "<a class='optn' href='([^']+)'.*?<img alt='([^']+)'.*?<img src='.*?>([^<]+)<.*?<img src='.*?>([^<]+)<"
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, iserver, quality, language in matches:
        if not qual_fix:
            # Remember the first quality seen as a fallback for the AJAX block.
            qual_fix += quality
        if language == 'Inglés':
            language = 'VO'
        if not config.get_setting('unify'):
            title = ' [%s][%s]' % (quality, IDIOMAS[language])
        else:
            title = ''
        try:
            iserver = iserver.split('.')[0]
            iserver = servers.get(iserver, iserver)
        except:
            pass
        iserver = iserver.capitalize()
        itemlist.append(item.clone(channel=item.channel, title=iserver + title, url=url, action='play', quality=quality,
                                   language=IDIOMAS[language], infoLabels=item.infoLabels, text_color=""))

    # Second block: options resolved later via a doo_player_ajax POST.
    patron = 'tooltipctx.*?data-type="([^"]+).*?'
    patron += 'data-post="(\d+)".*?'
    patron += 'data-nume="(\d+).*?'
    patron += '</noscript> (.*?)</.*?'
    patron += 'assets/img/(.*?)"/>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for tp, pt, nm, language, iserver in matches:
        language = language.strip()
        post = {'action': 'doo_player_ajax', 'post': pt, 'nume': nm, 'type': tp}
        post = urllib.urlencode(post)
        # BUGFIX: `quality` used to be computed only inside the non-unify
        # branch, so with "unify" enabled the clone() below raised NameError
        # (or reused a stale value from the first loop).  Compute it
        # unconditionally.
        if item.quality == '':
            quality = qual_fix if qual_fix else 'SD'
        else:
            quality = item.quality
        if not config.get_setting('unify'):
            title = ' [%s][%s]' % (quality, IDIOMAS[language])
        else:
            title = ''
        try:
            iserver = iserver.split('.')[0]
            iserver = servers.get(iserver, iserver)
        except:
            pass
        iserver = iserver.capitalize()

        itemlist.append(item.clone(title=iserver + title, url="", action='play',
                                   language=IDIOMAS[language], text_color="",
                                   spost=post, quality=quality))

    itemlist.sort(key=lambda it: (it.language, it.title, it.quality))
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)
    if itemlist:
        if item.contentChannel != "videolibrary":
            if config.get_videolibrary_support():
                itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                     action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
                                     contentTitle=item.contentTitle))
    return itemlist
Ejemplo n.º 15
0
def findvideos(item):
    """Decode the page's obfuscated play<N>() handlers into playable links.

    The page pairs each play<N> onclick handler with a language label, and
    a play<N>() function that concatenates an encoded iframe src; dec()
    reverses the encoding and the embed path identifies the server.
    """
    logger.info()

    itemlist = []
    langs = dict()

    data = httptools.downloadpage(item.url).data

    # Map each play<N> handler to its visible language label.
    patron = '<a.*?onclick="return (play\d+).*?;".*?> (.*?) <\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for key, value in matches:
        langs[key] = value.strip()

    # Each play<N>() concatenates an encoded src for the player iframe.
    patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);'
    matches = re.compile(patron, re.DOTALL).findall(data)
    enlace = scrapertools.find_single_match(
        data,
        'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"'
    )

    for scrapedlang, encurl in matches:
        if 'e20fb34' in encurl:
            # The "your" variant needs the extra suffix appended.
            url = dec(encurl) + enlace
        else:
            url = dec(encurl)
        server = ''
        servers = {
            '/opl': 'openload',
            '/your': 'yourupload',
            '/sen': 'senvid',
            '/face': 'netutv',
            '/vk': 'vk'
        }
        server_id = re.sub(r'.*?embed|\.php.*', '', url)
        if server_id and server_id in servers:
            server = servers[server_id]
        logger.debug('server_id: %s' % server_id)
        logger.debug('langs: %s' % langs)
        if langs[scrapedlang] in list_language:
            language = IDIOMAS[langs[scrapedlang]]
        else:
            language = 'Latino'
        # BUGFIX: `idioma` was only assigned for 'Latino' / 'Sub Español',
        # raising NameError (or reusing a stale value) for any other label;
        # default it to an empty tag.
        if langs[scrapedlang] == 'Latino':
            idioma = '[COLOR limegreen]LATINO[/COLOR]'
        elif langs[scrapedlang] == 'Sub Español':
            idioma = '[COLOR red]SUB[/COLOR]'
        else:
            idioma = ''

        title = item.contentSerieName + ' (' + server + ') ' + idioma
        plot = item.plot

        thumbnail = servertools.guess_server_thumbnail(title)

        if 'player' not in url and 'php' in url:
            itemlist.append(
                item.clone(title=title,
                           url=url,
                           action="play",
                           plot=plot,
                           thumbnail=thumbnail,
                           server=server,
                           quality='',
                           language=language))
        logger.debug('url: %s' % url)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
Ejemplo n.º 16
0
Archivo: pelisr.py Proyecto: w1s0/addon
def findvideos(item):
    """Resolve each doo_player option through the admin-ajax endpoint.

    Every (post id, option number, flag image) triple is POSTed to
    admin-ajax.php to obtain the player iframe src; Google Drive links are
    additionally de-obfuscated with generictools.dejuice().
    """
    logger.info()
    from lib import generictools
    import urllib
    itemlist = []
    data = get_source(item.url)
    patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
    matches = re.compile(patron, re.DOTALL).findall(data)
    for id, option, lang in matches:
        lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
        quality = ''
        # BUGFIX: `url` was never reset per option, so when the Google
        # Drive branch failed, the previous iteration's link (or a
        # NameError, silently swallowed) leaked into this one.  Reset it
        # and skip options that yield no link.
        url = ''

        post = {
            'action': 'doo_player_ajax',
            'post': id,
            'nume': option,
            'type': 'movie'
        }
        post = urllib.urlencode(post)
        test_url = 'https://pelisr.com/wp-admin/admin-ajax.php'
        new_data = httptools.downloadpage(test_url, post=post).data
        scrapedurl = scrapertools.find_single_match(new_data, "src='([^']+)'")

        if lang not in IDIOMAS:
            lang = 'en'
        title = '%s'

        if 'drive' in scrapedurl:
            # Google Drive hides the file behind "juiced" JavaScript.
            try:
                enc_data = httptools.downloadpage(scrapedurl,
                                                  headers={
                                                      'Referer': item.url
                                                  }).data
                dec_data = generictools.dejuice(enc_data)
                url, quality = scrapertools.find_single_match(
                    dec_data, '"file":"(.*?)","label":"(.*?)"')
            except:
                pass
        else:
            url = scrapedurl

        if not url:
            continue
        url = url + "|referer=%s" % item.url
        itemlist.append(
            Item(channel=item.channel,
                 url=url,
                 title=title,
                 action='play',
                 quality=quality,
                 language=IDIOMAS[lang],
                 infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())

    # Required to filter links
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 17
0
def findvideos(item):
    """Build the list of playable links for an allcalidad movie page."""
    itemlist = []
    data = httptools.downloadpage(item.url).data

    # AJAX form parameters used by the player ("action" and the key name
    # under which the data-url is posted).
    ajax_block = scrapertools.find_single_match(data, 'var data = {([^\}]+)}')
    action, dataurl = scrapertools.find_single_match(
        ajax_block, "(?is)action : '([^']+)'.*?postID, .*?(\w+) : dataurl")

    # Fill in missing metadata from the page itself.
    if not item.infoLabels["year"]:
        item.infoLabels["year"] = scrapertools.find_single_match(
            data, 'dateCreated.*?(\d{4})')
        if "orig_title" in data:
            original_title = scrapertools.find_single_match(
                data, 'orig_title.*?>([^<]+)<').strip()
            if original_title != "":
                item.contentTitle = original_title

    player_block = scrapertools.find_single_match(
        data, '(?s)<div class="bottomPlayer">(.*?)<script>')
    pairs = scrapertools.find_multiple_matches(
        player_block, '(?is)data-Url="([^"]+).*?data-postId="([^"]*)')

    ajax_endpoint = host + "/wp-admin/admin-ajax.php"
    for data_url_value, post_id in pairs:
        payload = "action=%s&postID=%s&%s=%s" % (action, post_id, dataurl,
                                                 data_url_value)
        response = httptools.downloadpage(ajax_endpoint, post=payload).data
        url = scrapertools.find_single_match(response, '(?i)src="([^"]+)')

        titulo = "Ver en: %s"
        text_color = "white"
        if "goo.gl" in url:
            # Resolve the URL shortener without downloading the body.
            url = httptools.downloadpage(url,
                                         follow_redirects=False,
                                         only_headers=True).headers.get(
                                             "location", "")
        if "youtube" in url:
            titulo = "Ver trailer: %s"
            text_color = "yellow"

        # Skip ad/script entries and options without a post id.
        if "ad.js" in url or "script" in url or "jstags.js" in url or not post_id:
            continue
        if "vimeo" in url:
            url += "|" + "http://www.allcalidad.com"

        itemlist.append(
            item.clone(channel=item.channel,
                       action="play",
                       text_color=text_color,
                       title=titulo,
                       url=url))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda it: it.title % it.server.capitalize())
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if itemlist and item.contentChannel != "videolibrary":
        itemlist.append(Item(channel=item.channel))
        itemlist.append(
            item.clone(channel="trailertools",
                       title="Buscar Tráiler",
                       action="buscartrailer",
                       context="",
                       text_color="magenta"))
        # "Add this movie to the KODI library" option
        if config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir a la videoteca",
                     text_color="green",
                     action="add_pelicula_to_library",
                     url=item.url,
                     thumbnail=item.thumbnail,
                     contentTitle=item.contentTitle))
    return itemlist
Ejemplo n.º 18
0
def findvideos(item):
    """List the video links available for a movie/episode.

    Walks the player options (ul#playeroptionsul).  A 'multiserver'
    option (the "movidy" system) groups several server/language choices
    behind one embed URL and is expanded into individual entries.

    Fixes: the multiserver branch passed ``item.infoLabelss`` (typo), so
    those entries carried no metadata; the inner loop also shadowed the
    outer loop's ``soup``/``elem``/``server``/``lang`` names.
    """
    logger.info()

    itemlist = list()
    itemlist2 = list()
    # Server-name normalization; an empty string marks unsupported servers.
    servers = {'fcom': 'fembed', 'dood': 'doodstream', 
                'hqq': '', 'youtube': '', 'saruch': '',
                'supervideo': '', 'aparat': 'aparatcam'}
    headers = {"Referer": item.url}

    soup = create_soup(item.url)
    matches = soup.find("ul", id="playeroptionsul")
    if not matches:
        return itemlist
    for elem in matches.find_all("li"):

        server = elem.find("span", class_="server").text
        server = re.sub(r"\.\w{2,4}", "", server.lower())  # drop the TLD
        server = servers.get(server, server)

        if not server:
            continue

        doo_url = "%swp-json/dooplayer/v1/post/%s?type=%s&source=%s" % \
                 (host, elem["data-post"], elem["data-type"], elem["data-nume"])

        lang = elem.find("span", class_="title").text
        lang = re.sub(r'SERVER \d+ ', '', lang)
        language = IDIOMAS.get(lang.lower(), "VOSE")

        title = '%s [%s]' % (server.capitalize(), language)
        # Movidy system: one option that bundles several servers/languages.
        if lang.lower() == 'multiserver':
            data = httptools.downloadpage(doo_url, headers=headers).json
            url = data.get("embed_url", "")
            multi_soup = create_soup(url).find("div", class_="OptionsLangDisp")

            for option in multi_soup.find_all("li"):
                onclick = option["onclick"]
                sub_server = option.find("span").text
                sub_lang = option.find("p").text

                sub_server = re.sub(r"\.\w{2,4}", "", sub_server.lower())
                sub_server = servers.get(sub_server, sub_server)
                if not sub_server:
                    continue

                sub_lang = re.sub(' -.*', '', sub_lang)
                sub_language = IDIOMAS.get(sub_lang.lower(), "VOSE")

                sub_url = scrapertools.find_single_match(onclick, r"\('([^']+)")
                stitle = ' [%s]' % sub_language

                if sub_url:
                    # '%s' in the title is later replaced by the real
                    # server name via get_servers_itemlist.
                    itemlist2.append(Item(channel=item.channel, title='%s'+stitle,
                                         action="play", url=sub_url, language=sub_language,
                                         infoLabels=item.infoLabels))

        else:    
            itemlist.append(Item(channel=item.channel, title=title, action="play",
                            language=language, infoLabels=item.infoLabels,
                            server=server, headers=headers, url=doo_url))
    if itemlist2:
        itemlist = servertools.get_servers_itemlist(itemlist2, lambda x: x.title % x.server.capitalize())
    else:
        itemlist.sort(key=lambda i: (i.language, i.server))

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)
    if item.contentType != "episode":
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != "findvideos":
            itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]",
                                 url=item.url, action="add_pelicula_to_library", extra="findvideos",
                                 contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 19
0
def findvid_serie(item):
    """Build playable items for a series episode.

    ``item.url`` carries a pre-scraped HTML fragment containing one or
    more blocks of <a> links; each block may be preceded by a "label:"
    prefix.  The fragment is split into those blocks and every link in
    each block becomes a playable Item.
    """
    def load_vid_series(html, item, itemlist, blktxt):
        # NOTE(review): ``blktxt`` (the block label) is currently unused here.
        patron = '<a href="([^"]+)"[^=]+="_blank"[^>]+>(.*?)</a>'
        # Extract every link in this block; the anchor text is the server name.
        matches = re.compile(patron, re.DOTALL).finditer(html)
        for match in matches:
            scrapedurl = match.group(1)
            scrapedtitle = match.group(2)
            title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     url=scrapedurl,
                     server=scrapedtitle,
                     fulltitle=item.fulltitle,
                     show=item.show,
                     contentType=item.contentType,
                     folder=False))

    support.log()

    itemlist = []
    lnkblk = []   # label text of each link block
    lnkblkp = []  # start offset of each block inside ``data``

    data = item.url

    # First blocks of links
    # A ':' before the first <a> means the fragment starts with a
    # "label:" prefix; otherwise the first block is unlabeled and begins
    # at the first link.
    if data[0:data.find('<a')].find(':') > 0:
        lnkblk.append(data[data.find(' - ') +
                           3:data[0:data.find('<a')].find(':') + 1])
        lnkblkp.append(data.find(' - ') + 3)
    else:
        lnkblk.append(' ')
        lnkblkp.append(data.find('<a'))

    # Find new blocks of links
    # Any text between two links other than the plain ' - ' separator
    # marks the start of a new block.
    patron = r'<a\s[^>]+>[^<]+</a>([^<]+)'
    matches = re.compile(patron, re.DOTALL).finditer(data)
    for match in matches:
        sep = match.group(1)
        if sep != ' - ':
            lnkblk.append(sep)

    # Locate the start offset of every additional block.
    i = 0
    if len(lnkblk) > 1:
        for lb in lnkblk[1:]:
            lnkblkp.append(data.find(lb, lnkblkp[i] + len(lnkblk[i])))
            i = i + 1

    # Feed each block slice to the helper (the last block runs to the end).
    for i in range(0, len(lnkblk)):
        if i == len(lnkblk) - 1:
            load_vid_series(data[lnkblkp[i]:], item, itemlist, lnkblk[i])
        else:
            load_vid_series(data[lnkblkp[i]:lnkblkp[i + 1]], item, itemlist,
                            lnkblk[i])

    autoplay.start(itemlist, item)

    return itemlist
Ejemplo n.º 20
0
def findvideos(item):
    """Collect playable video links for a movie page.

    The page shows one tab per hosting option, each holding an iframe.
    Iframes that point at this site's intermediate pages
    ("https://repro...") need an extra AJAX POST — handled per known
    page in the "Caso" branches — to reveal the real video URL.

    Fixes: ``map(None, a, b)`` is Python-2-only (TypeError on Python 3)
    and is replaced with ``zip_longest``; each intermediate page was
    also downloaded twice (once as ``data_video``, once per case).
    """
    logger.info()

    itemlist = []
    data = get_source(item.url)
    data = scrapertools.find_single_match(data, '<div id="marco-post">.*?<div id="sidebar">')
    data = scrapertools.unescape(data)
    data = scrapertools.decodeHtmlentities(data)
    
    options_regex = '<a href="#tab.*?">.*?<b>(.*?)</b>'
    option_matches = re.compile(options_regex, re.DOTALL).findall(data)

    video_regex = '<iframe.*?src="(.*?)".*?</iframe>'
    video_matches = re.compile(video_regex, re.DOTALL).findall(data)

    # Pad-zip both lists with None (the old ``map(None, ...)`` idiom,
    # which this replaces, only worked on Python 2).
    try:
        from itertools import zip_longest  # Python 3
    except ImportError:
        from itertools import izip_longest as zip_longest  # Python 2

    for option, scrapedurl in zip_longest(option_matches, video_matches):
        if scrapedurl is None:
            continue
        
        scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')

        try:
            source_data = get_source(scrapedurl)
        except Exception:
            logger.info('Error en url: ' + scrapedurl)
            continue

        # This site chains multiple intermediate pages, each with its own rules.
        source_headers = dict()
        source_headers["Content-Type"] = "application/x-www-form-urlencoded; charset=UTF-8"
        source_headers["X-Requested-With"] = "XMLHttpRequest"

        if scrapedurl.find("https://repro") != 0:
            # Case 0: external URL, playable as-is.
            logger.info("Caso 0: url externa")
            itemlist.append(Item(channel=item.channel, title=option, url=scrapedurl, action='play', language=IDIOMA))
        elif scrapedurl.find("pi76823.php") > 0 or scrapedurl.find("pi7.php") > 0:
            # Cases 1/2 (identical except for the script name): POST
            # acc/id/tk back to the page and read 'urlremoto' from json.
            marker = "pi76823.php" if scrapedurl.find("pi76823.php") > 0 else "pi7.php"
            logger.info("Caso 1" if marker == "pi76823.php" else "Caso 2")
            source_regex = 'post\( "(.*?)", { acc: "(.*?)", id: \'(.*?)\', tk: \'(.*?)\' }'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            for source_page, source_acc, source_id, source_tk in source_matches:
                source_url = scrapedurl[0:scrapedurl.find(marker)] + source_page
                source_result = httptools.downloadpage(source_url, post='acc=' + source_acc + '&id=' + 
                                                       source_id + '&tk=' + source_tk, headers=source_headers)
                if source_result.code == 200:
                    source_json = jsontools.load(source_result.data)
                    itemlist.append(Item(channel=item.channel, title=option, url=source_json['urlremoto'], action='play', language=IDIOMA))
        elif scrapedurl.find("reproducir120.php") > 0:
            logger.info("Caso 3")
            # The video id and token are embedded as javascript vars.
            videoidn = scrapertools.find_single_match(source_data, 'var videoidn = \'(.*?)\';')
            tokensn = scrapertools.find_single_match(source_data, 'var tokensn = \'(.*?)\';')
            
            source_regex = 'post\( "(.*?)", { acc: "(.*?)"'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            for source_page, source_acc in source_matches:
                source_url = scrapedurl[0:scrapedurl.find("reproducir120.php")] + source_page
                source_result = httptools.downloadpage(source_url, post='acc=' + source_acc + '&id=' + 
                                                       videoidn + '&tk=' + tokensn, headers=source_headers)
                if source_result.code == 200:
                    source_json = jsontools.load(source_result.data)
                    # 'urlremoto' holds a jwplayer config; pull its file url.
                    urlremoto_regex = "file:'(.*?)'"
                    urlremoto_matches = re.compile(urlremoto_regex, re.DOTALL).findall(source_json['urlremoto'])
                    if len(urlremoto_matches) == 1:
                        itemlist.append(Item(channel=item.channel, title=option, url=urlremoto_matches[0], action='play', language=IDIOMA))
        elif scrapedurl.find("reproducir14.php") > 0:
            logger.info("Caso 4")
            # Here the id/token come as attributes of the player div.
            source_regex = '<div id="player-contenido" vid="(.*?)" name="(.*?)"'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            videoidn = source_matches[0][0]
            tokensn = source_matches[0][1]
            
            source_regex = 'post\( "(.*?)", { acc: "(.*?)"'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            for source_page, source_acc in source_matches:
                source_url = scrapedurl[0:scrapedurl.find("reproducir14.php")] + source_page
                source_result = httptools.downloadpage(source_url, post='acc=' + source_acc + '&id=' + 
                                                       videoidn + '&tk=' + tokensn, headers=source_headers)
                if source_result.code == 200:
                    source_json = jsontools.load(source_result.data)
                    itemlist.append(Item(channel=item.channel, title=option, url=source_json['urlremoto'], action='play', language=IDIOMA))
        else:
            logger.info("Caso nuevo")      

    itemlist = servertools.get_servers_itemlist(itemlist)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 21
0
def findvideos(item):
    """List the available mirrors for a movie or episode."""
    logger.info()

    server_aliases = {'xstream': 'fembed', 'streamhoe': 'fembed'}

    itemlist = []

    # Scrape the internal video id if the item does not carry it yet.
    if not item.v_id:
        page = get_source(item.url)
        page = re.sub('"|\'', '', page)
        item.v_id = scrapertools.find_single_match(page, 'data-id=(\d+)')

    if '/movies/' in item.url:
        url = '%swp-content/themes/dooplay/vs_player.php?id=%s&tv=0&s=0&e=0' % (
            host, item.v_id)
    else:
        episode = item.infoLabels['episode']
        season = item.infoLabels['season']
        url = '%swp-content/themes/dooplay/vs_player.php?id=%s&tv=1&s=%s&e=%s' % (
            host, item.v_id, season, episode)

    # Follow both redirects up to the final sources page.
    url_spider = httptools.downloadpage(url).url
    final_url = httptools.downloadpage(url_spider,
                                       headers={
                                           'Referer': url_spider
                                       }).url

    data = get_source(final_url)

    if "We haven't found any sources for this" in data:
        title = '[COLOR tomato][B]Aún no hay enlaces disponibles para este video[/B][/COLOR]'
        itemlist.append(item.clone(title=title, action=''))
        return itemlist

    # base = site root, token = everything after '/video/'.
    base, _sep, token = final_url.partition('/video/')

    patron = '<img src.*?alt="([^"]+)".*?>([^<]+)'
    patron += '.*?class="quality">([^<]+)'

    for source_id, server, quality in scrapertools.find_multiple_matches(data, patron):
        if quality in ('HD', '?'):
            quality = 'SD'

        title = '[COLOR yellowgreen]%s[/COLOR] [%s]' % (server.capitalize(),
                                                        quality)
        link = '%s/loadsource.php?server=%s&token=%s' % (base, source_id,
                                                         token)

        itemlist.append(
            item.clone(title=title,
                       url=link,
                       action='play',
                       language='VO',
                       server=server_aliases.get(server, server),
                       quality=quality))

    itemlist.sort(key=lambda entry: entry.quality)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() \
                and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 22
0
def findvideos(item):
    """Extract playable links from the channel's obfuscated player page.

    The page defines one ``playN`` javascript function per language
    option; each builds the iframe src from an encoded fragment that is
    decoded here with ``dec()``.

    Fixes: the server-path map was rebuilt on every loop iteration and
    two dead ``title`` assignments are removed.
    """
    logger.info()

    itemlist = []
    langs = dict()

    # Maps a path fragment of the decoded URL to its server name
    # (loop-invariant, so built once).
    servers = {
        '/opl': 'openload',
        '/your': 'yourupload',
        '/sen': 'senvid',
        '/face': 'netutv',
        '/vk': 'vk',
        '/jk': 'streamcherry',
        '/vim': 'gamovideo'
    }

    data = httptools.downloadpage(item.url).data

    # playN -> language label shown next to each player button.
    patron = '<a onclick="return (play\d+).*?;"> (.*?) <\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for key, value in matches:
        langs[key] = value.strip()

    # playN -> encoded URL fragment used to fill the iframe src.
    # NOTE(review): this captures a single digit ('play\d') while the
    # pattern above allows several — confirm pages never have 10+ options.
    patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);'
    matches = re.compile(patron, re.DOTALL).findall(data)
    enlace = scrapertools.find_single_match(
        data,
        'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"'
    )

    for scrapedlang, encurl in matches:

        if 'e20fb34' in encurl:
            # This variant needs the shared suffix appended after decoding.
            url = dec(encurl) + enlace
        else:
            url = dec(encurl)

        server = ''
        server_id = re.sub(r'.*?embed|\.php.*', '', url)
        if server_id and server_id in servers:
            server = servers[server_id]

        if (scrapedlang in langs) and langs[scrapedlang] in list_language:
            language = IDIOMAS[langs[scrapedlang]]
        else:
            language = 'Latino'

        if item.extra == 'peliculas':
            title = item.contentTitle + ' (' + server + ') ' + language
            plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>')
        else:
            title = item.contentSerieName + ' (' + server + ') ' + language
            plot = item.plot

        thumbnail = servertools.guess_server_thumbnail(title)

        # Keep only direct embed pages ('php' links outside the local player).
        if 'player' not in url and 'php' in url:
            itemlist.append(
                item.clone(title=title,
                           url=url,
                           action="play",
                           plot=plot,
                           thumbnail=thumbnail,
                           server=server,
                           quality='',
                           language=language))
    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 23
0
def findvideos(item):
    """Build the list of playable servers from the dooplay player options."""
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)
    options = soup.find("ul", id="playeroptionsul")
    if not options:
        return itemlist

    ajax_url = "%swp-admin/admin-ajax.php" % host
    headers = {"Referer": item.url}

    for option in options.find_all("li"):
        # Youtube entries are trailers, not mirrors.
        if "youtube" in option.find("span", class_="server").text:
            continue

        post = {
            "action": "doo_player_ajax",
            "post": option["data-post"],
            "nume": option["data-nume"],
            "type": option["data-type"]
        }
        flag = option.find("span", class_="flag").img["data-src"]
        lang = scrapertools.find_single_match(flag, r"flags/([^\.]+)\.png")

        data = httptools.downloadpage(ajax_url, post=post, headers=headers).json
        if not data:
            continue

        url = data["embed_url"]
        if "hideload" in url:
            url = unhideload(url)

        if "pelis123" in url:
            # Premium-hosted copies get their own resolver.
            itemlist.extend(get_premium(item, url, lang))
            continue
        if "onlystream" in url:
            continue
        if "zplayer" in url:
            url += "|referer=%s" % host
        itemlist.append(
            Item(channel=item.channel,
                 title="%s",
                 action="play",
                 url=url,
                 language=IDIOMAS.get(lang, "VOSE"),
                 infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda entry: entry.title % entry.server.capitalize())

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)
    if item.contentType != "episode":
        if config.get_videolibrary_support() \
                and len(itemlist) > 0 and item.extra != "findvideos":
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    "[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]",
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 24
0
def findvideos(item):
    """Collect playable links for a title from the site's two listings.

    Queries ``link/repro.php`` (embedded player options) and
    ``link/enlaces_online.php`` (external online links).  Each listing is
    wrapped in a broad try/except so a failure in one does not stop the
    other.
    """
    logger.info()
    itemlist = []

    data = get_source(item.url)
    # Numeric id shared by both listing endpoints.
    video_id = scrapertools.find_single_match(data, 'getEnlaces\((\d+)\)')
    links_url = '%s%s%s' % (host, 'link/repro.php/', video_id)
    online_url = '%s%s%s' % (host, 'link/enlaces_online.php/', video_id)

    # Listing of player options (links_url)

    try:
        data = get_source(links_url)
        patron = 'content ><h2>(.*?)</h2>.*?class=video.*?src=(.*?) scrolling'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for lang_data, scrapedurl in matches:
            if 'Latino' in lang_data:
                language = 'Lat'
            elif 'Español' in lang_data:
                language = 'Cast'
            else:
                language = 'VOSE'
            # The '/i/' frame hides the real embed behind a '/r/' page.
            hidden_url = scrapedurl.replace('/i/', '/r/')
            data = get_source(hidden_url)
            url = scrapertools.find_single_match(data, ':url content=(.*?)>')
            # '%s' placeholder is later filled with the server name.
            title = '%s ' + '[%s]' % language
            if url != '':
                itemlist.append(
                    Item(channel=item.channel,
                         title=title,
                         url=url,
                         action='play',
                         language=language,
                         infoLabels=item.infoLabels))
    except:
        # NOTE(review): any error silently skips this whole listing.
        pass

    # Listing of online links (online_url)
    try:
        data = get_source(online_url)
        patron = '<i class=lang-(.*?)>.*?href=(.*?) '
        matches = re.compile(patron, re.DOTALL).findall(data)
        scrapertools.printMatches(matches)
        for lang_data, scrapedurl in matches:
            if 'lat' in lang_data:
                language = 'Lat'
            elif 'spa' in lang_data:
                language = 'Cast'
            elif 'eng' in lang_data:
                language = 'VOSE'
            else:
                language = 'VO'
            video_id = scrapertools.find_single_match(scrapedurl,
                                                      'index.php/(\d+)/')
            new_url = '%s%s%s%s' % (host, 'ext/index-include.php?id=',
                                    video_id, '&tipo=1')
            data = get_source(new_url)
            video_url = scrapertools.find_single_match(
                data, '<div class=container><a onclick=addURL.*?href=(.*?)>')
            video_url = video_url.replace('%3D', '&') + 'status'
            headers = {
                'Accept':
                'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                'Referer': item.url
            }
            data = httptools.downloadpage(video_url,
                                          headers=headers,
                                          ignore_response_code=True).data
            # The final url comes base64-encoded; '==' restores the padding.
            b64_url = scrapertools.find_single_match(
                data, "var string = '([^']+)';") + '=='
            url = base64.b64decode(b64_url)

            title = '%s ' + '[%s]' % language
            if url != '':
                itemlist.append(
                    Item(channel=item.channel,
                         title=title,
                         url=url,
                         action='play',
                         language=language,
                         infoLabels=item.infoLabels))
    except:
        # NOTE(review): any error silently skips this whole listing.
        pass

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    return sorted(itemlist, key=lambda it: it.language)
Ejemplo n.º 25
0
def findvideos(item):
    """List the mirrors of a cinecalidad-style page.

    Each match pairs an encoded video id with a server label; the id is
    decoded with ``dec()`` and glued to the server's embed base URL.

    Fixes: the original referenced ``video_id``/``url``/``server``
    before assignment (NameError) when the first match was an excluded
    label ('Trailer', 'Mega', ...) or an unmapped server, and carried a
    large ``servidor`` dict that was never used.
    """
    logger.info()
    itemlist = []
    duplicados = []
    data = httptools.downloadpage(item.url).data
    patron = 'target="_blank".*? service=".*?" data="(.*?)"><li>(.*?)<\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Embed base URL per server label (empty entries are never playable).
    server_url = {
        'YourUpload': 'https://www.yourupload.com/embed/',
        'Openload': 'https://openload.co/embed/',
        'TVM': 'https://thevideo.me/embed-',
        'Trailer': '',
        'BitTorrent': '',
        'Mega': '',
        'MediaFire': ''
    }

    for video_cod, server_id in matches:
        # Skip non-playable entries up front so no name is left unbound.
        if server_id in ('BitTorrent', 'Mega', 'MediaFire', 'Trailer', ''):
            continue
        if server_id not in server_url:
            continue

        video_id = dec(video_cod)
        server = server_id.lower()
        thumbnail = item.thumbnail
        if server_id == 'TVM':
            server = 'thevideo.me'
            url = server_url[server_id] + video_id + '.html'
        else:
            url = server_url[server_id] + video_id

        title = item.contentTitle + ' (%s)' % server
        quality = 'default'

        if url not in duplicados:
            itemlist.append(
                item.clone(action='play',
                           title=title,
                           fulltitle=item.contentTitle,
                           url=url,
                           language=IDIOMAS[item.language],
                           thumbnail=thumbnail,
                           quality=quality,
                           server=server))
            duplicados.append(url)

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(
                channel=item.channel,
                title=
                '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                url=item.url,
                action="add_pelicula_to_library",
                extra="findvideos",
                contentTitle=item.contentTitle,
            ))

    return itemlist
Ejemplo n.º 26
0
def findvideos(item):
    """Return the playable options (embedded players and download links)."""
    logger.info()

    itemlist = list()

    raw = httptools.downloadpage(item.url).data
    raw = scrapertools.unescape(raw)

    soup = BeautifulSoup(raw, "html5lib", from_encoding="utf-8")

    # Embedded players: every <li class="STPb"> tab points, via its
    # data-tplayernv id, at the container that holds the iframe.
    for tab in soup.find_all("li", class_="STPb"):

        info_parts = tab.find_all("span")[1].text.split(' - ')

        if 'trailer' in info_parts[0].lower():
            continue

        lang = info_parts[0]

        container = soup.find(id=tab["data-tplayernv"])
        url = container.iframe["src"]
        title = add_lang(lang)

        itemlist.append(Item(channel=item.channel, title='%s' + title, url=url,
                             action='play', infoLabels=item.infoLabels,
                             language=lang))

    # Download table: one row per link; the third <span> holds the language.
    links_table = soup.find("div", class_="TPTblCn")

    for row in links_table.find_all("tr"):
        lang = ''
        try:
            lang = row.find_all("span")[2].text.strip()
        except:
            continue

        if row.a:
            url = row.a["href"]

        title = add_lang(lang)

        new_item = Item(channel=item.channel, title='%s' + title, url=url,
                        action='play', infoLabels=item.infoLabels,
                        language=lang)
        # Links hosted on the site itself are torrents.
        if host in url:
            new_item.server = 'torrent'

        itemlist.append(new_item)

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda entry: entry.title % entry.server.capitalize())

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 27
0
def findvideos(item):
    """Resolve the page's embed tabs into playable links.

    Each tab link is an internal redirector; a headers-only request with
    redirects disabled exposes the real video URL in the ``Location``
    header. YouTube links are kept aside as a trailer and appended last.
    """
    logger.info()
    itemlist = []
    trailer = ''
    data = get_source(item.url)
    patron = '<a href="#embed\d+".*?data-src="([^"]+)".*?"tab">([^<]+)<'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, language in matches:
        url = url.replace('&#038;', '&')
        # Headers-only request: we only need the redirect target
        response = httptools.downloadpage(url,
                                          follow_redirects=False,
                                          headers={'Referer': item.url},
                                          only_headers=True)
        # FIX: the redirector may answer without a Location header (dead
        # link); the original crashed with KeyError here. Skip such entries.
        url = response.headers.get('location', '')
        if not url:
            continue

        if config.get_setting('unify'):
            title = ''
        else:
            title = ' [%s]' % language

        if 'youtube' in url:
            trailer = Item(channel=item.channel,
                           title='Trailer',
                           url=url,
                           action='play',
                           server='youtube')
        else:
            # '%s' placeholder is filled with the server name below
            itemlist.append(
                Item(channel=item.channel,
                     title='%s' + title,
                     url=url,
                     action='play',
                     language=IDIOMAS[language],
                     infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    if trailer != '':
        itemlist.append(trailer)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 28
0
def findvideos(item):
    """List the playable video links reported by the site's JSON API.

    For TV shows, only the entries matching the requested season/episode
    (carried in ``item.ep_info`` / ``item.infoLabels``) are kept.
    """
    logger.info()

    itemlist = []
    item.url = re.sub(host, sec_host, item.url)
    is_tvshow = False
    json_data = httptools.downloadpage(item.url, headers={'Referer': referer}).json

    if len(json_data) > 0:
        videos_info = json_data['title']['videos']

        if str(item.ep_info) != '':
            is_tvshow = True
            epi = item.ep_info
            season = item.infoLabels["season"]

        for video in videos_info:
            # The language is encoded in the flag image filename
            raw_lang = scrapertools.find_single_match(video['name'], '/(.*?).png')

            if 'sub' in raw_lang:
                raw_lang = 'sub'
            elif len(raw_lang) > 2:
                raw_lang = raw_lang[-2:]
            # else: keep the raw two-letter code as-is

            lang = IDIOMAS.get(raw_lang, 'VO')
            title = '' if config.get_setting('unify') else ' [%s]' % lang

            # For series, keep only the requested season/episode
            wanted = (not is_tvshow
                      or (video['season'] == season and video['episode'] == epi))
            if wanted:
                itemlist.append(
                    Item(channel=item.channel,
                         title='%s' + title,
                         action='play',
                         url=video['url'],
                         language=lang,
                         infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if item.contentType != 'episode':
        if (config.get_videolibrary_support()
                and len(itemlist) > 0 and item.extra != 'findvideos'):
            itemlist.append(
                Item(channel=item.channel,
                     title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                     url=item.url,
                     action="add_pelicula_to_library",
                     extra="findvideos",
                     contentTitle=item.contentTitle))

    return itemlist
Ejemplo n.º 29
0
def findvideos(item):
    """Build the list of torrent links for a movie or episode.

    Downloads the listing page, checks for IP blocking and judicial
    takedown via helpers, builds the generic TMDB header through
    generictools, then parses each table row (thumbnail, url, title,
    size, seeds) into a playable torrent Item, optionally filtered by
    the user's selected languages.
    """
    logger.info()
    itemlist = []
    itemlist_t = []                                                                 #Full itemlist of links
    itemlist_f = []                                                                 #Itemlist of language-filtered links
    if not item.language:
        item.language = ['VO']                                                      #VO by default
    matches = []
    item.category = categoria

    #logger.debug(item)

    #Download the page data
    data = ''
    patron = '<tr class="lista2">\s*<td align="(?:[^"]+)?"\s*class="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*style="(?:[^"]+)?">\s*<a href="[^"]+">\s*<img src="([^"]+)?"\s*border="(?:[^"]+)?"\s*alt="(?:[^"]+)?"\s*\/><\/a><\/td>\s*<td\s*align="(?:[^"]+)?"(?:\s*width="[^"]+")?\s*class="(?:[^"]+)?">\s*<a onmouseover="(?:[^"]+)?"\s*onmouseout="(?:[^"]+)?"\s*href="([^"]+)" title="[^"]+">(.*?)<\/a>\s*<a href="[^"]+">\s*<img src="[^"]+"\s*border="(?:[^"]+)?"\s*alt="(?:[^"]+)?"\s*><\/a>(?:\s*<a.*?<\/a>)?\s*<br><span.*?<\/span>\s*<\/td>\s*<td align="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*class="(?:[^"]+)?">.*?<\/td>\s*<td align="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*class="(?:[^"]+)?">(.*?)?<\/td>\s*<td align="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*class="(?:[^"]+)?">\s*<font color="(?:[^"]+)?">(\d+)?<\/font>'
        
    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
        if not PY3:
            data = unicode(data, "utf-8", errors="replace").encode("utf-8")
    except:
        pass
        
    if not data:
        logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:.  La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log', folder=False))
        return itemlist                             #no more data: something is broken, paint what we have

    status, itemlist = check_blocked_IP(data, itemlist)                         #Check whether our IP has been blocked
    if status:
        return itemlist                                                         #IP blocked
    
    matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches:                                                             #error
        item = generictools.web_intervenida(item, data)                         #Check the site has not been taken down
        if item.intervencion:                                                   #It HAS been judicially shut down
            item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)  #Call the method that renders the error
        else:
            logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web.  Verificar en la Web esto último y reportar el error con el log', folder=False))
            return itemlist                         #no more data: something is broken, paint what we have

    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)
    #logger.debug(data)

    #Call the method that builds the video's general title with all the info obtained from TMDB
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)

    #Now process the .torrent links with their different qualities
    for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedsize, scrapedseeds in matches:
        #Work on a copy of Item so the original stays untouched
        item_local = item.clone()
        title = scrapedtitle

        #Analyze the release-name formats of movies and series
        if item_local.contentType == 'movie':
            patron_title = '(.*?)\.([1|2][9|0]\d{2})?\.(.*?)(?:-.*?)?$'
            if not scrapertools.find_single_match(title, patron_title):
                continue
        else:
            patron_title = '(.*?)(\.[1|2][9|0]\d{2})?\.S\d{2}.*?\.([\d|A-Z]{2}.*?)(?:-.*?)?$'
            if not scrapertools.find_single_match(title, patron_title):
                patron_title = '(.*?)\.*([1|2][9|0]\d{2})?(?:\.\d{2}\.\d{2}).*?\.([\d|A-Z]{2}.*?)(?:-.*?)?$'
                if not scrapertools.find_single_match(title, patron_title):
                    continue
        
        try:
            title, year, item_local.quality = scrapertools.find_single_match(title, patron_title)
        except:
            title = scrapedtitle
            year = ''
            item_local.quality = ''
        title = title.replace('.', ' ')
        item_local.quality = item_local.quality.replace('.', ' ')
        item_local.quality = re.sub(r'(?i)proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality).strip()
        
        #Check if a size was already scraped; if not, it would have to come from the .torrent file
        size = scrapedsize
        if size:
            item_local.title = '%s [%s]' % (item_local.title, size)             #Append size at the end of the title
            size = size.replace('GB', 'G·B').replace('Gb', 'G·b').replace('MB', 'M·B')\
                        .replace('Mb', 'M·b').replace('.', ',')
            item_local.torrent_info = '%s, ' % size                             #Add size
        #Add the seeds to the quality info as extra information
        if scrapedseeds:
            item_local.torrent_info += 'Seeds: %s' % scrapedseeds               #Add seeds
        if not item.unify:
                item_local.torrent_info = '[%s]' % item_local.torrent_info.strip().strip(',')

        #Now paint the Torrent link
        item_local.url = urlparse.urljoin(host, scrapedurl)
        item_local.title = '[[COLOR yellow]?[/COLOR]] [COLOR yellow][Torrent][/COLOR] ' \
                        + '[COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] %s' % \
                        (item_local.quality, str(item_local.language),  \
                        item_local.torrent_info)                                #Prepare the Torrent title
        
        #Prepare title and quality, strip out empty color tags
        item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)    
        item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
        item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
        item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality)
        item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
        item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
        
        item_local.alive = "??"                                                     #Link health not verified
        item_local.action = "play"                                                  #Play the video
        item_local.server = "torrent"                                               #Torrent server
        
        itemlist_t.append(item_local.clone())                                       #Painted on screen when languages are not filtered
        
        # Required for FilterTools
        if config.get_setting('filter_languages', channel) > 0:                     #If a language is selected, filter
            itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language)  #Painted on screen if not empty

        #logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
        #logger.debug(item_local)

    if len(itemlist_f) > 0:                                                     #If there are filtered entries...
        itemlist.extend(itemlist_f)                                             #Paint the filtered list
    else:                                                                       
        if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #If there are no filtered entries...
            thumb_separador = get_thumb("next.png")                             #... paint everything with a warning
            itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador, folder=False))
        itemlist.extend(itemlist_t)                                             #Paint everything when nothing was filtered
    
    # Required for AutoPlay
    autoplay.start(itemlist, item)                                              #Launch Autoplay
    
    return itemlist
Ejemplo n.º 30
0
def findvideos(item):
    """Resolve each player option via the site's ``doo_player`` AJAX
    endpoint and build the list of playable links.

    The page exposes one button per player (a type/post/nume triplet plus
    a language flag image); each option is resolved with a POST to
    ``wp-admin/admin-ajax.php`` that returns the iframe with the real URL.
    """
    logger.info()
    from lib import generictools
    # FIX: urllib.urlencode exists only on Python 2; on Python 3 it lives
    # in urllib.parse, so the original crashed with AttributeError there.
    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode        # Python 2
    itemlist = []
    data = get_source(item.url)

    patron = "data-type='([^']+)' data-post='(\d+)' data-nume='(\d+).*?img src='([^\?]+)\?"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for type, id, option, lang in matches:
        # The language code is taken from the flag image filename
        lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
        quality = ''
        if lang not in IDIOMAS:
            lang = 'en'
        if not config.get_setting('unify'):
            title = ' [%s]' % IDIOMAS[lang]
        else:
            title = ''

        # AJAX payload that makes the site return the player iframe
        post = {
            'action': 'doo_player_ajax',
            'post': id,
            'nume': option,
            'type': type
        }
        post = urlencode(post)

        test_url = '%swp-admin/admin-ajax.php' % host
        new_data = httptools.downloadpage(test_url,
                                          post=post,
                                          headers={
                                              'Referer': item.url
                                          }).data
        url = scrapertools.find_single_match(new_data,
                                             "src='([^']+)'").replace(
                                                 'oladblock.me', 'openload.co')
        if url != '':
            itemlist.append(
                Item(channel=item.channel,
                     url=url,
                     title='%s' + title,
                     action='play',
                     quality=quality,
                     language=IDIOMAS[lang],
                     infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())

    # Requerido para Filtrar enlaces

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist