Example #1
def findvideos(item):
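    # Scrape the quality/language variants of the page, then resolve each one
    # either through the onevideo.tv player API (non-http sources) or as a
    # direct server link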
    logger.info()
    itemlist = []
    templist = []
    video_list = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    patron = '<li data-quality=(.*?) data-lang=(.*?)><a href=(.*?) title=.*?'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for quality, lang, scrapedurl in matches:
        url = host + scrapedurl
        title = item.title + ' (' + lang + ') (' + quality + ')'
        templist.append(item.clone(title=title, language=lang, url=url))
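    # Visit each language/quality variant page and parse its JSON "_SOURCE" list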
    for videoitem in templist:
        data = httptools.downloadpage(videoitem.url).data
        urls_list = scrapertools.find_single_match(
            data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
        urls_list = urls_list.split("},")
        for element in urls_list:
            if not element.endswith('}'):
                element = element + '}'
            json_data = jsontools.load(element)
            if 'id' in json_data:
                id = json_data['id']
            sub = ''
            if 'srt' in json_data:
                sub = json_data['srt']

            url = json_data['source'].replace('\\', '')
            server = json_data['server']
            quality = json_data['quality']
            if 'http' not in url:

                new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
                          '=%s&srt=%s' % (url, sub)

                data = httptools.downloadpage(new_url).data
                data = re.sub(r'\\', "", data)
                video_list.extend(servertools.find_video_items(data=data))
                for urls in video_list:
                    if urls.language == '':
                        urls.language = videoitem.language
                    urls.title = item.title + urls.language + '(%s)'

                for video_url in video_list:
                    video_url.channel = item.channel
                    video_url.action = 'play'
                    video_url.quality = quality
                    video_url.server = ""
                    video_url.infoLabels = item.infoLabels
            else:
                title = '%s [%s]' % (server, quality)
                video_list.append(
                    item.clone(title=title,
                               url=url,
                               action='play',
                               quality=quality,
                               server=server,
                               subtitle=sub))
    tmdb.set_infoLabels(video_list)
    if config.get_videolibrary_support(
    ) and len(video_list) > 0 and item.extra != 'findvideos':
        video_list.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))
    return video_list
Example #2
def listado(item):
    logger.info()
    itemlist = []
    item.category = categoria

    #logger.debug(item)

    curr_page = 1  # Initial page
    last_page = 99999  # Initial last page
    last_title = 99999  # Initial last line
    cnt_matches = 0  # Counter of lines inserted into itemlist
    if item.curr_page:
        curr_page = int(item.curr_page)  # If set by a previous pass, use it
        del item.curr_page  # ... and clear it
    if item.last_page:
        last_page = int(item.last_page)  # If set by a previous pass, use it
        del item.last_page  # ... and clear it
    if item.cnt_matches:
        cnt_matches = int(item.cnt_matches)  # If set by a previous pass, use it
    item.cnt_matches = 0
    del item.cnt_matches  # ... and clear it

    cnt_tot = 40  # Maximum number of items per page
    cnt_pct = 0.625  # % of the page to fill
    cnt_title = 0  # Counter of lines inserted into itemlist
    inicio = time.time()  # Make sure the process does not exceed a reasonable time
    fin = inicio + 10  # After this time we render what we have (seconds)
    timeout_search = timeout  # Timeout for downloads
    if item.extra == 'search':
        timeout_search = timeout * 2  # Slightly longer timeout for searches
        if timeout_search < 5:
            timeout_search = 5
    item.tmdb_stat = True  # This channel is not ambiguous in its titles

    # Paging system to avoid empty or half-empty pages when searching series with many episodes
    title_lista = []  # Holds the list of series already in itemlist, to avoid duplicate lines
    if item.title_lista:  # If set by a previous pass, the list is already saved
        title_lista.extend(item.title_lista)  # Reuse the list of previous pages kept in Item
        del item.title_lista  # ... clean up

    if not item.extra2:  # If coming from Catalogue or Alphabet
        item.extra2 = ''

    next_page_url = item.url
    # Max number of lines allowed by TMDB. Max 10 seconds per itemlist so performance does not degrade
    while cnt_title < cnt_tot * cnt_pct and cnt_matches + 1 < last_title \
            and fin > time.time():

        # Download the page
        data = ''
        try:
            data = httptools.downloadpage(next_page_url,
                                          timeout=timeout_search).data
            data = unicode(data, "utf-8", errors="replace").encode("utf-8")
            pos = data.find('[')
            if pos > 0: data = data[pos:]
        except:
            pass

        if not data:  # If the site is down, exit without raising an error
            logger.error(
                "ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: "
                + item.url + " / DATA: " + data)
            itemlist.append(
                item.clone(
                    action='',
                    title=item.channel.capitalize() +
                    ': ERROR 01: LISTADO:.  La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'
                ))
            break  # If there is no more data, something is wrong; render what we have

        matches = jsontools.load(data)  # Load the data as a dict
        if not matches and data[1] != ']':  # error
            item = generictools.web_intervenida(
                item, data)  # Check whether the site has been taken down
            if item.intervencion:  # It has been shut down by court order
                item, itemlist = generictools.post_tmdb_episodios(
                    item, itemlist)  # Call the method that renders the error
                return itemlist  # Exit

            logger.error(
                "ERROR 02: LISTADO: Ha cambiado la estructura de la Web " +
                data)
            itemlist.append(
                item.clone(
                    action='',
                    title=item.channel.capitalize() +
                    ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web.  Reportar el error con el log'
                ))
            return itemlist  # If there is no more data, something is wrong; render what we have

        last_title = len(matches)  # Total number of matches
        matches = matches[cnt_matches:]  # Skip ahead to the current page

        #logger.debug(matches)
        #logger.debug(data)

        if last_page == 99999:  # If still the initial value, compute the number of items
            last_page = int((last_title / (cnt_tot * cnt_pct)))
            curr_page = 1

        # Start processing the matches
        for titulo in matches:
            cnt_title += 1  # Add 1 to the titles processed
            if cnt_title > cnt_tot * cnt_pct:
                cnt_title += last_title
                break
            cnt_matches += 1  # Add 1 to the total of titles processed

            title = titulo.get("nom", "")  # Title name
            title = title.replace("á", "a").replace("é", "e").replace("í", "i")
            title = title.replace("ó", "o").replace("ú", "u").replace("ü", "u")
            title = title.replace("�", "ñ").replace("&ntilde;", "ñ").replace("&#8217;", "'")
            title = title.replace("&atilde;", "a").replace("&etilde;", "e")
            title = title.replace("&itilde;", "i").replace("&otilde;", "o")
            title = title.replace("&utilde;", "u")

            item_local = item.clone()  # Clone the Item to work on
            if item_local.tipo:  # ... and clean it up
                del item_local.tipo
            if item_local.totalItems:
                del item_local.totalItems
            if item_local.post_num:
                del item_local.post_num
            if item_local.intervencion:
                del item_local.intervencion
            if item_local.viewmode:
                del item_local.viewmode
            item_local.text_bold = True
            del item_local.text_bold
            item_local.text_color = True
            del item_local.text_color

            if titulo.get("posterurl", ""):
                item_local.thumbnail = "http://image.tmdb.org/t/p/w342%s" % titulo.get(
                    "posterurl", "")  #thumb
            if titulo.get("backurl", ""):
                item_local.fanart = "http://image.tmdb.org/t/p/w1280%s" % titulo.get(
                    "backurl", "")  #Fanart
            url = titulo.get("magnets", {})  #magnet de diversas calidades
            year = titulo.get("year", "")  #año
            if titulo.get("id", ""):
                item_local.infoLabels["tmdb_id"] = titulo.get("id",
                                                              "")  #TMDB id

            title_subs = []  #creamos una lista para guardar info importante
            item_local.language = []  #iniciamos Lenguaje
            item_local.quality = ''  #inicialmos la calidad
            item_local.context = "['buscar_trailer']"

            item_local.contentType = "movie"  #por defecto, son películas
            item_local.action = "findvideos"

            # Analyze the series formats
            if item_local.extra == 'series':
                item_local.contentType = "tvshow"
                item_local.action = "episodios"
                item_local.season_colapse = True  # Show series grouped by season
                item_local.url = "%s?id=%s" % (api_temp, titulo.get(
                    "id", ""))  # Save the special url for series

            # For movies, check all the magnets, extracting address and quality
            if item_local.contentType == "movie":
                item_local.url = []  # Initialize the list of magnets
                for etiqueta, magnet in titulo.get("magnets", {}).iteritems():
                    if magnet.get("magnet"):  # Look for the active magnets
                        url = magnet.get("magnet")  # Save the magnet
                        quality = magnet.get("quality", "")  # Save the magnet quality
                        item_local.url += [(url, quality)]  # Save it all as url for findvideos
                        item_local.quality += "%s, " % quality.strip()  # Append to the title quality
                item_local.quality = re.sub(r', $', '', item_local.quality)
                if not item_local.url:  # If there are no magnets, skip this title
                    continue

            if item_local.language == []:
                item_local.language = ['CAST']

            # Detect interesting info to save for after TMDB
            if scrapertools.find_single_match(title, '[mM].*?serie'):
                title = re.sub(r'[mM]iniserie', '', title)
                title_subs += ["Miniserie"]
            if scrapertools.find_single_match(title, '[sS]aga'):
                title = re.sub(r'[sS]aga', '', title)
                title_subs += ["Saga"]
            if scrapertools.find_single_match(title, '[cC]olecc'):
                title = re.sub(r'[cC]olecc...', '', title)
                title_subs += ["Colección"]

            if "duolog" in title.lower():
                title_subs += ["[Saga]"]
                title = title.replace(" Duologia", "").replace(" duologia", "")
                title = title.replace(" Duolog", "").replace(" duolog", "")
            if "trilog" in title.lower():
                title_subs += ["[Saga]"]
                title = title.replace(" Trilogia", "").replace(" trilogia", "")
                title = title.replace(" Trilog", "").replace(" trilog", "")
            if "extendida" in title.lower() or "v.e." in title.lower() or "v e " in title.lower():
                title_subs += ["[V. Extendida]"]
                title = title.replace("Version Extendida", "").replace("(Version Extendida)", "")
                title = title.replace("V. Extendida", "").replace("VExtendida", "")
                title = title.replace("V Extendida", "").replace("V.Extendida", "")
                title = title.replace("V  Extendida", "").replace("V.E.", "")
                title = title.replace("V E ", "").replace("V:Extendida", "")

            # Analyze the year.  If unclear, set '-'
            try:
                year_int = int(year)
                if year_int >= 1950 and year_int <= 2040:
                    item_local.infoLabels["year"] = year_int
                else:
                    item_local.infoLabels["year"] = '-'
            except:
                item_local.infoLabels["year"] = '-'

            # Start cleaning the title in several passes
            title = re.sub(r'[sS]erie', '', title)
            title = re.sub(r'- $', '', title)

            # Strip the unnecessary junk from the title
            title = re.sub(
                r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren',
                '',
                title,
                flags=re.IGNORECASE)

            # Finish cleaning the title
            title = re.sub(r'\??\s?\d*?\&.*', '', title)
            title = re.sub(r'[(\[]\s+[)\]]', '', title)
            title = title.replace('()', '').replace('[]', '').strip().lower().title()

            item_local.from_title = title.strip().lower().title()  # Save this label for possible title disambiguation

            # Save the title according to the content type
            if item_local.contentType == "movie":
                item_local.contentTitle = title.strip().lower().title()
            else:
                item_local.contentSerieName = title.strip().lower().title()

            item_local.title = title.strip().lower().title()

            # Save the temp variable holding the title's extra info, to be restored after TMDB
            item_local.title_subs = title_subs

            # Now filter by language, if applicable, and render what is valid
            if config.get_setting(
                    'filter_languages',
                    channel) > 0:  # If a language is selected, filter
                itemlist = filtertools.get_link(itemlist, item_local,
                                                list_language)
            else:
                itemlist.append(item_local.clone())  # If not, render to screen

            #logger.debug(item_local)

    # Pass the complete itemlist to TMDB
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    # Call the method that polishes the titles obtained from TMDB
    item, itemlist = generictools.post_tmdb_listado(item, itemlist)

    # Add pagination if necessary
    if cnt_title >= cnt_tot * cnt_pct:

        title = '%s' % curr_page

        if cnt_matches + 1 >= last_title:  # If everything on this page has been rendered...
            cnt_matches = 0  # ... read another page on the next pass
            next_page_url = re.sub(
                r'page=(\d+)', r'page=' +
                str(int(re.search('\d+', next_page_url).group()) + 1),
                next_page_url)

        itemlist.append(
            Item(channel=item.channel,
                 action="listado",
                 title=">> Página siguiente " + title,
                 url=next_page_url,
                 extra=item.extra,
                 extra2=item.extra2,
                 last_page=str(last_page),
                 curr_page=str(curr_page + 1),
                 cnt_matches=str(cnt_matches)))

    return itemlist
Example #3
def findvideos(item):
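    # Collect the per-language player iframes, then resolve each stream through
    # the site's gkpluginsphp.php / openload api.php / r.php endpoints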
    logger.info()

    itemlist = []
    data = get_source(item.url)
    selector_url = scrapertools.find_multiple_matches(
        data, 'id="reproductor\d+".*?src="([^"]+)"')

    for lang in selector_url:
        data = get_source('https:' + lang)
        urls = scrapertools.find_multiple_matches(data,
                                                  'data-playerid="([^"]+)">')
        subs = ''
        lang = scrapertools.find_single_match(lang, 'lang=(.*)?')
        language = IDIOMAS[lang]

        if item.contentType == 'episode':
            quality = 'SD'
        else:
            quality = item.quality

        for url in urls:
            final_url = httptools.downloadpage('https:' + url).data
            if language == 'VOSE':
                sub = scrapertools.find_single_match(url, 'sub=(.*?)&')
                subs = 'https:%s' % sub
            if 'index' in url:
                try:
                    file_id = scrapertools.find_single_match(
                        url, 'file=(.*?)&')
                    post = {'link': file_id}
                    post = urllib.urlencode(post)
                    hidden_url = 'https://player.mirapelisonline.com/repro/plugins/gkpluginsphp.php'
                    data_url = httptools.downloadpage(hidden_url,
                                                      post=post).data
                    dict_vip_url = jsontools.load(data_url)
                    url = dict_vip_url['link']
                except:
                    pass
            else:
                try:

                    if 'openload' in url:
                        file_id = scrapertools.find_single_match(
                            url, 'h=(\w+)')
                        post = {'h': file_id}
                        post = urllib.urlencode(post)

                        hidden_url = 'https://player.mirapelisonline.com/repro/openload/api.php'
                        data_url = httptools.downloadpage(
                            hidden_url, post=post, follow_redirects=False).data
                        json_data = jsontools.load(data_url)
                        url = scrapertools.find_single_match(
                            data_url, "VALUES \('[^']+','([^']+)'")
                        if not url:
                            url = json_data['url']
                        if not url:
                            continue
                    else:
                        new_data = httptools.downloadpage('https:' + url).data
                        file_id = scrapertools.find_single_match(
                            new_data, 'value="([^"]+)"')
                        post = {'url': file_id}
                        post = urllib.urlencode(post)
                        hidden_url = 'https://player.mirapelisonline.com/repro/r.php'
                        data_url = httptools.downloadpage(
                            hidden_url, post=post, follow_redirects=False)
                        url = data_url.headers['location']
                except:
                    pass
            url = url.replace(" ", "%20")
            itemlist.append(
                item.clone(title='[%s] [%s]',
                           url=url,
                           action='play',
                           subtitle=subs,
                           language=language,
                           quality=quality,
                           infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % (x.server.capitalize(), x.language))

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
Example #4
def entradas(item):
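    # Build the movie listing from the site's JSON catalogue, mapping each
    # entry's metadata into infoLabels and precomputing the transcoder URLs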
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)

    if "Destacados" in item.title:
        itemlist.append(
            item.clone(
                title=
                "Aviso: Si una película no tiene (imagen/carátula) NO va a funcionar",
                action="",
                text_color=color4))

    for child in data["a"]:
        infolabels = {}

        infolabels['originaltitle'] = child['originalTitle']
        infolabels['plot'] = child['description']
        infolabels['year'] = child['year']
        if child.get('tags'):
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rateHuman'].replace(',', '.')
        infolabels['votes'] = child['rateCount']
        if child.get('runtime'):
            try:
                infolabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if child.get('cast'): infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']
        url = host % "movie/%s/movie.js" % child["id"]
        # Fanart
        fanart = host % "movie/%s/background_480.jpg" % child["id"]
        if child.get("episode"):
            thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"]
        else:
            thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"]

        if child['height'] < 720:
            quality = "SD"
        elif child['height'] < 1080:
            quality = "720p"
        elif child['height'] >= 1080:
            quality = "1080p"
        contentTitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
            .decode("utf-8")
        if child['name'] == "":
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        #if child['year']:
        #    title += " (" + child['year'] + ")"
        #title += quality
        thumbnail += "|User-Agent=%s" % httptools.get_user_agent
        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 server="",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 contentTitle=contentTitle,
                 infoLabels=infolabels,
                 video_urls=video_urls,
                 text_color=color3,
                 quality=quality))

    return itemlist
Example #5
def episodios(item):
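    # Flatten the season/episode JSON into [season, episode, season_id] triples
    # and build one playable Item per episode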
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)

    capitulos = []
    if data.get("b"):
        for child in data["b"]:
            for child2 in child["a"]:
                capitulos.append([child["season"], child2, child["id"]])
    else:
        for child in data.get("a", []):
            capitulos.append(['', child, ''])

    for season, child, id_season in capitulos:
        infoLabels = item.infoLabels.copy()

        if child.get('runtime'):
            try:
                infoLabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if not season or not season.isdigit():
            season = scrapertools.find_single_match(child['name'], '(\d+)x\d+')
        try:
            infoLabels['season'] = int(season)
        except:
            infoLabels['season'] = 0

        if not child['episode']:
            episode = scrapertools.find_single_match(child['name'],
                                                     '\d+x(\d+)')
            if not episode:
                episode = "0"
            infoLabels['episode'] = int(episode)
        else:
            infoLabels['episode'] = int(child['episode'])
        infoLabels['mediatype'] = "episode"

        url = host % "movie/%s/movie.js" % child["id"]
        thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"]
        if id_season:
            fanart = host % "list/%s/background_1080.jpg" % id_season
        else:
            fanart = item.fanart

        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        try:
            title = contentTitle = child['name'].rsplit(
                " ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
        except:
            title = contentTitle = child['id'].replace("-", " ")
        thumbnail += "|User-Agent=%s" % httptools.get_user_agent
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 contentTitle=contentTitle,
                 viewmode="movie",
                 show=item.show,
                 infoLabels=infoLabels,
                 video_urls=video_urls,
                 extra="episodios",
                 text_color=color3))

    itemlist.sort(key=lambda it:
                  (it.infoLabels["season"], it.infoLabels["episode"]),
                  reverse=True)
    if itemlist and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la videoteca",
                 text_color=color5,
                 url=item.url,
                 action="add_serie_to_library",
                 infoLabels=item.infoLabels,
                 show=item.show,
                 extra="episodios"))

    return itemlist
Example #6
def search_themoviedb(ShowName):
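    # Query TMDB's TV-search API (es-ES) and return the decoded JSON dict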
    scrapedurl = "https://api.themoviedb.org/3/search/tv?api_key=" + key_tmdb + "&language=es-ES&query=" + ShowName.encode(
        "utf-8", "ignore").replace(" ", "+")
    data = tools.getUrl(scrapedurl)
    dict_data = jsontools.load(data)
    return dict_data
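
# A minimal usage sketch (hypothetical; assumes key_tmdb, tools.getUrl and
# logger are wired up as in the surrounding module):
#
#     dict_data = search_themoviedb(u"Breaking Bad")
#     for show in dict_data.get("results", []):
#         logger.info(show.get("name"))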
Example #7
def seasons(item):
    logger.info()
    itemlist = []
    seasons = []
    # Fetch the HTML and load the JSON
    soup = create_soup(item.url)
    json = jsontools.load(soup.find('script', id='__NEXT_DATA__').text)

    # Look for the "content_id", required to query the site's API
    content_id = json['props']['pageProps'].get('id')
    if not content_id:
        id_ = item.url.replace(host, '').split('/')[2].split('-', 1)
        content_id = '{}-{}'.format(id_[0], id_[1].replace('-', '%20'))

    # Get the JSON with the episodes from the API to sort out seasons (they come in batches)
    episodios = httptools.downloadpage(
        'https://fapi.comamosramen.com/api/byUniqId/{}'.format(
            content_id)).json

    # Walk the episode list and derive the seasons from the differences between episodes
    for episodio in episodios['temporadas']:
        if len(seasons) > 0 and seasons[-1]['temporada'] == int(
                episodio['temporada']):
            seasons[-1]['episodios'].append(episodio)
        else:
            seasons.append({
                'temporada': int(episodio['temporada']),
                'episodios': []
            })
            seasons[-1]['episodios'].append(episodio)

    # Walk the season list for processing
    for season in seasons:
        title, language = set_lang(episodios.get('titulo'))
        infoLabels = {'year': episodios.get('año')}
        ogtitle = title

        # Determine the language
        if item.language:
            language = item.language
        if episodios.get('categorias'):
            if 'Audio Latino' in episodios.get('categorias'):
                language = 'LAT'

        # Determine the contentType dynamically
        if episodios.get('tipo'):
            if episodios.get('tipo') in ['pelicula']:
                contentType = 'movie'
            else:
                contentType = 'tvshow'
        else:
            contentType = ''

        it = Item(action='episodesxseason',
                  channel=item.channel,
                  contentType=contentType,
                  infoLabels=infoLabels,
                  json_episodios=season['episodios'],
                  language=language,
                  plot=episodios.get('descripcion'),
                  thumbnail=item.thumbnail,
                  title=unify.add_languages(
                      (config.get_localized_string(60027) %
                       str(season['temporada'])), language),
                  url=item.url)

        # Assign values to the item according to its contentType
        if contentType == 'movie':
            it.contentTitle = ogtitle
        else:
            it.contentSeason = season['temporada']
            it.contentSerieName = ogtitle
        itemlist.append(it)

    # Assign the infoLabels (if applicable)
    if not item.videolibrary:
        tmdb.set_infoLabels(itemlist, True, force_no_year=True)

    # If there is only one season, return the episodes directly
    if len(itemlist) == 1:
        itemlist = episodesxseason(itemlist[0])

    # Add the "Add to video library" element
    if len(itemlist) > 0 and config.get_videolibrary_support(
    ) and not itemlist[0].contentType == 'movie' and not item.videolibrary:
        itemlist.append(
            Item(action="add_serie_to_library",
                 channel=item.channel,
                 contentType='tvshow',
                 contentSerieName=item.contentSerieName,
                 extra="episodios",
                 title='[COLOR yellow]{}[/COLOR]'.format(
                     config.get_localized_string(60352)),
                 url=item.url))

    return itemlist
Example #8
def findvideos(item):
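    # Decode the obfuscated provider table from the site's JS, decrypt the
    # per-title source list and build one "Ver"/"Descargar" entry per link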
    logger.info()
    itemlist = []
    it1 = []
    it2 = []
    ## Load statuses
    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
    url_targets = item.url

    ## Videos
    id = ""
    type = ""
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]

    if type == "2" and account and item.category != "Cine":
        title = " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )"
        if "Favorito" in item.title:
            title = " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )"
        if config.get_videolibrary_support():
            title_label = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
            it1.append(Item(channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label,
                                 url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False))

            title_label = " ( [COLOR green][B]Tráiler[/B][/COLOR] )"

            it1.append(
                item.clone(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
                     thumbnail=item.thumbnail, show=item.show))

        it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
                             thumbnail=item.thumbnail, show=item.show, folder=True))

    data_js = httptools.downloadpage("%s/templates/hdfull/js/jquery.hdfull.view.min.js" % host).data
    key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

    data_js = httptools.downloadpage("%s/js/providers.js" % host).data

    try:
        data_js = jhexdecode(data_js)
    except:
        from lib.aadecode import decode as aadecode
        data_js = data_js.split(";゚ω゚")
        decode_aa = ""
        for match in data_js:
            decode_aa += aadecode(match)

        data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
        data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)

    data = agrupa_datos(httptools.downloadpage(item.url).data)
    data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
    data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))

    infolabels = {}
    year = scrapertools.find_single_match(data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
    infolabels["year"] = year
    matches = []
    for match in data_decrypt:
        prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\']\})' % match["provider"]))

        server_url = scrapertools.find_single_match(prov['l'], 'return\s*"(.*?)"')

        url = '%s%s' % (server_url, match['code'])
        url = re.sub(r'\'|"|\s|\+', '', url)
        url = re.sub(r'var_\d+\[\d+\]', '', url)
        embed = prov["e"]

        matches.append([match["lang"], match["quality"], url, embed])

    for idioma, calidad, url, embed in matches:
        mostrar_server = True
        option = "Ver"
        option1 = 1
        if re.search(r'return ([\'"]{2,}|\})', embed):
            option = "Descargar"
            option1 = 2
        calidad = unicode(calidad, "utf8").upper().encode("utf8")
        title = option + ": %s (" + calidad + ")" + " (" + idioma + ")"
        thumbnail = item.thumbnail
        plot = item.title + "\n\n" + scrapertools.find_single_match(data,
                                                                    '<meta property="og:description" content="([^"]+)"')
        plot = scrapertools.htmlclean(plot)
        fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')


        if account:
            url += "###" + id + ";" + type
        it2.append(
            item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                 plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
                 contentTitle=item.show, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
    it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
    it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
    for item in it2:
        if "###" not in item.url:
            item.url += "###" + id + ";" + type
    itemlist.extend(it1)
    itemlist.extend(it2)
    ## 2 = movie
    if type == "2" and item.category != "Cine":
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=url_targets, thumbnail = item.thumbnail,
                                 fulltitle = item.contentTitle
                                 ))
    return itemlist
Example #9
def check_addon_updates(verbose=False):
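    # Compare the installed version against updates.json and, if a newer fix is
    # published for this addon version, download and unpack updates.zip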
    logger.info()

    ADDON_UPDATES_JSON = 'https://extra.alfa-addon.com/addon_updates/updates.json'
    ADDON_UPDATES_ZIP = 'https://extra.alfa-addon.com/addon_updates/updates.zip'

    try:
        get_ua_list()
    except:
        pass

    try:
        last_fix_json = os.path.join(
            config.get_runtime_path(),
            'last_fix.json')  # info on the user's fixed version
        # Stored in get_runtime_path instead of get_data_path so it is removed when the version changes

        try:
            localfilename = os.path.join(config.get_data_path(),
                                         'temp_updates.zip')
            if os.path.exists(localfilename): os.remove(localfilename)
        except:
            pass

        # Download the json with the possible updates
        # --------------------------------------------
        data = httptools.downloadpage(ADDON_UPDATES_JSON, timeout=5).data
        if data == '':
            logger.info('No se encuentran actualizaciones del addon')
            if verbose:
                platformtools.dialog_notification(
                    'Alfa ya está actualizado',
                    'No hay ninguna actualización urgente')
            check_update_to_others(
                verbose=verbose)  # Check for updates to other products
            return False

        data = jsontools.load(data)
        if 'addon_version' not in data or 'fix_version' not in data:
            logger.info('No hay actualizaciones del addon')
            if verbose:
                platformtools.dialog_notification(
                    'Alfa ya está actualizado',
                    'No hay ninguna actualización urgente')
            check_update_to_others(
                verbose=verbose)  # Check for updates to other products
            return False

        # Compare the version the user has installed with the update's version
        # ---------------------------------------------------------------------
        current_version = config.get_addon_version(with_fix=False)
        if current_version != data['addon_version']:
            logger.info('No hay actualizaciones para la versión %s del addon' %
                        current_version)
            if verbose:
                platformtools.dialog_notification(
                    'Alfa ya está actualizado',
                    'No hay ninguna actualización urgente')
            check_update_to_others(
                verbose=verbose)  # Check for updates to other products
            return False

        if os.path.exists(last_fix_json):
            try:
                lastfix = {}
                lastfix = jsontools.load(open(last_fix_json, "r").read())
                if lastfix['addon_version'] == data[
                        'addon_version'] and lastfix['fix_version'] == data[
                            'fix_version']:
                    logger.info(
                        'Ya está actualizado con los últimos cambios. Versión %s.fix%d'
                        % (data['addon_version'], data['fix_version']))
                    if verbose:
                        platformtools.dialog_notification(
                            'Alfa ya está actualizado', 'Versión %s.fix%d' %
                            (data['addon_version'], data['fix_version']))
                    check_update_to_others(
                        verbose=verbose)  # Check for updates to other products
                    return False
            except:
                if lastfix:
                    logger.error('last_fix.json: ERROR en: ' + str(lastfix))
                else:
                    logger.error('last_fix.json: ERROR desconocido')
                lastfix = {}

        # Download the zip with the updates
        # ----------------------------------

        downloadtools.downloadfile(ADDON_UPDATES_ZIP,
                                   localfilename,
                                   silent=True)

        # Unzip the zip inside the addon
        # -------------------------------
        try:
            unzipper = ziptools.ziptools()
            unzipper.extract(localfilename, config.get_runtime_path())
        except:
            import xbmc
            xbmc.executebuiltin('Extract("%s", "%s")' %
                                (localfilename, config.get_runtime_path()))
            time.sleep(1)

        # Delete the downloaded zip
        # --------------------------
        try:
            os.remove(localfilename)
        except:
            pass

        # Save information about the fixed version
        # -----------------------------------------
        if 'files' in data: data.pop('files', None)

        open(last_fix_json, "w").write(jsontools.dump(data))

        logger.info('Addon actualizado correctamente a %s.fix%d' %
                    (data['addon_version'], data['fix_version']))
        if verbose:
            platformtools.dialog_notification(
                'Alfa actualizado a', 'Versión %s.fix%d' %
                (data['addon_version'], data['fix_version']))

        check_update_to_others(
            verbose=verbose)  # Check for updates to other products
        return True

    except:
        logger.error('Error al comprobar actualizaciones del addon!')
        logger.error(traceback.format_exc())
        if verbose:
            platformtools.dialog_notification(
                'Alfa actualizaciones', 'Error al comprobar actualizaciones')
        check_update_to_others(
            verbose=verbose)  # Check for updates to other products
        return False
Example #10
def get_server_setting(name, server, default=None, caching_var=True):
    """
    Returns the configuration value of the requested parameter.

    Returns the value of the parameter 'name' in the configuration belonging to the server 'server'.

    Looks in \addon_data\plugin.video.addon\settings_servers for the file server_data.json and reads
    the value of the parameter 'name'. If server_data.json does not exist, it looks in the servers
    folder for the file server.json and creates a server_data.json before returning the requested
    value. If the parameter 'name' does not exist in server.json either, the parameter default is
    returned.

    @param name: name of the parameter
    @type name: str
    @param server: name of the server
    @type server: str
    @param default: value returned if the parameter name does not exist
    @type default: any

    @return: The value of the parameter 'name'
    @rtype: any
    """
    global alfa_caching, alfa_servers
    # Create the folder if it does not exist
    if not filetools.exists(
            filetools.join(config.get_data_path(), "settings_servers")):
        filetools.mkdir(
            filetools.join(config.get_data_path(), "settings_servers"))

    file_settings = filetools.join(config.get_data_path(), "settings_servers",
                                   server + "_data.json")
    dict_settings = {}
    dict_file = {}

    if kodi and caching_var:
        alfa_caching = bool(window.getProperty("alfa_caching"))
        alfa_servers = json.loads(window.getProperty("alfa_servers"))
    if alfa_caching and caching_var and alfa_servers.get(server):
        dict_settings = alfa_servers[server].copy()
        if dict_settings.get(name, ''):
            dict_settings[name] = config.decode_var(dict_settings[name])
            #logger.error('%s, %s: A: %s - D: %s' % (name, server, [alfa_servers[server][name]], [config.decode_var(dict_settings[name])]))

    elif filetools.exists(file_settings):
        # Get the saved configuration from ../settings_servers/<server>_data.json
        try:
            dict_file = jsontools.load(filetools.read(file_settings))
            if isinstance(dict_file, dict) and 'settings' in dict_file:
                dict_settings = dict_file['settings']
                if alfa_caching and caching_var:
                    alfa_servers[server] = dict_settings.copy()
                    window.setProperty("alfa_servers",
                                       json.dumps(alfa_servers))
        except EnvironmentError:
            logger.info("ERROR al leer el archivo: %s" % file_settings)

    if not dict_settings or name not in dict_settings:
        # Get the controls from the file ../servers/server.json
        try:
            list_controls, default_settings = get_server_controls_settings(
                server)
        except:
            default_settings = {}
        if name in default_settings:  # If the parameter exists in server.json, create the server_data.json
            default_settings.update(dict_settings)
            dict_settings = default_settings
            if alfa_caching and caching_var:
                alfa_servers[server] = dict_settings.copy()
                window.setProperty("alfa_servers", json.dumps(alfa_servers))
            dict_file['settings'] = dict_settings
            # Create the file ../settings_servers/<server>_data.json
            if not filetools.write(file_settings, jsontools.dump(dict_file)):
                logger.info("ERROR al salvar el archivo: %s" % file_settings)

    # Return the value of the local parameter 'name' if it exists; otherwise return default
    return dict_settings.get(name, default)
Example #11
def add_channel(item):
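    # Register a community channel from a local JSON file or a URL, storing it
    # in community_channels.json under the next free numeric id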
    logger.debug()
    channel_to_add = {}
    json_file = ''
    result = platformtools.dialog_select(config.get_localized_string(70676), [
        config.get_localized_string(70678),
        config.get_localized_string(70679)
    ])
    if result == -1:
        return
    if result == 0:
        file_path = xbmcgui.Dialog().browseSingle(
            1, config.get_localized_string(70680), 'files')
        try:
            channel_to_add['path'] = file_path
            channel_to_add['url'] = file_path
            json_file = jsontools.load(open(file_path, "r").read())
            channel_to_add['channel_name'] = json_file['channel_name']
        except:
            pass

    elif result == 1:
        url = platformtools.dialog_input("",
                                         config.get_localized_string(70681),
                                         False)
        try:
            if url[:4] != 'http':
                url = 'http://' + url
            channel_to_add['path'] = url
            json_file = jsontools.load(httptools.downloadpage(url).data)
        except:
            pass

    if len(json_file) == 0:
        return
    if "episodes_list" in json_file:
        platformtools.dialog_ok(config.get_localized_string(20000),
                                config.get_localized_string(70682))
        return
    channel_to_add['channel_name'] = json_file['channel_name']
    if 'thumbnail' in json_file:
        channel_to_add['thumbnail'] = json_file['thumbnail']
    if 'fanart' in json_file: channel_to_add['fanart'] = json_file['fanart']
    path = filetools.join(config.get_data_path(), 'community_channels.json')

    community_json = open(path, "r")
    community_json = jsontools.load(community_json.read())
    id = 1
    while str(id) in community_json['channels']:
        id += 1
    community_json['channels'][str(id)] = (channel_to_add)

    with open(path, "w") as file:
        file.write(jsontools.dump(community_json))
    file.close()

    platformtools.dialog_notification(
        config.get_localized_string(20000),
        config.get_localized_string(70683) % json_file['channel_name'])
    import xbmc
    xbmc.sleep(1000)
    platformtools.itemlist_refresh()
    return
Example #12
def get_server_parameters(server):
    """
    Obtiene los datos del servidor
    @param server: Nombre del servidor
    @type server: str

    @return: datos del servidor
    @rtype: dict
    """
    # logger.info("server %s" % server)
    global dict_servers_parameters
    server = server.split('.')[0]
    if not server:
        return {}

    if server not in dict_servers_parameters:
        try:
            # Servers
            if filetools.isfile(
                    filetools.join(config.get_runtime_path(), "servers",
                                   server + ".json")):
                path = filetools.join(config.get_runtime_path(), "servers",
                                      server + ".json")

            # Debriders
            elif filetools.isfile(
                    filetools.join(config.get_runtime_path(), "servers",
                                   "debriders", server + ".json")):
                path = filetools.join(config.get_runtime_path(), "servers",
                                      "debriders", server + ".json")
            #
            # If the server is not properly defined in the channel (the connector does not
            # exist), this shows an error because there is no "path", and the channel must be reviewed
            #
            dict_server = jsontools.load(filetools.read(path))

            # Images: urls and local files under "resources/images" are allowed
            if dict_server.get(
                    "thumbnail") and "://" not in dict_server["thumbnail"]:
                dict_server["thumbnail"] = filetools.join(
                    config.get_runtime_path(), "resources", "media", "servers",
                    dict_server["thumbnail"])
            for k in ['premium', 'id']:
                dict_server[k] = dict_server.get(k, list())

                if isinstance(dict_server[k], str):
                    dict_server[k] = [dict_server[k]]

            if "find_videos" in dict_server:
                dict_server['find_videos']["patterns"] = dict_server[
                    'find_videos'].get("patterns", list())
                dict_server['find_videos']["ignore_urls"] = dict_server[
                    'find_videos'].get("ignore_urls", list())

            if "settings" in dict_server:
                dict_server['has_settings'] = True
            else:
                dict_server['has_settings'] = False

            dict_servers_parameters[server] = dict_server

        except:
            mensaje = config.get_localized_string(59986) % server
            import traceback
            logger.error(mensaje + traceback.format_exc())
            return {}

    return dict_servers_parameters[server]
Example #13
def findvideos(item):
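    # Locate the episode entry for each streaming-server block and collect the
    # resolvable URLs (VVVVID, Streamtape, AnimeWorld, "server 2", ...)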
    import time
    support.info(item)
    itemlist = []
    urls = []
    # resp = support.match(get_data(item), headers=headers, patron=r'data-name="(\d+)">([^<]+)<')
    resp = support.match(item,
                         headers=headers,
                         patron=r'data-name="(\d+)">([^<]+)<')
    data = resp.data
    for ID, name in resp.matches:
        if not item.number:
            item.number = support.match(item.title, patron=r'(\d+) -').match
        match = support.match(
            data,
            patronBlock=r'data-name="' + ID +
            r'"[^>]+>(.*?)(?:<div class="(?:server|download)|link)',
            patron=r'data-id="([^"]+)" data-episode-num="' +
            (item.number if item.number else '1') + '"' +
            r'.*?href="([^"]+)"').match
        if match:
            epID, epurl = match
            if 'vvvvid' in name.lower():
                urls.append(
                    support.match(host + '/api/episode/serverPlayer?id=' +
                                  epID,
                                  headers=headers,
                                  patron=r'<a.*?href="([^"]+)"').match)
            elif 'streamtape' in name.lower():
                urls.append(
                    support.match(
                        data,
                        patron=r'<a href="(https://streamtape[^"]+)"').match)
            elif 'beta' in name.lower():
                urls.append(
                    support.match(
                        data,
                        patron=r'<a href="(https://animeworld[^"]+)"').match)
            elif 'server 2' in name.lower():
                dataJson = support.match(host + '/api/episode/info?id=' +
                                         epID + '&alt=0',
                                         headers=headers).data
                json = jsontools.load(dataJson)
                title = support.match(json['grabber'],
                                      patron=r'server2.([^.]+)',
                                      string=True).match
                itemlist.append(
                    item.clone(action="play",
                               title=title,
                               url=json['grabber'],
                               server='directo'))
            elif 'animeworld' in name.lower():
                url = support.match(
                    data,
                    patron=r'href="([^"]+)"\s*id="alternativeDownloadLink"',
                    headers=headers).match
                title = support.match(url,
                                      patron=r'http[s]?://(?:www.)?([^.]+)',
                                      string=True).match
                itemlist.append(
                    item.clone(action="play",
                               title=title,
                               url=url,
                               server='directo'))
    return support.server(item, urls, itemlist)
Example #14
def findvideos(item):
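    # Walk the configured proxies until one responds, collect the Cloudflare
    # cookies in three steps, then list the video profiles from the JSON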
    logger.info()
    itemlist = []

    headers = default_headers.copy()
    cookies = {}

    proxies = config.get_setting('proxies', item.channel, default='').replace(' ', '')
    if ';' in proxies:  # If the proxies are separated by ';', use random order
        proxies = proxies.replace(',', ';').split(';')
        import random
        random.shuffle(proxies)
    else:
        proxies = proxies.split(',')

    proxy_ok = False
    for n, proxy in enumerate(proxies):
        use_proxy = None if proxy == '' else {'http': proxy}

        # 1- /film/... (get the __cfduid and __cflb cookies)
        resp = httptools.downloadpage(item.referer, headers=headers, only_headers=True, cookies=False, use_proxy=use_proxy, raise_weberror=False)
        if (type(resp.code) == int and (resp.code < 200 or resp.code > 399)) or not resp.sucess:
            logger.info('El proxy %s NO responde adecuadamente. %s' % (proxy, resp.code))
        else:
            proxy_ok = True
            logger.info('El proxy %s parece válido.' % proxy)
            if n > 0:  # save the proxy that worked as the first in the list, if not already
                del proxies[n]
                new_proxies = proxy + ', ' + ', '.join(proxies)
                config.set_setting('proxies', new_proxies, item.channel)
            break
    if not proxy_ok: 
        platformtools.dialog_notification('Sin respuesta válida', 'Ninguno de los proxies ha funcionado.')
        return itemlist

    cks = httptools.get_cookies_from_headers(resp.headers)
    cookies.update(cks)

    # 2- /video2-prod/s/c (get cookie c)
    headers['Referer'] = item.referer
    headers['Cookie'] = '; '.join([ck_name + '=' + ck_value for ck_name, ck_value in cookies.items()])
    resp = httptools.downloadpage('http://tv-vip.com/video2-prod/s/c', headers=headers, cookies=False, use_proxy=use_proxy)
    cks = httptools.get_cookies_from_headers(resp.headers)
    cookies.update(cks)

    # 3- /json/repo/...
    headers['X-Requested-With'] = 'XMLHttpRequest'
    headers['Cookie'] = '; '.join([ck_name + '=' + ck_value for ck_name, ck_value in cookies.items()])
    try:
        data = jsontools.load(httptools.downloadpage(item.url, headers=headers, cookies=False, use_proxy=use_proxy).data)
    except:
        return itemlist
    if 'profiles' not in data:
        return itemlist

    # 4- /vendors/font-awesome/ (because of cf_clearance!? required!?)
    url = 'http://tv-vip.com/vendors/font-awesome/fonts/fontawesome-webfont.woff2?v=4.7.0'
    headers['Referer'] = 'http://tv-vip.com/vendors/font-awesome/css/font-awesome.min.css'
    headers['Accept-Encoding'] = 'identity'
    del headers['X-Requested-With']
    resp = httptools.downloadpage(url, headers=headers, only_headers=True, cookies=False, use_proxy=use_proxy)


    for perfil, datos in data['profiles'].items():
        for servidor in datos['servers']:
            if servidor['id'] == 's2': continue  # with s2 the video seems to always fail

            itemlist.append(Item( channel = item.channel, action = 'play', server = 'directo', title = '', 
                                  videoUri = datos['videoUri'], videoServer = servidor['id'],
                                  referer = item.referer, cookies = headers['Cookie'], use_proxy = use_proxy, 
                                  language = '', quality = datos['videoResolution'], quality_num = datos['height'],
                                  other = datos['sizeHuman'] + ', ' + servidor['id']
                           ))

    # ~ return sorted(itemlist, key=lambda it: it.quality_num)  # sort by ascending quality
    return itemlist
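
The loop above implements simple proxy failover: probe each candidate with a header-only request, keep the first one that answers with a usable status code, and promote it to the front of the stored list so later runs try it first. A minimal standalone sketch of the same idea, using the requests library with made-up proxy and URL values instead of the channel's httptools calls:

import random
import requests

def pick_working_proxy(proxies, probe_url):
    """Return the proxy list reordered so a responding proxy comes first, or None."""
    candidates = list(proxies)
    random.shuffle(candidates)  # random order, as the ';'-separated branch above does
    for proxy in candidates:
        try:
            resp = requests.head(probe_url,
                                 proxies={'http': proxy, 'https': proxy},
                                 timeout=5)
            if 200 <= resp.status_code < 400:
                candidates.remove(proxy)  # promote the working proxy to the front
                return [proxy] + candidates
        except requests.RequestException:
            continue
    return None

# Hypothetical usage:
# pick_working_proxy(['http://10.0.0.1:8080', 'http://10.0.0.2:3128'], 'http://example.com')
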
Exemplo n.º 15
0
def get_server_setting(name, server, default=None):
    """
        Retorna el valor de configuracion del parametro solicitado.

        Devuelve el valor del parametro 'name' en la configuracion propia del servidor 'server'.

        Busca en la ruta \addon_data\plugin.video.addon\settings_servers el archivo server_data.json y lee
        el valor del parametro 'name'. Si el archivo server_data.json no existe busca en la carpeta servers el archivo 
        server.json y crea un archivo server_data.json antes de retornar el valor solicitado. Si el parametro 'name'
        tampoco existe en el el archivo server.json se devuelve el parametro default.


        @param name: nombre del parametro
        @type name: str
        @param server: nombre del servidor
        @type server: str
        @param default: valor devuelto en caso de que no exista el parametro name
        @type default: any

        @return: El valor del parametro 'name'
        @rtype: any

        """
    # Create the folder if it does not exist
    if not os.path.exists(
            os.path.join(config.get_data_path(), "settings_servers")):
        os.mkdir(os.path.join(config.get_data_path(), "settings_servers"))

    file_settings = os.path.join(config.get_data_path(), "settings_servers",
                                 server + "_data.json")
    dict_settings = {}
    dict_file = {}
    if os.path.exists(file_settings):
        # Read the saved configuration from ../settings_servers/server_data.json
        try:
            dict_file = jsontools.load(open(file_settings, "rb").read())
            if isinstance(dict_file, dict) and 'settings' in dict_file:
                dict_settings = dict_file['settings']
        except EnvironmentError:
            logger.info("ERROR al leer el archivo: %s" % file_settings)

    if not dict_settings or name not in dict_settings:
        # Read the controls from the ../servers/server.json file
        try:
            list_controls, default_settings = get_server_controls_settings(
                server)
        except:
            default_settings = {}
        if name in default_settings:  # if the parameter exists in server.json, create server_data.json
            default_settings.update(dict_settings)
            dict_settings = default_settings
            dict_file['settings'] = dict_settings
            # Create the ../settings_servers/server_data.json file
            json_data = jsontools.dump(dict_file)
            try:
                open(file_settings, "wb").write(json_data)
            except EnvironmentError:
                logger.info("ERROR al salvar el archivo: %s" % file_settings)

    # Return the value of the local parameter 'name' if it exists, otherwise return default
    return dict_settings.get(name, default)
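
The lookup order above is: the cached server_data.json first, then the defaults declared in server.json, then the default argument. A minimal self-contained sketch of that fallback chain using plain json/os (the paths, defaults dict and example call are illustrative, not the addon's real API):

import json
import os

def read_setting(settings_dir, server, name, declared_defaults, default=None):
    """Illustrative fallback: saved value -> declared default -> 'default' argument."""
    path = os.path.join(settings_dir, server + "_data.json")
    saved = {}
    if os.path.exists(path):
        with open(path) as f:
            saved = json.load(f).get('settings', {})
    if name in saved:
        return saved[name]
    return declared_defaults.get(name, default)

# Hypothetical usage:
# read_setting('/tmp/settings_servers', 'someserver', 'priority', {'priority': 5}, default=0)
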
Exemplo n.º 16
0
def items_usuario(item):
    logger.info()
    itemlist = []
    ## Load watch statuses
    status = jsontools.load(
        httptools.downloadpage(host + '/a/status/all').data)
    ## User cards
    url = item.url.split("?")[0]
    post = item.url.split("?")[1]
    old_start = scrapertools.get_match(post, 'start=([^&]+)&')
    limit = scrapertools.get_match(post, 'limit=(\d+)')
    start = "%s" % (int(old_start) + int(limit))
    post = post.replace("start=" + old_start, "start=" + start)
    next_page = url + "?" + post
    ## Load the user's cards
    data = httptools.downloadpage(url, post=post).data
    fichas_usuario = jsontools.load(data)
    for ficha in fichas_usuario:
        try:
            title = ficha['title']['es'].strip()
        except:
            title = ficha['title']['en'].strip()
        try:
            title = title.encode('utf-8')
        except:
            pass
        show = title
        try:
            thumbnail = host + "/thumbs/" + ficha['thumbnail']
        except:
            thumbnail = host + "/thumbs/" + ficha['thumb']
        try:
            url = urlparse.urljoin(
                host,
                '/serie/' + ficha['permalink']) + "###" + ficha['id'] + ";1"
            action = "episodios"
            str = get_status(status, 'shows', ficha['id'])
            if "show_title" in ficha:
                action = "findvideos"
                try:
                    serie = ficha['show_title']['es'].strip()
                except:
                    serie = ficha['show_title']['en'].strip()
                temporada = ficha['season']
                episodio = ficha['episode']
                serie = "[COLOR whitesmoke][B]" + serie + "[/B][/COLOR]"
                if len(episodio) == 1: episodio = '0' + episodio
                try:
                    title = temporada + "x" + episodio + " - " + serie + ": " + title
                except:
                    title = temporada + "x" + episodio + " - " + serie.decode(
                        'iso-8859-1') + ": " + title.decode('iso-8859-1')
                url = urlparse.urljoin(
                    host, '/serie/' + ficha['permalink'] + '/temporada-' +
                    temporada + '/episodio-' +
                    episodio) + "###" + ficha['id'] + ";3"
        except:
            url = urlparse.urljoin(host, '/pelicula/' +
                                   ficha['perma']) + "###" + ficha['id'] + ";2"
            action = "findvideos"
            str = get_status(status, 'movies', ficha['id'])
        if str != "": title += str
        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 title=title,
                 fulltitle=title,
                 url=url,
                 thumbnail=thumbnail,
                 show=show,
                 folder=True))
    if len(itemlist) == int(limit):
        itemlist.append(
            Item(channel=item.channel,
                 action="items_usuario",
                 title=">> Página siguiente",
                 url=next_page,
                 folder=True))
    return itemlist
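
Pagination here is driven entirely by the query string: start is read from the POST payload, advanced by limit, and written back to build the next-page request. The same string manipulation in isolation, on a made-up payload:

import re

post = "start=0&limit=24&order=date"  # made-up example payload
old_start = re.search(r'start=([^&]+)&', post).group(1)
limit = re.search(r'limit=(\d+)', post).group(1)
new_start = str(int(old_start) + int(limit))
next_post = post.replace("start=" + old_start, "start=" + new_start)
# next_post == "start=24&limit=24&order=date"
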
Exemplo n.º 17
0
def findvideos(item):
    import time
    support.log(item)
    itemlist = []
    matches, data = support.match(item,
                                  r'class="tab.*?data-name="([0-9]+)">',
                                  headers=headers)
    videoData = ''

    for serverid in matches:
        if not item.number:
            item.number = support.scrapertools.find_single_match(
                item.title, r'(\d+) -')
        block = support.scrapertools.find_multiple_matches(
            data, 'data-id="' + serverid + '">(.*?)<div class="server')
        ID = support.scrapertools.find_single_match(
            str(block), r'<a data-id="([^"]+)" data-base="' +
            (item.number if item.number else '1') + '"')
        support.log('ID= ', serverid)
        if ID:
            if serverid == '26':
                matches = support.match(
                    item,
                    r'<a href="([^"]+)"',
                    url='%s/ajax/episode/serverPlayer?id=%s' %
                    (host, item.url.split('/')[-1]))[0]
                for url in matches:
                    videoData += '\n' + url
            else:
                try:
                    dataJson = support.httptools.downloadpage(
                        '%s/ajax/episode/info?id=%s&server=%s&ts=%s' %
                        (host, ID, serverid, int(time.time())),
                        headers=[['x-requested-with', 'XMLHttpRequest']]).data
                    json = jsontools.load(dataJson)
                    support.log(json)
                    if 'keepsetsu' in json['grabber']:
                        matches = support.match(item,
                                                r'<iframe\s*src="([^"]+)"',
                                                url=json['grabber'])[0]
                        for url in matches:
                            videoData += '\n' + url
                    else:
                        videoData += '\n' + json['grabber']

                    if serverid == '28':
                        itemlist.append(
                            support.Item(channel=item.channel,
                                         action="play",
                                         title='diretto',
                                         quality='',
                                         url=json['grabber'],
                                         server='directo',
                                         fulltitle=item.fulltitle,
                                         show=item.show,
                                         contentType=item.contentType,
                                         folder=False))
                except:
                    pass

    return support.server(item, videoData, itemlist)
Exemplo n.º 18
0
def fichas(item):
    logger.info()
    itemlist = []
    textoidiomas = ''
    infoLabels = dict()
    ## Load watch statuses
    status = jsontools.load(
        httptools.downloadpage(host + '/a/status/all').data)

    if item.title == "Buscar...":
        data = agrupa_datos(
            httptools.downloadpage(item.url, post=item.extra).data)
        s_p = scrapertools.get_match(
            data,
            '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
                '<h3 class="section-title">')
        if len(s_p) == 1:
            data = s_p[0]
            if 'Lo sentimos</h3>' in s_p[0]:
                # NOTE: 'texto' is expected to be set by the calling search function
                return [
                    Item(
                        channel=item.channel,
                        title="[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]"
                        + texto.replace('%20', ' ') +
                        "[/COLOR] sin resultados")
                ]
        else:
            data = s_p[0] + s_p[1]
    else:
        data = agrupa_datos(httptools.downloadpage(item.url).data)

    data = re.sub(
        r'<div class="span-6[^<]+<div class="item"[^<]+' + \
        '<a href="([^"]+)"[^<]+' + \
        '<img.*?src="([^"]+)".*?' + \
        '<div class="left"(.*?)</div>' + \
        '<div class="right"(.*?)</div>.*?' + \
        'title="([^"]+)".*?' + \
        'onclick="setFavorite.\d, (\d+),',
        r"'url':'\1';'image':'\2';'langs':'\3';'rating':'\4';'title':\5;'id':'\6';",
        data
    )
    patron = "'url':'([^']+)';'image':'([^']+)';'langs':'([^']+)';'rating':'([^']+)';'title':([^;]+);'id':'([^']+)';"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:
        thumbnail = scrapedthumbnail
        language = ''
        title = scrapedtitle.strip()
        show = title
        contentTitle = scrapedtitle.strip()
        if scrapedlangs != ">":
            textoidiomas, language = extrae_idiomas(scrapedlangs)
            # TODO: remove the language tag
            title += " ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])"
        if scrapedrating != ">":
            valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>',
                                r'\1,\2', scrapedrating)
            infoLabels['rating'] = valoracion
            title += " ([COLOR orange]" + valoracion + "[/COLOR])"
        url = urlparse.urljoin(item.url, scrapedurl)
        if "/serie" in url or "/tags-tv" in url:
            action = "episodios"
            url += "###" + scrapedid + ";1"
            type = "shows"
            contentType = "tvshow"
        else:
            action = "findvideos"
            url += "###" + scrapedid + ";2"
            type = "movies"
            contentType = "movie"
        str = get_status(status, type, scrapedid)
        if str != "": title += str
        if item.title == "Buscar...":
            bus = host[-4:]
            tag_type = scrapertools.find_single_match(url, '%s/([^/]+)/' % bus)
            title += " - [COLOR blue]" + tag_type.capitalize() + "[/COLOR]"
        if "/serie" in url or "/tags-tv" in url:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     thumbnail=thumbnail,
                     contentSerieName=show,
                     folder=True,
                     contentType=contentType,
                     language=language,
                     infoLabels=infoLabels))
        else:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     fulltitle=contentTitle,
                     thumbnail=thumbnail,
                     folder=True,
                     contentType=contentType,
                     contentTitle=contentTitle,
                     language=language,
                     infoLabels=infoLabels))
    ## Pagination
    next_page_url = scrapertools.find_single_match(
        data, '<a href="([^"]+)">.raquo;</a>')
    if next_page_url != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title=">> Página siguiente",
                 url=urlparse.urljoin(item.url, next_page_url),
                 folder=True))
    return itemlist
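
The re.sub above first rewrites each card's HTML block into one delimited record ('url':'...';'image':'...';...) so that a single flat regex can then extract every field. A toy demonstration of this normalize-then-parse idea on made-up HTML:

import re

html = '<div class="item"><a href="/serie/foo"><img src="/t/foo.jpg">title="Foo"</div>'  # made-up
normalized = re.sub(
    r'<div class="item"><a href="([^"]+)"><img src="([^"]+)">title="([^"]+)"</div>',
    r"'url':'\1';'image':'\2';'title':'\3';", html)
fields = re.findall(r"'url':'([^']+)';'image':'([^']+)';'title':'([^']+)';", normalized)
# fields == [('/serie/foo', '/t/foo.jpg', 'Foo')]
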
Exemplo n.º 19
0
def list_all(item):
    logger.info()
    itemlist = []
    matches = []
    if item.list_type in [
            'pais', 'pelicula', 'categorias', 'data', 'buscar', 'novedades'
    ]:
        # If it is this type of page (titles in the HTML)
        if item.list_type in ['pais', 'pelicula', 'categorias', 'data']:
            # Download the page (it contains the JSON)
            soup = create_soup(item.url)

            # Load the JSON (it holds the episode info, images, URLs)
            json = jsontools.load(
                soup.find(
                    'script',
                    id='__NEXT_DATA__').text)['props']['pageProps']['data']

            # Criteria for determining contentType
            if item.list_type in ['pelicula']:
                contentType = 'movie'
            elif item.list_type in ['categorias', 'pais', 'data']:
                contentType = 'tvshow'

            # Get the list of elements (they contain the titles)
            container = soup.find('div', class_='container wrapper').find(
                'div', class_='row')
            if item.list_type in ['categorias', 'pais', 'data']:
                items = container.find_all('div', class_='mb-3')
            else:
                items = container.find_all('a', class_='mb-3')

            # Walk the titles
            for i, it in enumerate(items):
                j = json[i]  # matching element in the JSON

                action = 'seasons'
                status = j['estado']
                title, language = set_lang(
                    it.find('span', class_='text-dark').text)
                thumb = 'https://img.comamosramen.com/{}-high.webp'.format(
                    j['img'])
                url = '{}/v/{}'.format(host, j.get('uniqid', ''))

                # Criteria for determining contentType, part 2
                if contentType == 'movie':
                    contentSerieName = None
                    contentTitle = title
                else:
                    contentSerieName = title
                    contentTitle = None

                matches.append([
                    action, contentSerieName, contentTitle, contentType,
                    language, status, title, thumb, url
                ])

        # If it is this type of page (all the data in the JSON)
        elif item.list_type in ['buscar', 'novedades']:
            # The JSON comes from the API; most of the info is already in it
            json = httptools.downloadpage(item.url).json
            for j in json:
                action = 'seasons'
                status = j['estado']
                title, language = set_lang(j['uniqid'].split('-', 1)[1])
                contentSerieName = title
                contentTitle = None
                contentType = ''
                thumb = 'https://img.comamosramen.com/{}-high.webp'.format(
                    j['img'])
                id_ = j['uniqid'].split('-', 1)
                url = '{}/v/{}'.format(
                    host, '{}-{}'.format(id_[0], id_[1].replace('-', '%20')))
                matches.append([
                    action, contentSerieName, contentTitle, contentType,
                    language, status, title, thumb, url
                ])

    else:
        # The section changed drastically and this listing needs rebuilding
        raise Exception('Item malformado, list_type no válido')

    # Walk the list of matches built above
    for action, contentSerieName, contentTitle, contentType, language, status, title, thumb, url in matches:
        it = Item(action=action,
                  contentType=contentType,
                  channel=item.channel,
                  language=language,
                  title=unify.add_languages(title, language),
                  thumbnail=thumb,
                  url=url)

        # Dynamic determination of contentType
        if contentSerieName:
            it.contentSerieName = contentSerieName
        elif contentTitle:
            it.contentTitle = contentTitle
        itemlist.append(it)

    return itemlist
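
The listing relies on the Next.js convention of shipping the page data as JSON inside a <script id="__NEXT_DATA__"> tag, which is usually more robust than scraping the rendered markup. A minimal standalone sketch with BeautifulSoup on a toy page (the props/pageProps/data path mirrors the code above):

import json
from bs4 import BeautifulSoup

html = '''<html><body>
<script id="__NEXT_DATA__" type="application/json">
{"props": {"pageProps": {"data": [{"estado": "En emision", "img": "foo", "uniqid": "abc-Mi-Serie"}]}}}
</script></body></html>'''  # toy page

soup = BeautifulSoup(html, "html.parser")
data = json.loads(soup.find('script', id='__NEXT_DATA__').text)['props']['pageProps']['data']
# data[0]['uniqid'] == 'abc-Mi-Serie'
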
Exemplo n.º 20
0
def episodios(item):
    logger.info()
    id = "0"
    itemlist = []
    ## Load watch statuses
    status = jsontools.load(
        httptools.downloadpage(host + '/a/status/all').data)
    url_targets = item.url
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]
    ## Seasons
    data = agrupa_datos(httptools.downloadpage(item.url).data)
    if id == "0":
        ## Extract the show id from the page when coming from listado_series
        id = scrapertools.get_match(data,
                                    "<script>var sid = '([^']+)';</script>")
        url_targets = url_targets.replace('###0', '###' + id)
    str = get_status(status, "shows", id)
    if str != "" and account and item.category != "Series" and "XBMC" not in item.title:
        if config.get_videolibrary_support():
            title = " ( [COLOR gray][B]" + item.contentSerieName + "[/B][/COLOR] )"
            itemlist.append(
                Item(channel=item.channel,
                     action="episodios",
                     title=title,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     contentSerieName=item.contentSerieName,
                     folder=False))
        title = str.replace('green', 'red').replace('Siguiendo', 'Abandonar')
        itemlist.append(
            Item(channel=item.channel,
                 action="set_status",
                 title=title,
                 url=url_targets,
                 thumbnail=item.thumbnail,
                 contentSerieName=item.contentSerieName,
                 folder=True))
    elif account and item.category != "Series" and "XBMC" not in item.title:
        if config.get_videolibrary_support():
            title = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
            itemlist.append(
                Item(channel=item.channel,
                     action="episodios",
                     title=title,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     contentSerieName=item.contentSerieName,
                     folder=False))
        title = " ( [COLOR orange][B]Seguir[/B][/COLOR] )"
        itemlist.append(
            Item(channel=item.channel,
                 action="set_status",
                 title=title,
                 url=url_targets,
                 thumbnail=item.thumbnail,
                 contentSerieName=item.contentSerieName,
                 folder=True))
    patron = "<li><a href='([^']+)'>[^<]+</a></li>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        data = agrupa_datos(httptools.downloadpage(scrapedurl).data)
        sid = scrapertools.get_match(data, "<script>var sid = '(\d+)'")
        ssid = scrapertools.get_match(scrapedurl, "temporada-(\d+)")
        post = "action=season&start=0&limit=0&show=%s&season=%s" % (sid, ssid)
        url = host + "/a/episodes"
        data = httptools.downloadpage(url, post=post).data
        episodes = jsontools.load(data)
        for episode in episodes:
            thumbnail = host + "/thumbs/" + episode['thumbnail']
            language = episode['languages']
            temporada = episode['season']
            episodio = episode['episode']
            if len(episodio) == 1: episodio = '0' + episodio
            if episode['languages'] != "[]":
                idiomas = "( [COLOR teal][B]"
                for idioma in episode['languages']:
                    idiomas += idioma + " "
                idiomas += "[/B][/COLOR])"
                idiomas = idiomas
            else:
                idiomas = ""
            title = ""
            if episode['title']:
                try:
                    title = episode['title']['es'].strip()
                except:
                    title = episode['title']['en'].strip()
            if len(title) == 0:
                title = "Temporada " + temporada + " Episodio " + episodio
            try:
                title = temporada + "x" + episodio + " - " + title.decode(
                    'utf-8') + ' ' + idiomas
            except:
                title = temporada + "x" + episodio + " - " + title.decode(
                    'iso-8859-1') + ' ' + idiomas
            str = get_status(status, 'episodes', episode['id'])
            if str != "": title += str
            try:
                title = title.encode('utf-8')
            except:
                title = title.encode('iso-8859-1')
            url = urlparse.urljoin(
                scrapedurl, 'temporada-' + temporada + '/episodio-' +
                episodio) + "###" + episode['id'] + ";3"
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title,
                     url=url,
                     thumbnail=thumbnail,
                     contentSerieName=item.contentSerieName,
                     folder=True,
                     contentType="episode",
                     language=language))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la videoteca",
                 url=url_targets,
                 action="add_serie_to_library",
                 extra="episodios",
                 contentSerieName=item.contentSerieName))
        itemlist.append(
            Item(channel=item.channel,
                 title="Descargar todos los episodios de la serie",
                 url=url_targets,
                 action="download_all_episodes",
                 extra="episodios"))
    return itemlist
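
Several functions in this channel smuggle the ficha id and type through the URL as a '###id;type' suffix and split them back out before downloading. A tiny round-trip demonstration with made-up values:

url = "http://example.com/serie/foo" + "###" + "12345" + ";" + "1"  # made-up values
base, extra = url.split("###")
ficha_id, ficha_type = extra.split(";")
# base == 'http://example.com/serie/foo'; ficha_id == '12345'; ficha_type == '1'
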
Exemplo n.º 21
0
def findvideos(item):
    logger.info()
    duplicated = []

    data = get_source(item.url)
    video_info = scrapertools.find_single_match(
        data, "load_player\('(.*?)','(.*?)'\);")
    movie_info = scrapertools.find_single_match(
        item.url,
        'http:\/\/ver-peliculas\.org\/peliculas\/(\d+)-(.*?)-\d{4}-online\.')
    movie_id = movie_info[0]
    movie_name = movie_info[1]
    sub = video_info[1]
    url_base = 'http://ver-peliculas.org/core/api.php?id=%s&slug=%s' % (
        movie_id, movie_name)
    data = httptools.downloadpage(url_base).data
    json_data = jsontools.load(data)
    video_list = json_data['lista']
    itemlist = []
    for videoitem in video_list:
        video_base_url = 'http://ver-peliculas.org/core/videofinal.php'
        if video_list[videoitem] != None:
            video_lang = video_list[videoitem]
            languages = ['latino', 'spanish', 'subtitulos']
            for lang in languages:
                if video_lang[lang] != None:
                    if not isinstance(video_lang[lang], int):
                        video_id = video_lang[lang][0]["video"]
                        post = {"video": video_id, "sub": sub}
                        post = urllib.urlencode(post)
                        data = httptools.downloadpage(video_base_url,
                                                      post=post).data
                        playlist = jsontools.load(data)
                        sources = playlist['playlist']
                        server = playlist['server']

                        for video_link in sources:
                            url = video_link['sources']
                            # if 'onevideo' in url:
                            # data = get_source(url)
                            # g_urls = servertools.findvideos(data=data)
                            # url = g_urls[0][1]
                            # server = g_urls[0][0]
                            if url not in duplicated and server != 'drive':
                                lang = lang.capitalize()
                                if lang == 'Spanish':
                                    lang = 'Español'
                                title = '(%s) %s (%s)' % (server, item.title,
                                                          lang)
                                thumbnail = servertools.guess_server_thumbnail(
                                    server)
                                itemlist.append(
                                    item.clone(title=title,
                                               url=url,
                                               server=server,
                                               thumbnail=thumbnail,
                                               action='play'))
                                duplicated.append(url)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
Exemplo n.º 22
0
def novedades_episodios(item):
    logger.info()
    itemlist = []
    ## Load watch statuses
    status = jsontools.load(
        httptools.downloadpage(host + '/a/status/all').data)
    ## Episodes
    url = item.url.split("?")[0]
    post = item.url.split("?")[1]
    old_start = scrapertools.get_match(post, 'start=([^&]+)&')
    start = "%s" % (int(old_start) + 24)
    post = post.replace("start=" + old_start, "start=" + start)
    next_page = url + "?" + post
    data = httptools.downloadpage(url, post=post).data
    episodes = jsontools.load(data)
    for episode in episodes:
        thumbnail = host + "/thumbs/" + episode['thumbnail']
        temporada = episode['season']
        episodio = episode['episode']
        if len(episodio) == 1: episodio = '0' + episodio
        if episode['languages'] != "[]":
            idiomas = "( [COLOR teal][B]"
            for idioma in episode['languages']:
                idiomas += idioma + " "
            idiomas += "[/B][/COLOR])"
            idiomas = idiomas
        else:
            idiomas = ""
        try:
            contentSerieName = episode['show']['title']['es'].strip()
        except:
            contentSerieName = episode['show']['title']['en'].strip()
        show = "[COLOR whitesmoke][B]" + contentSerieName + "[/B][/COLOR]"
        title = ""
        if episode['title']:
            try:
                title = episode['title']['es'].strip()
            except:
                title = episode['title']['en'].strip()
        if len(title) == 0:
            title = "Temporada " + temporada + " Episodio " + episodio
        try:
            title = temporada + "x" + episodio + " - " + show.decode(
                'utf-8') + ": " + title.decode('utf-8') + ' ' + idiomas
        except:
            title = temporada + "x" + episodio + " - " + show.decode(
                'iso-8859-1') + ": " + title.decode(
                    'iso-8859-1') + ' ' + idiomas
        str = get_status(status, 'episodes', episode['id'])
        if str != "": title += str
        try:
            title = title.encode('utf-8')
        except:
            title = title.encode('iso-8859-1')
        url = urlparse.urljoin(
            host, '/serie/' + episode['permalink'] + '/temporada-' +
            temporada + '/episodio-' + episodio) + "###" + episode['id'] + ";3"
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 contentSerieName=contentSerieName,
                 url=url,
                 thumbnail=thumbnail,
                 folder=True,
                 contentType="episode"))
    if len(itemlist) == 24:
        itemlist.append(
            Item(channel=item.channel,
                 action="novedades_episodios",
                 title=">> Página siguiente",
                 url=next_page,
                 folder=True))
    return itemlist
Exemplo n.º 23
0
def entradasconlistas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)

    # If there is any list
    contentSerie = False
    contentList = False
    if data.get('b'):
        for child in data['b']:
            infolabels = {}

            infolabels['originaltitle'] = child['originalTitle']
            infolabels['plot'] = child['description']
            infolabels['year'] = data['year']
            if child.get('tags'):
                infolabels['genre'] = ', '.join(
                    [x.strip() for x in child['tags']])
            infolabels['rating'] = child['rateHuman'].replace(',', '.')
            infolabels['votes'] = child['rateCount']
            if child.get('runtime'):
                try:
                    infolabels['duration'] = int(child['runtime'].replace(
                        " min.", "")) * 60
                except:
                    pass
            if child.get('cast'): infolabels['cast'] = child['cast'].split(",")
            infolabels['director'] = child['director']
            season = child.get('season', '')
            if season.isdigit() and not contentList:
                contentSerie = True
                action = "episodios"
            else:
                contentSerie = False
                contentList = True
                action = "entradasconlistas"

            url = host % "list/%s" % child["id"] + ext
            title = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['name'])
            contentTitle = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['name'])
            if not title:
                title = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['id'])
                contentTitle = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['id'])
            title = unicode(title, "utf-8").capitalize().encode("utf-8")
            contentTitle = unicode(contentTitle,
                                   "utf-8").capitalize().encode("utf-8")
            show = ""
            if contentSerie:
                title += " (Serie TV)"
                show = contentTitle
            thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"]
            fanart = host % "list/%s/background_1080.jpg" % child["id"]

            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     thumbnail=thumbnail,
                     fanart=fanart,
                     contentTitle=contentTitle,
                     show=show,
                     infoLabels=infolabels,
                     viewmode="movie_with_plot",
                     text_color=color3))
    else:
        contentList = True

    if contentSerie and itemlist:
        itemlist.sort(key=lambda it: it.infoLabels['season'], reverse=True)

    if itemlist:
        itemlist.insert(
            0,
            Item(channel=item.channel,
                 title="**LISTAS**",
                 action="",
                 text_color=color4,
                 text_bold=True,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))

    if data.get("a") and itemlist:
        itemlist.append(
            Item(channel=item.channel,
                 title="**VÍDEOS**",
                 action="",
                 text_color=color6,
                 text_bold=True,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))

    for child in data.get("a", []):
        infolabels = {}

        infolabels['originaltitle'] = child['originalTitle']
        infolabels['plot'] = child['description']
        infolabels['year'] = data['year']
        if child.get('tags'):
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rateHuman'].replace(',', '.')
        infolabels['votes'] = child['rateCount']
        if child.get('runtime'):
            try:
                infolabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if child.get('cast'): infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']
        url = host % "movie/%s/movie.js" % child["id"]
        # Fanart
        fanart = host % "movie/%s/background_1080.jpg" % child["id"]
        if child.get("episode"):
            thumbnail = host % "movie/%s/thumbnail.jpg" % child["id"]
        else:
            thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"]

        if child['height'] < 720:
            quality = "[B]  [SD][/B]"
        elif child['height'] < 1080:
            quality = "[B]  [720p][/B]"
        elif child['height'] >= 1080:
            quality = "[B]  [1080p][/B]"
        contentTitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
            .decode("utf-8")
        if not child['name']:
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        if child['year']:
            title += " (" + child['year'] + ")"
        title += quality

        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])
        thumbnail += "|User-Agent=%s" % httptools.get_user_agent
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=url,
                 video_urls=video_urls,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 contentTitle=contentTitle,
                 infoLabels=infolabels,
                 viewmode="movie_with_plot",
                 text_color=color3))

    # Add an item to add this list of videos to the video library
    if data.get('a') and itemlist and contentList and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 text_color=color5,
                 title="Añadir esta lista a la videoteca",
                 url=item.url,
                 action="listas"))
    elif contentSerie and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la videoteca",
                 text_color=color5,
                 url=item.url,
                 action="add_serie_to_library",
                 show=item.show,
                 contentTitle=item.contentTitle,
                 extra="episodios"))

    return itemlist
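
The quality tag above is derived purely from the video height, in three buckets: below 720 is SD, below 1080 is 720p, anything else is 1080p. The same rule as a small standalone helper:

def quality_label(height):
    """Map a video height in pixels to the channel's quality tag."""
    if height < 720:
        return "[B]  [SD][/B]"
    elif height < 1080:
        return "[B]  [720p][/B]"
    return "[B]  [1080p][/B]"

# quality_label(480) == '[B]  [SD][/B]'; quality_label(1080) == '[B]  [1080p][/B]'
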
Exemplo n.º 24
0
def findvideos(item):
    logger.info()
    itemlist = []
    it1 = []
    it2 = []

    ## Load watch statuses
    status = jsontools.load(
        httptools.downloadpage(host + '/a/status/all').data)
    url_targets = item.url

    ## Videos
    id = ""
    type = ""
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]

    if type == "2" and account and item.category != "Cine":
        title = " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )"
        if "Favorito" in item.title:
            title = " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )"
        if config.get_videolibrary_support():
            title_label = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
            it1.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title_label,
                     fulltitle=title_label,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     folder=False))
            title_label = " ( [COLOR green][B]Tráiler[/B][/COLOR] )"
            it1.append(
                Item(channel="trailertools",
                     action="buscartrailer",
                     title=title_label,
                     contentTitle=item.show,
                     url=item.url,
                     thumbnail=item.thumbnail,
                     show=item.show))
        it1.append(
            Item(channel=item.channel,
                 action="set_status",
                 title=title,
                 fulltitle=title,
                 url=url_targets,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 language=item.language,
                 folder=True))

    data_js = httptools.downloadpage(
        "%s/templates/hdfull/js/jquery.hdfull.view.min.js" % host).data
    key = scrapertools.find_single_match(
        data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

    data_js = httptools.downloadpage("%s/js/providers.js" % host).data
    try:
        from lib import alfaresolver
        provs = alfaresolver.hdfull_providers(data_js)
        if provs == '': return []
    except:
        return []

    data = agrupa_datos(httptools.downloadpage(item.url).data)
    data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
    data_decrypt = jsontools.load(
        obfs(base64.b64decode(data_obf), 126 - int(key)))

    infolabels = {}
    year = scrapertools.find_single_match(
        data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
    infolabels["year"] = year
    matches = []
    for match in data_decrypt:
        if match['provider'] in provs:
            try:
                embed = provs[match['provider']][0]
                url = eval(provs[match['provider']][1].replace(
                    '_code_', "match['code']"))
                matches.append([match['lang'], match['quality'], url, embed])
            except:
                pass

    for idioma, calidad, url, embed in matches:
        if embed == 'd':
            option = "Descargar"
            option1 = 2
        else:
            option = "Ver"
            option1 = 1

        calidad = unicode(calidad, "utf8").upper().encode("utf8")
        title = option + ": %s (" + calidad + ")" + " (" + idioma + ")"
        thumbnail = item.thumbnail
        plot = item.title + "\n\n" + scrapertools.find_single_match(
            data, '<meta property="og:description" content="([^"]+)"')
        plot = scrapertools.htmlclean(plot)
        fanart = scrapertools.find_single_match(
            data, '<div style="background-image.url. ([^\s]+)')
        if account:
            url += "###" + id + ";" + type
        it2.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 fanart=fanart,
                 show=item.show,
                 folder=True,
                 infoLabels=infolabels,
                 language=idioma,
                 contentTitle=item.contentTitle,
                 contentType=item.contentType,
                 tipo=option,
                 tipo1=option1,
                 idioma=idioma))

    it2 = servertools.get_servers_itemlist(
        it2, lambda i: i.title % i.server.capitalize())
    it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
    for it in it2:  # use a different name so the outer 'item' is not rebound
        if "###" not in it.url:
            it.url += "###" + id + ";" + type
    itemlist.extend(it1)
    itemlist.extend(it2)

    ## 2 = movie
    if type == "2" and item.category != "Cine":
        if config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir a la videoteca",
                     text_color="green",
                     action="add_pelicula_to_library",
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     fulltitle=item.contentTitle))
    return itemlist
Exemplo n.º 25
0
def nuevos_cap(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)
    logger.debug(data)
    capitulos = []
    if "Nuevas" in item.title:
        for child in data["b"]:
            capitulos.append([child["season"], child])
    else:
        for child in data["a"]:
            capitulos.append(['', child])

    for season, child in capitulos:
        infoLabels = item.infoLabels
        if child.get('runtime'):
            try:
                infoLabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if not season:
            season = scrapertools.find_single_match(child['name'], '(\d+)x\d+')
        try:
            infoLabels['season'] = int(season)
        except:
            infoLabels['season'] = 0
        if "Nuevos" in item.title:
            if not child['episode']:
                episode = scrapertools.find_single_match(
                    child['name'], '\d+x(\d+)')
                if not episode:
                    episode = "0"
                infoLabels['episode'] = int(episode)
            elif "al" in child['episode']:
                episode = "0"
                infoLabels['episode'] = int(episode)
            else:
                infoLabels['episode'] = int(child['episode'])
            infoLabels['mediatype'] = "episode"

        if "Nuevos" in item.title:
            url = host % "movie/%s/movie.js" % child["id"]
            action = "findvideos"
            thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"]
            fanart = item.fanart
        else:
            url = host % "list/%s" % child["season"] + ext
            action = "episodios"
            thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"]
            fanart = host % "list/%s/background_1080.jpg" % child["id"]

        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        if "Nuevos" in item.title:
            title = contentTitle = child['name'].rsplit(
                " ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
        else:
            title = contentTitle = child['name']

        thumbnail += "|User-Agent=%s" % httptools.get_user_agent
        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 contentTitle=contentTitle,
                 viewmode="movie",
                 show=item.contentTitle,
                 infoLabels=infoLabels,
                 video_urls=video_urls,
                 extra="nuevos",
                 text_color=color3))

    return itemlist
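
When the JSON lacks explicit season/episode fields, the code falls back to pulling them out of names like '3x07' with a pair of regexes. The same extraction in isolation:

import re

name = "Serie 3x07"  # made-up
season = re.search(r'(\d+)x\d+', name).group(1)
episode = re.search(r'\d+x(\d+)', name).group(1)
# season == '3'; episode == '07'
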
Exemplo n.º 26
0
def findvideos(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    data = scrapertools.find_single_match(data, '<div id="marco-post">.*?<div id="sidebar">')
    data = scrapertools.unescape(data)
    data = scrapertools.decodeHtmlentities(data)
    
    options_regex = '<a href="#tab.*?">.*?<b>(.*?)</b>'
    option_matches = re.compile(options_regex, re.DOTALL).findall(data)

    video_regex = '<iframe.*?src="(.*?)".*?</iframe>'
    video_matches = re.compile(video_regex, re.DOTALL).findall(data)

    # for option, scrapedurl in matches:
    # Py2 map(None, a, b) pairs the two lists element-wise, padding the shorter with None
    for option, scrapedurl in map(None, option_matches, video_matches):
        if scrapedurl is None:
            continue
        
        scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')

        try:
            data_video = get_source(scrapedurl)
        except Exception as e:
            logger.info('Error en url: ' + scrapedurl)
            continue
        
        # logger.info(data_video)

        # This site uses multiple intermediate pages, each with its own rules.
        source_headers = dict()
        source_headers["Content-Type"] = "application/x-www-form-urlencoded; charset=UTF-8"
        source_headers["X-Requested-With"] = "XMLHttpRequest"
        if scrapedurl.find("https://repro") != 0:
            logger.info("Caso 0: url externa")
            url = scrapedurl
            itemlist.append(Item(channel=item.channel, title=option, url=url, action='play', language=IDIOMA))
        elif scrapedurl.find("pi76823.php") > 0:
            logger.info("Caso 1")
            source_data = get_source(scrapedurl)
            source_regex = 'post\( "(.*?)", { acc: "(.*?)", id: \'(.*?)\', tk: \'(.*?)\' }'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            for source_page, source_acc, source_id, source_tk in source_matches:
                source_url = scrapedurl[0:scrapedurl.find("pi76823.php")] + source_page
                source_result = httptools.downloadpage(source_url, post='acc=' + source_acc + '&id=' + 
                                                       source_id + '&tk=' + source_tk, headers=source_headers)
                if source_result.code == 200:
                    source_json = jsontools.load(source_result.data)
                    itemlist.append(Item(channel=item.channel, title=option, url=source_json['urlremoto'], action='play', language=IDIOMA))
        elif scrapedurl.find("pi7.php") > 0:
            logger.info("Caso 2")
            source_data = get_source(scrapedurl)
            source_regex = 'post\( "(.*?)", { acc: "(.*?)", id: \'(.*?)\', tk: \'(.*?)\' }'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            for source_page, source_acc, source_id, source_tk in source_matches:
                source_url = scrapedurl[0:scrapedurl.find("pi7.php")] + source_page
                source_result = httptools.downloadpage(source_url, post='acc=' + source_acc + '&id=' + 
                                                       source_id + '&tk=' + source_tk, headers=source_headers)
                if source_result.code == 200:
                    source_json = jsontools.load(source_result.data)
                    itemlist.append(Item(channel=item.channel, title=option, url=source_json['urlremoto'], action='play', language=IDIOMA))
        elif scrapedurl.find("reproducir120.php") > 0:
            logger.info("Caso 3")
            source_data = get_source(scrapedurl)

            videoidn = scrapertools.find_single_match(source_data, 'var videoidn = \'(.*?)\';')
            tokensn = scrapertools.find_single_match(source_data, 'var tokensn = \'(.*?)\';')
            
            source_regex = 'post\( "(.*?)", { acc: "(.*?)"'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            for source_page, source_acc in source_matches:
                source_url = scrapedurl[0:scrapedurl.find("reproducir120.php")] + source_page
                source_result = httptools.downloadpage(source_url, post='acc=' + source_acc + '&id=' + 
                                                       videoidn + '&tk=' + tokensn, headers=source_headers)
                if source_result.code == 200:
                    source_json = jsontools.load(source_result.data)
                    urlremoto_regex = "file:'(.*?)'"
                    urlremoto_matches = re.compile(urlremoto_regex, re.DOTALL).findall(source_json['urlremoto'])
                    if len(urlremoto_matches) == 1:
                        itemlist.append(Item(channel=item.channel, title=option, url=urlremoto_matches[0], action='play', language=IDIOMA))
        elif scrapedurl.find("reproducir14.php") > 0:
            logger.info("Caso 4")
            source_data = get_source(scrapedurl)
            
            source_regex = '<div id="player-contenido" vid="(.*?)" name="(.*?)"'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            videoidn = source_matches[0][0]
            tokensn = source_matches[0][1]
            
            source_regex = 'post\( "(.*?)", { acc: "(.*?)"'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            for source_page, source_acc in source_matches:
                source_url = scrapedurl[0:scrapedurl.find("reproducir14.php")] + source_page
                source_result = httptools.downloadpage(source_url, post='acc=' + source_acc + '&id=' + 
                                                       videoidn + '&tk=' + tokensn, headers=source_headers)
                if source_result.code == 200:
                    source_json = jsontools.load(source_result.data)
                    itemlist.append(Item(channel=item.channel, title=option, url=source_json['urlremoto'], action='play', language=IDIOMA))
        else:
            logger.info("Caso nuevo")      

    itemlist = servertools.get_servers_itemlist(itemlist)

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
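
Cases 1 through 4 above all reduce to the same handshake: scrape an endpoint name plus acc/id/tk tokens from the intermediate page, POST them back as form data, and read urlremoto from the JSON reply. A hedged sketch of that handshake using the requests library (the function and its arguments are illustrative; the real code derives the URL from scrapedurl):

import requests

def resolve_urlremoto(base_url, page, acc, video_id, tk):
    """POST the scraped tokens back and return the 'urlremoto' field, or None."""
    headers = {
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "X-Requested-With": "XMLHttpRequest",
    }
    resp = requests.post(base_url + page,
                         data={"acc": acc, "id": video_id, "tk": tk},
                         headers=headers, timeout=10)
    if resp.status_code == 200:
        return resp.json().get("urlremoto")
    return None
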
Exemplo n.º 27
0
def episodios(item):
    logger.info()
    itemlist = []
    item.category = categoria

    #logger.debug(item)

    if item.from_title:
        item.title = item.from_title
    item.extra2 = 'xyz'
    del item.extra2

    # Clean up season/episode numbers that may be left over from Novedades
    season_display = 0
    if item.contentSeason:
        if item.season_colapse:  # if it comes from the Seasons menu...
            season_display = item.contentSeason  # ... save the season number to display
            item.from_num_season_colapse = season_display
            del item.season_colapse
            item.contentType = "tvshow"
            if item.from_title_season_colapse:
                item.title = item.from_title_season_colapse
                del item.from_title_season_colapse
                if item.infoLabels['title']:
                    del item.infoLabels['title']
        del item.infoLabels['season']
    if item.contentEpisodeNumber:
        del item.infoLabels['episode']
    if season_display == 0 and item.from_num_season_colapse:
        season_display = item.from_num_season_colapse

    # Fetch up-to-date info for the show.  TMDB is essential for the video library
    if not item.infoLabels['tmdb_id']:
        tmdb.set_infoLabels(item, True)

    # Download the page
    data = ''
    try:
        data = httptools.downloadpage(item.url, timeout=timeout).data
        data = unicode(data, "utf-8", errors="replace").encode("utf-8")
    except:  # some processing error; the empty-data check below handles it
        pass

    if not data:
        logger.error(
            "ERROR 01: EPISODIOS: La Web no responde o la URL es erronea" +
            item.url)
        itemlist.append(
            item.clone(
                action='',
                title=item.channel.capitalize() +
                ': ERROR 01: EPISODIOS:.  La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'
            ))
        return itemlist

    # Look for the episodes
    matches = jsontools.load(data)
    if not matches:  # error
        item = generictools.web_intervenida(
            item, data)  # check whether the site has been taken down
        if item.intervencion:  # it has been shut down by court order
            item, itemlist = generictools.post_tmdb_episodios(
                item, itemlist)  # call the method that renders the error
            return itemlist  # and exit

        logger.error(
            "ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web: " +
            data)
        itemlist.append(
            item.clone(
                action='',
                title=item.channel.capitalize() +
                ': ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web.  Reportar el error con el log'
            ))
        return itemlist  # if there is no more data something is broken; render what we have

    #logger.debug(matches)

    # Walk all the episodes, generating a local Item for each one in itemlist
    for temporada in matches.get("temporadas", []):
        if season_display > 0 and temporada.get(
                "numerotemporada",
                0) != season_display:  # not our season, skip it
            continue

        # If there is more than one season, just enumerate them
        if len(matches.get("temporadas", [])) > 1 and item.season_colapse:
            item_local = item.clone()  # make a copy of Item
            item_local.action = "findvideos"  # and prepare it for playback
            item_local.contentType = "episode"
            item_local.extra = "episodios"

            item_local.contentSeason = temporada.get(
                "numerotemporada", 1)  # save the season number
            item_local.contentEpisodeNumber = 1  # fill in the episode number for compatibility
            itemlist.append(item_local.clone())  # render it
            continue  # move on to the next season

        # Here we handle all the episodes of one season
        season = temporada.get("numerotemporada", 1)
        for episodio in temporada.get("capituls", []):

            item_local = item.clone()  # make a copy of Item
            item_local.action = "findvideos"  # and prepare it for playback
            item_local.contentType = "episode"
            item_local.extra = "episodios"
            if item_local.library_playcounts:
                del item_local.library_playcounts
            if item_local.library_urls:
                del item_local.library_urls
            if item_local.path:
                del item_local.path
            if item_local.update_last:
                del item_local.update_last
            if item_local.update_next:
                del item_local.update_next
            if item_local.channel_host:
                del item_local.channel_host
            if item_local.active:
                del item_local.active
            if item_local.contentTitle:
                del item_local.infoLabels['title']
            if item_local.season_colapse:
                del item_local.season_colapse
            if item_local.tmdb_stat:
                del item_local.tmdb_stat

            item_local.title = ''
            item_local.context = "['buscar_trailer']"
            title = episodio.get("nomcapitul", "")  # Episode title
            info_epi = episodio.get("infocapitul", "")  # Extra episode info, e.g. "1x01"
            item_local.language = []
            item_local.url = []

            if episodio.get("links",
                            {}).get("magnet"):  #buscamos los magnets activos
                url = episodio.get("links",
                                   {}).get("magnet")  #salvamos el magnet
                quality = episodio.get("links", {}).get(
                    "calitat", "")  #salvamos la calidad del magnet
                item_local.url += [(url, quality)
                                   ]  #guardamos todo como url para findvideos
                item_local.quality = quality.strip(
                )  #agregamos a la calidad del título
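                # At this point item_local.url holds (url, quality) tuples, e.g.
                # [("magnet:?xt=urn:btih:...", "720p")] (illustrative values)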

            if not item_local.language:
                item_local.language += ['CAST']  # Default to Castilian Spanish

            # Work out the season and episode numbers
            try:
                item_local.contentSeason = int(season)  # Copy the season number
            except:
                item_local.contentSeason = 1  # On error, default to 1
            try:
                item_local.contentEpisodeNumber = int(episodio.get("numerocapitul", 1))  # Copy the episode number
            except:
                item_local.contentEpisodeNumber = 1  # On error, default to 1
            if 'miniserie' in title.lower():  # Adjust mini-series entries
                if not item_local.contentSeason:
                    item_local.contentSeason = 1
                title = title.replace('miniserie', '').replace('MiniSerie', '')

            # If this entry covers multiple episodes, extract the end of the range
            patron1 = '\d+[x|X]\d{1,2}.?(?:y|Y|al|Al)?.?(?:(?:\d+[x|X])?(\d{1,2}))?'
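            # e.g. info_epi = "3x05 al 3x08" captures epi_rango = "08" (end of the
            # range); a single episode like "1x07" leaves epi_rango empty/falsy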
            epi_rango = scrapertools.find_single_match(info_epi, patron1)
            if epi_rango:
                item_local.infoLabels['episodio_titulo'] = 'al %s ' % epi_rango
                item_local.title = '%sx%s al %s -' % (
                    str(item_local.contentSeason),
                    str(item_local.contentEpisodeNumber).zfill(2),
                    str(epi_rango).zfill(2))
            else:
                item_local.title = '%sx%s -' % (
                    str(item_local.contentSeason),
                    str(item_local.contentEpisodeNumber).zfill(2))
                item_local.infoLabels['episodio_titulo'] = title  # Per-episode data belongs on item_local

            itemlist.append(item_local.clone())

            #logger.debug(item_local)

    if len(itemlist) > 1:
        # Sort by season, then by episode
        itemlist = sorted(itemlist,
                          key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    if item.season_colapse and not item.add_videolibrary:  # Coming from a listing: show seasons only
        item, itemlist = generictools.post_tmdb_seasons(item, itemlist)

    if not item.season_colapse:  # Not the seasons screen: paint everything
        # Run the list through TMDB, sorted by season and episode
        tmdb.set_infoLabels(itemlist, True)

        # Let the helper polish the titles retrieved from TMDB
        item, itemlist = generictools.post_tmdb_episodios(item, itemlist)

    #logger.debug(item)

    return itemlist
Exemplo n.º 28
0
def get_server_parameters(server):
    """
    Obtiene los datos del servidor
    @param server: Nombre del servidor
    @type server: str

    @return: datos del servidor
    @rtype: dict
    """
    # logger.info("server %s" % server)
    global dict_servers_parameters
    server = server.split('.')[0]
    if not server:
        return {}

    if server not in dict_servers_parameters:
        try:
            # Plain servers live in "servers/", debriders in "servers/debriders/"
            path = os.path.join(config.get_runtime_path(), "servers",
                                server + ".json")
            if not os.path.isfile(path):
                path = os.path.join(config.get_runtime_path(), "servers",
                                    "debriders", server + ".json")
            if not os.path.isfile(path):
                logger.error("No .json definition found for server: %s" % server)
                return {}

            import filetools
            data = filetools.read(path)
            dict_server = jsontools.load(data)

            # Thumbnails: both URLs and local files under "resources/media/servers" are accepted
            if dict_server.get(
                    "thumbnail") and "://" not in dict_server["thumbnail"]:
                dict_server["thumbnail"] = os.path.join(
                    config.get_runtime_path(), "resources", "media", "servers",
                    dict_server["thumbnail"])
            # Normalise "premium" and "id" so they are always lists
            for k in ['premium', 'id']:
                dict_server[k] = dict_server.get(k, list())
                if type(dict_server[k]) == str:
                    dict_server[k] = [dict_server[k]]

            if "find_videos" in dict_server:
                dict_server['find_videos']["patterns"] = dict_server[
                    'find_videos'].get("patterns", list())
                dict_server['find_videos']["ignore_urls"] = dict_server[
                    'find_videos'].get("ignore_urls", list())

            if "settings" in dict_server:
                dict_server['has_settings'] = True
            else:
                dict_server['has_settings'] = False

            dict_servers_parameters[server] = dict_server

        except:
            mensaje = "Error al cargar el servidor: %s\n" % server
            import traceback
            logger.error(mensaje + traceback.format_exc())
            return {}

    return dict_servers_parameters[server]
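A minimal usage sketch (illustrative, not from the original source; the server
name "openload" is a hypothetical example of a servers/<name>.json entry):

params = get_server_parameters("openload")
if params.get("has_settings"):
    logger.info("server exposes user-configurable settings")
for pattern in params.get("find_videos", {}).get("patterns", []):
    logger.info("find_videos pattern: %s" % str(pattern))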
Exemplo n.º 29
0
        try:
            req = urllib2.Request(url,
                                  data=jsontools.dump(params),
                                  headers=DEFAULT_HEADERS)
            response = urllib2.urlopen(req)
            html = response.read()
            response.close()

        except Exception, ex:
            message = "An exception of type %s occurred. Arguments:\n%s" % (
                type(ex).__name__, repr(ex.args))
            logger.error("error in: %s" % message)

        else:
            dict_html = jsontools.load(html)
            # logger.debug("dict_html %s" % dict_html)

            if "token" in dict_html:
                token = dict_html["token"]
                DEFAULT_HEADERS["Authorization"] = "Bearer " + token

                TOKEN = config.set_setting("tvdb_token", token)

    @classmethod
    def __refresh_token(cls):
        # logger.info()
        global TOKEN
        is_success = False

        url = HOST + "/refresh_token"
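A plausible continuation of __refresh_token, sketched under the assumption that
the service follows TheTVDB v2 semantics (GET /refresh_token with the current
Bearer header returns a fresh token); this is not the original code:

        try:
            # Reuse the headers that already carry the current Bearer token
            req = urllib2.Request(url, headers=DEFAULT_HEADERS)
            response = urllib2.urlopen(req)
            dict_html = jsontools.load(response.read())
            response.close()
            if "token" in dict_html:
                # Replace the stored token, mirroring the login flow above
                DEFAULT_HEADERS["Authorization"] = "Bearer " + dict_html["token"]
                TOKEN = config.set_setting("tvdb_token", dict_html["token"])
                is_success = True
        except Exception, ex:
            logger.error("refresh_token failed: %s" % repr(ex.args))

        return is_success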
Exemplo n.º 30
0
def jayhap_search(item):
    logger.info()
    itemlist = []

    if item.extra != "jayhap":
        item.contentTitle += " trailer"
    texto = item.contentTitle
    post = urllib.urlencode({
        'q': texto,
        'yt': 'true',
        'vm': 'true',
        'dm': 'true',
        'v': 'all',
        'l': 'all',
        'd': 'all'
    })

    # Check whether this is a fresh search or comes from the "Next" option
    if item.page != "":
        post += urllib.urlencode(item.page)
        data = scrapertools.downloadpage(
            "https://www.jayhap.com/load_more.php", post=post)
    else:
        data = scrapertools.downloadpage(
            "https://www.jayhap.com/get_results.php", post=post)
    data = jsontools.load(data)
    for video in data['videos']:
        url = video['url']
        server = video['source'].lower()
        duration = " (" + video['duration'] + ")"
        title = video['title'].decode(
            "utf-8") + duration + "  [" + server.capitalize() + "]"
        thumbnail = video['thumbnail']
        if item.contextual:
            title = "[COLOR white]%s[/COLOR]" % title
        itemlist.append(
            item.clone(action="play",
                       server=server,
                       title=title,
                       url=url,
                       thumbnail=thumbnail,
                       text_color="white"))

    if not itemlist:
        itemlist.append(
            item.clone(title="La búsqueda no ha dado resultados (%s)" %
                       item.contentTitle,
                       action="",
                       thumbnail="",
                       text_color=""))
    else:
        tokens = data['tokens']
        tokens['yt_token'] = tokens.pop('youtube')
        tokens['vm_token'] = tokens.pop('vimeo')
        tokens['dm_token'] = tokens.pop('dailymotion')
        itemlist.append(
            item.clone(title=">> Siguiente",
                       page=tokens,
                       action="jayhap_search",
                       extra="jayhap",
                       thumbnail="",
                       text_color=""))

    if keyboard:
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else:
            title = "%s"
        itemlist.append(
            item.clone(title=title % "Búsqueda Manual en Jayhap",
                       action="manual_search",
                       text_color="green",
                       thumbnail="",
                       extra="jayhap"))

    return itemlist
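A minimal driver sketch (illustrative assumptions: the framework's Item class
from core.item, which defaults missing attributes such as page to ""; the
channel name and search title below are arbitrary examples):

item = Item(channel="trailertools", contentTitle="Blade Runner", extra="jayhap")
for result in jayhap_search(item):
    logger.info("%s -> %s" % (result.title, result.url))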