Example #1
def temporadas(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    url_base = item.url
    patron = '<li class=item\d+><a href=#>(.*?) <\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
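    # Create one item per scraped season, numbering the seasons sequentially starting at 1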
    temp = 1
    if matches:
        for scrapedtitle in matches:
            url = url_base
            tempo = re.findall(r'\d+', scrapedtitle)
            # if tempo:
            #    title = 'Temporada' + ' ' + tempo[0]
            # else:
            title = scrapedtitle
            thumbnail = item.thumbnail
            plot = item.plot
            fanart = scrapertools.find_single_match(
                data, '<img src="([^"]+)"/>.*?</a>')
            itemlist.append(
                Item(channel=item.channel,
                     action="episodiosxtemp",
                     title=title,
                     fulltitle=item.title,
                     url=url,
                     thumbnail=thumbnail,
                     plot=plot,
                     fanart=fanart,
                     temp=str(temp),
                     contentSerieName=item.contentSerieName,
                     language=item.language,
                     quality=item.quality,
                     context=item.context))
            temp = temp + 1

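        # Offer to add the whole series to the video library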
        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(
                Item(channel=item.channel,
                     title=
                     '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                     url=item.url,
                     action="add_serie_to_library",
                     extra="episodios",
                     contentSerieName=item.contentSerieName,
                     extra1=item.extra1,
                     temp=str(temp)))
        return itemlist
    else:
        itemlist = episodiosxtemp(item)
        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(
                Item(channel=item.channel,
                     title=
                     '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                     url=item.url,
                     action="add_serie_to_library",
                     extra="episodios",
                     contentSerieName=item.contentSerieName,
                     extra1=item.extra1,
                     temp=str(temp)))
        return itemlist
Example #2
def series(item):
    logger.info()
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load(data)
    head = header_string + get_cookie_value()
    exception = ["top-series", "nuevos-capitulos"]
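    # Iterate over the playlist children, skipping the aggregated "top-series" and "nuevos-capitulos" lists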
    for child in data["sortedPlaylistChilds"]:
        if child["id"] not in exception:
            infolabels = {}

            infolabels['plot'] = child['description']
            infolabels['year'] = child['year']
            if child['tags']:
                infolabels['genre'] = ', '.join(
                    [x.strip() for x in child['tags']])
            infolabels['rating'] = child['rate'].replace(',', '.')
            infolabels['votes'] = child['rateCount']
            if child['cast']: infolabels['cast'] = child['cast'].split(",")
            infolabels['director'] = child['director']
            infolabels['mediatype'] = "episode"
            if child['seasonNumber']:
                infolabels['season'] = child['seasonNumber']
            url = "http://tv-vip.com/json/playlist/%s/index.json" % child["id"]
            # Fanart
            if child['hashBackground']:
                fanart = "http://tv-vip.com/json/playlist/%s/background.jpg" % child[
                    "id"]
            else:
                fanart = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child[
                    "id"]
            # Thumbnail
            if child['hasPoster']:
                thumbnail = "http://tv-vip.com/json/playlist/%s/poster.jpg" % child[
                    "id"]
            else:
                thumbnail = fanart
            thumbnail += head
            fanart += head

            if item.contentTitle == "Series":
                if child['name'] != "":
                    fulltitle = unicodedata.normalize('NFD', unicode(child['name'].split(" Temporada")[0], 'utf-8')) \
                        .encode('ASCII', 'ignore').decode("utf-8")
                    fulltitle = fulltitle.replace('-', '')
                    title = child['name'] + " (" + child['year'] + ")"
                else:
                    title = fulltitle = child['id'].capitalize()
                if "Temporada" not in title:
                    title += "     [Temporadas: [COLOR gold]" + str(
                        child['numberOfSeasons']) + "[/COLOR]]"
                elif item.title == "Más Vistas":
                    title = title.replace("- Temporada", "--- Temporada")
            else:
                if data['name'] != "":
                    fulltitle = unicodedata.normalize('NFD', unicode(data['name'], 'utf-8')).encode('ASCII', 'ignore') \
                        .decode("utf-8")
                    if child['seasonNumber']:
                        title = data['name'] + " --- Temporada " + child['seasonNumber'] + \
                                "  [COLOR gold](" + str(child['number']) + ")[/COLOR]"
                    else:
                        title = child['name'] + "  [COLOR gold](" + str(
                            child['number']) + ")[/COLOR]"
                else:
                    fulltitle = unicodedata.normalize('NFD', unicode(data['id'], 'utf-8')).encode('ASCII', 'ignore') \
                        .decode("utf-8")
                    if child['seasonNumber']:
                        title = data['id'].capitalize() + " --- Temporada " + child['seasonNumber'] + \
                                "  [COLOR gold](" + str(child['number']) + ")[/COLOR]"
                    else:
                        title = data['id'].capitalize(
                        ) + "  [COLOR gold](" + str(
                            child['number']) + ")[/COLOR]"
            if not child['playListChilds']:
                action = "episodios"
            else:
                action = "series"
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=bbcode_kodi2html(title),
                     url=url,
                     server="",
                     thumbnail=thumbnail,
                     fanart=fanart,
                     fulltitle=fulltitle,
                     infoLabels=infolabels,
                     contentTitle=fulltitle,
                     context="25",
                     viewmode="movie_with_plot",
                     folder=True))
            if len(itemlist) == len(data["sortedPlaylistChilds"]
                                    ) and item.contentTitle != "Series":

                itemlist.sort(key=lambda item: item.title, reverse=True)
                if config.get_videolibrary_support():
                    itemlist.append(
                        Item(channel=item.channel,
                             title="Añadir esta serie a la videoteca",
                             url=item.url,
                             action="add_serie_to_library",
                             show=data['name'],
                             text_color="green",
                             extra="series_library"))

    if item.title == "Últimas Series": return itemlist
    if item.title == "Lista de Series A-Z":
        itemlist.sort(key=lambda item: item.fulltitle)

    if data["sortedRepoChilds"] and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="**VÍDEOS RELACIONADOS/MISMA TEMÁTICA**",
                 text_color="blue",
                 text_bold=True,
                 action="",
                 folder=False))
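    # Related videos ("sortedRepoChilds") are listed after the header item above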
    for child in data["sortedRepoChilds"]:
        infolabels = {}

        if child['description']:
            infolabels['plot'] = child['description']
        else:
            infolabels['plot'] = data['description']
        infolabels['year'] = data['year']
        if not child['tags']:
            infolabels['genre'] = ', '.join([x.strip() for x in data['tags']])
        else:
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rate'].replace(',', '.')
        infolabels['duration'] = child['duration']
        if child['cast']: infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']

        url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
        # Fanart
        if child['hashBackground']:
            fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child[
                "id"]
        else:
            fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child[
                "id"]
        # Thumbnail
        if child['hasPoster']:
            thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child[
                "id"]
        else:
            thumbnail = fanart
        thumbnail += head
        fanart += head

        if child['height'] < 720:
            quality = "[B]  [SD][/B]"
        elif child['height'] < 1080:
            quality = "[B]  [720p][/B]"
        elif child['height'] >= 1080:
            quality = "[B]  [1080p][/B]"
        fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
            .decode("utf-8")

        if child['name'] == "":
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        if child['year']:
            title += " (" + child['year'] + ")"
        title += quality

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=bbcode_kodi2html(title),
                 url=url,
                 server="",
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 infoLabels=infolabels,
                 contentTitle=fulltitle,
                 context="25",
                 viewmode="movie_with_plot",
                 folder=True))
    if item.extra == "new":
        itemlist.sort(key=lambda item: item.title, reverse=True)

    return itemlist
Example #3
def findvideos(item):
    logger.info()
    itemlist = []

    # If the function is called from a series in the video library
    if item.extra.startswith("http"): item.url = item.extra
    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load(data)
    id = urllib.quote(data['id'])
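    # Each profile provides a video path plus a list of mirror servers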
    for child in data["profiles"].keys():
        videopath = urllib.quote(data["profiles"][child]['videoUri'])
        for i in range(0, len(data["profiles"][child]['servers'])):
            url = data["profiles"][child]['servers'][i]['url'] + videopath
            size = "  " + data["profiles"][child]["sizeHuman"]
            resolution = " [" + (
                data["profiles"][child]['videoResolution']) + "]"
            title = "Ver vídeo en " + resolution.replace(
                '1920x1080', 'HD-1080p')
            if i == 0:
                title += size + " [COLOR purple]Mirror " + str(i +
                                                               1) + "[/COLOR]"
            else:
                title += size + " [COLOR green]Mirror " + str(i +
                                                              1) + "[/COLOR]"
            # Put the higher quality links at the top of the list
            if data["profiles"][child]["profileId"] == "default":
                itemlist.insert(
                    i,
                    item.clone(action="play",
                               server="directo",
                               title=bbcode_kodi2html(title),
                               url=url,
                               contentTitle=item.fulltitle,
                               viewmode="list",
                               extra=id,
                               folder=False))
            else:
                itemlist.append(
                    item.clone(action="play",
                               server="directo",
                               title=bbcode_kodi2html(title),
                               url=url,
                               contentTitle=item.fulltitle,
                               viewmode="list",
                               extra=id,
                               folder=False))

    itemlist.append(
        item.clone(channel="trailertools",
                   action="buscartrailer",
                   title="Buscar Tráiler",
                   text_color="magenta"))
    if len(itemlist) > 0 and item.extra == "":
        if config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir enlaces a la videoteca",
                     text_color="green",
                     contentTitle=item.fulltitle,
                     url=item.url,
                     action="add_pelicula_to_library",
                     infoLabels={'title': item.fulltitle},
                     extra="findvideos",
                     fulltitle=item.fulltitle))

    return itemlist
Example #4
def episodios(item):
    logger.info()
    itemlist = []

    if item.extra == "ultimos":
        data = httptools.downloadpage(item.url).data
        item.url = scrapertools.find_single_match(
            data, '<a href="([^"]+)" class="h1-like media-title"')
        item.url += "/episodios"

    data = httptools.downloadpage(item.url).data
    data_season = data[:]

    if "episodios" in item.extra or not __menu_info__ or item.path:
        action = "findvideos"
    else:
        action = "menu_info_episode"

    seasons = scrapertools.find_multiple_matches(
        data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
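    # The first season is parsed from the page already downloaded; the remaining seasons are fetched from their own URLs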
    for i, url in enumerate(seasons):
        if i != 0:
            data_season = httptools.downloadpage(url, add_referer=True).data
        patron = '<div class="ep-list-number">.*?href="([^"]+)">([^<]+)</a>.*?<span class="name">([^<]+)</span>'
        matches = scrapertools.find_multiple_matches(data_season, patron)
        for scrapedurl, episode, scrapedtitle in matches:
            new_item = item.clone(action=action,
                                  url=scrapedurl,
                                  text_color=color2,
                                  contentType="episode")
            new_item.contentSeason = episode.split("x")[0]
            new_item.contentEpisodeNumber = episode.split("x")[1]

            new_item.title = episode + " - " + scrapedtitle
            new_item.extra = "episode"
            if "episodios" in item.extra or item.path:
                new_item.extra = "episode|"
            itemlist.append(new_item)

    if "episodios" not in item.extra and not item.path:
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    itemlist.reverse()
    if "episodios" not in item.extra and not item.path:
        id = scrapertools.find_single_match(item.url, '/(\d+)/')
        data_trailer = httptools.downloadpage(
            host + "/media/trailer?idm=%s&mediaType=1" % id).data
        item.infoLabels["trailer"] = jsontools.load(
            data_trailer)["video"]["url"]
        itemlist.append(
            item.clone(channel="trailertools",
                       action="buscartrailer",
                       title="Buscar Tráiler",
                       text_color="magenta"))
        if config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     action="add_serie_to_library",
                     text_color=color5,
                     title="Añadir serie a la videoteca",
                     show=item.show,
                     thumbnail=item.thumbnail,
                     url=item.url,
                     fulltitle=item.fulltitle,
                     fanart=item.fanart,
                     extra="episodios###episodios",
                     contentTitle=item.fulltitle))

    return itemlist
Example #5
def findvideos(item):
    logger.info()
    itemlist = []
    item.text_color = color3

    # Fill the language and quality dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    if item.extra != "library":
        try:
            from core import tmdb
            tmdb.set_infoLabels(item, __modo_grafico__)
        except:
            pass

    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:

        if servidor_num == '94' and 'stormo.tv' not in url:
            url = "http://tusfiles.org/?%s" % url

        if 'vimeo' in url:
            url += "|" + item.url

        if "filescdn" in url and url.endswith("htm"):
            url += "l"

        idioma = IDIOMAS.get(idiomas_videos.get(language))
        titulo = "%s  [" + idioma + "] [" + calidad_videos.get(calidad) + "]"
        itemlist.append(
            item.clone(action="play", title=titulo, url=url, extra=idioma))

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        idioma = IDIOMAS.get(idiomas_videos.get(language))
        titulo = "[%s]  [" + idioma + "] [" + calidad_videos.get(calidad) + "]"
        itemlist.append(
            item.clone(action="play", title=titulo, url=url, extra=idioma))

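    # Detect the server of every link and fill the "%s" placeholder of each title with its name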
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    itemlist.sort(key=lambda item: (item.extra, item.server))
    if itemlist:
        if "trailer" not in item.infoLabels:
            trailer_url = scrapertools.find_single_match(
                data, 'class="n-movie-trailer">([^<]+)</span>')
            item.infoLabels['trailer'] = trailer_url.replace(
                "youtu.be/", "http://www.youtube.com/watch?v=")

        itemlist.append(
            item.clone(channel="trailertools",
                       action="buscartrailer",
                       title="Buscar Tráiler",
                       text_color="magenta",
                       context=""))
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(
                    Item(channel=item.channel,
                         title="Añadir película a la videoteca",
                         action="add_pelicula_to_library",
                         url=item.url,
                         text_color="green",
                         infoLabels={'title': item.fulltitle},
                         fulltitle=item.fulltitle,
                         extra="library"))

    return itemlist
Example #6
def episodios(item):
    logger.info()
    itemlist = list()

    # Download the page
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)

    # Extract the entries (folders)
    bloque = scrapertools.find_single_match(data,
                                            '<strong>Temporada:(.*?)</div>')
    matches = scrapertools.find_multiple_matches(bloque,
                                                 'href="([^"]+)">(.*?)</a>')

    for scrapedurl, scrapedtitle in matches:
        title = "Temporada %s" % scrapedtitle

        new_item = item.clone(action="", title=title, text_color=color2)
        new_item.infoLabels["season"] = scrapedtitle
        new_item.infoLabels["mediatype"] = "season"
        data_season = httptools.downloadpage(scrapedurl).data
        data_season = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data_season)
        patron = '<li class="media">.*?href="([^"]+)"(.*?)<div class="media-body">.*?href.*?>' \
                 '(.*?)</a>'
        matches = scrapertools.find_multiple_matches(data_season, patron)

        elementos = []
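        # Keep only the episodes whose status reports available links ("Enlaces Disponibles")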
        for url, status, title in matches:
            if "Enlaces Disponibles" not in status:
                continue
            elementos.append(title)
            item_epi = item.clone(action="findvideos",
                                  url=url,
                                  text_color=color1)
            item_epi.infoLabels["season"] = scrapedtitle
            episode = scrapertools.find_single_match(title, 'Capitulo (\d+)')
            titulo = scrapertools.find_single_match(
                title, 'Capitulo \d+\s*-\s*(.*?)$')
            item_epi.infoLabels["episode"] = episode
            item_epi.infoLabels["mediatype"] = "episode"
            item_epi.title = "%sx%s  %s" % (scrapedtitle, episode.zfill(2),
                                            titulo)

            itemlist.insert(0, item_epi)
        if elementos:
            itemlist.insert(0, new_item)

    if item.infoLabels["tmdb_id"] and itemlist:
        try:
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    if itemlist:
        if config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir serie a la videoteca",
                     text_color="green",
                     filtro=True,
                     action="add_serie_to_library",
                     fulltitle=item.fulltitle,
                     extra="episodios",
                     url=item.url,
                     infoLabels=item.infoLabels,
                     show=item.show))
    else:
        itemlist.append(
            item.clone(title="Serie sin episodios disponibles",
                       action="",
                       text_color=color3))
    return itemlist
Example #7
def findvideos(item):
    logger.info()
    itemlist = []
    item.text_color = color2

    # Download the page
    data = httptools.downloadpage(item.url).data
    sinopsis = scrapertools.find_single_match(
        data, '<h2>Sinopsis</h2>.*?>(.*?)</p>')
    item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
    # Search on TMDb if it has not been done before
    if item.extra != "eroticas":
        if item.extra != "library":
            year = scrapertools.find_single_match(
                data, 'Año de lanzamiento.*?"ab">(\d+)')
            if year:
                try:
                    item.infoLabels['year'] = year
                    # Fetch the basic data of all the movies using multiple threads
                    tmdb.set_infoLabels(item, __modo_grafico__)
                except:
                    pass
        trailer_url = scrapertools.find_single_match(
            data, 'id="trailerpro">.*?src="([^"]+)"')
        item.infoLabels["trailer"] = trailer_url

    patron = '<td><a href="([^"]+)".*?title="([^"]+)".*?<td>([^"]+)<\/td><td>([^"]+)<\/td>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, server, idioma, calidad in matches:
        if server == "Embed":
            server = "Nowvideo"
        if server == "Ul":
            server = "Uploaded"
        title = "%s  [%s][%s]" % (server, idioma, calidad)
        itemlist.append(item.clone(action="play", title=title, url=url))

    patron = 'id="(embed[0-9]*)".*?<div class="calishow">(.*?)<.*?src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for id_embed, calidad, url in matches:
        title = scrapertools.find_single_match(
            url, "(?:http://|https://|//)(.*?)(?:embed.|videoembed|)/")
        if re.search(r"(?i)inkapelis|goo.gl", title):
            title = "Directo"
        idioma = scrapertools.find_single_match(
            data, 'href="#%s".*?>([^<]+)<' % id_embed)
        title = "%s  [%s][%s]" % (title.capitalize(), idioma, calidad)
        itemlist.append(item.clone(action="play", title=title, url=url))

    if itemlist:
        if not config.get_setting('menu_trailer', item.channel):
            itemlist.append(
                item.clone(channel="trailertools",
                           action="buscartrailer",
                           title="Buscar Tráiler",
                           text_color="magenta",
                           context=""))
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(
                    Item(channel=item.channel,
                         title="Añadir película a la videoteca",
                         action="add_pelicula_to_library",
                         url=item.url,
                         fulltitle=item.fulltitle,
                         infoLabels={'title': item.fulltitle},
                         text_color="green",
                         extra="library"))

    return itemlist
Example #8
def findvideos(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  httptools.downloadpage(item.url).data)
    patron = 'vars.title =(.*?)};'
    try:
        data_dict = jsontools.load(scrapertools.get_match(data, patron) + '}')
    except:
        return itemlist  # Return an empty list

    lista_servers = servertools.get_servers_list()

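    # Each JSON "link" entry carries season/episode numbers, a language label, a quality tag and the raw URL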
    for link in data_dict["link"]:
        if item.contentType == 'episode' \
                and (item.contentSeason != link['season'] or item.contentEpisodeNumber != link['episode']):
            # When looking for links of one episode, discard those that do not belong to it
            continue

        url = link["url"]
        flag = scrapertools.find_single_match(link["label"],
                                              '(\s*\<img src=.*\>)')
        idioma = link["label"].replace(flag, "")
        if link["quality"] != "?":
            calidad = (' [' + link["quality"] + ']')
        else:
            calidad = ""
        video = find_videos(link["url"], lista_servers)

        if video["servidor"] != "":
            servidor = video["servidor"]
            url = video["url"]
            title = "Ver en " + servidor.capitalize(
            ) + calidad + ' (' + idioma + ')'
            itemlist.append(
                item.clone(action="play",
                           viewmode="list",
                           server=servidor,
                           title=title,
                           text_color="0xFF994D00",
                           url=url,
                           folder=False))

    if config.get_videolibrary_support(
    ) and itemlist and item.contentType == "movie":
        infoLabels = {
            'tmdb_id': item.infoLabels['tmdb_id'],
            'title': item.infoLabels['title']
        }
        itemlist.append(
            Item(
                channel=item.channel,
                title="Añadir esta película a la videoteca",
                action="add_pelicula_to_library",
                url=item.url,
                infoLabels=infoLabels,
                text_color="0xFFe5ffcc",
                thumbnail=
                'https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png'
            ))

    return itemlist
Example #9
def episodios(item):
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data_lista = scrapertools.find_single_match(
        data,
        '<div class="su-list su-list-style-"><ulclass="lista-capitulos">.+?<\/div><\/p>'
    )
    if '&#215;' in data_lista:
        data_lista = data_lista.replace('&#215;', 'x')

    show = item.title
    if "[Latino]" in show:
        show = show.replace("[Latino]", "")
    if "Ranma" in show:
        patron_caps = '<\/i> <strong>.+?Capitulo ([^"]+)\: <a .+? href="([^"]+)">([^"]+)<\/a>'
    else:
        patron_caps = '<\/i> <strong>Capitulo ([^"]+)x.+?\: <a .+? href="([^"]+)">([^"]+)<\/a>'
    matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
    scrapedplot = scrapertools.find_single_match(
        data, '<strong>Sinopsis<\/strong><strong>([^"]+)<\/strong><\/pre>')
    number = 0
    ncap = 0
    A = 1
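    # Number the episodes within each season; "Ranma" chapters are renumbered through renumbertools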
    for temp, link, name in matches:
        if A != temp:
            number = 0
        if "Ranma" in show:
            number = int(temp)
            temp = str(1)
        else:
            number = number + 1
        if number < 10:
            capi = "0" + str(number)
        else:
            capi = str(number)
        if "Ranma" in show:
            season = 1
            episode = number
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, season, episode)
            date = name
            if episode < 10:
                capi = "0" + str(episode)
            else:
                capi = episode
            title = str(season) + "x" + str(
                capi
            ) + " - " + name  # "{0}x{1} - ({2})".format(season, episode, date)
        else:
            title = str(temp) + "x" + capi + " - " + name
        url = link
        A = temp
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=url,
                 show=show))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir " + show + " a la videoteca",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=show))

    return itemlist
Example #10
def episodios(item):
    logger.info()
    itemlist = []

    # Download the page
    idserie = ''
    data = httptools.downloadpage(item.url).data
    logger.debug("data=" + data)

    patrontemporada = '<div class="checkSeason"[^>]+>([^<]+)<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
    matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)

    idserie = scrapertools.find_single_match(data,
                                             '<div id="layout4" class="itemProfile modelContainer" data-model="serie" data-id="(\d+)"')

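    # Walk every season block and extract its episodes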
    for nombre_temporada, bloque_episodios in matchestemporadas:
        logger.debug("nombre_temporada=" + nombre_temporada)
        logger.debug("bloque_episodios=" + bloque_episodios)

        # Extract the episodes
        patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">([^<]+)</span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
        matches = re.compile(patron, re.DOTALL).findall(bloque_episodios)

        for scrapedurl, numero, scrapedtitle, info, visto in matches:
            # visto_string = "[visto] " if visto.strip()=="active" else ""
            if visto.strip() == "active":
                visto_string = "[visto] "
            else:
                visto_string = ""

            title = visto_string + nombre_temporada.replace("Temporada ", "").replace("Extras",
                                                                                      "Extras 0") + "x" + numero + " " + scrapertools.htmlclean(
                scrapedtitle)
            thumbnail = item.thumbnail
            fanart = item.fanart
            plot = ""
            # http://www.pordede.com/peli/the-lego-movie
            # http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1
            # http://www.pordede.com/links/viewepisode/id/475011?popup=1
            epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)")
            url = "http://www.pordede.com/links/viewepisode/id/" + epid
            itemlist.append(
                Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                     fulltitle=title, fanart=fanart, show=item.show))

            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

    if config.get_videolibrary_support():
        # With year and rating the series cannot be updated correctly; if the rating also changes, another folder would be created
        # Without year and without rating:
        show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show)
        # Without year:
        # show = re.sub(r"\s\(\d+\)", "", item.show)
        # Without rating:
        # show = re.sub(r"\s\(\d+\.\d+\)", "", item.show)
        itemlist.append(Item(channel='pordede', title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios###", show=show))
        itemlist.append(Item(channel='pordede', title="Descargar todos los episodios de la serie", url=item.url,
                             action="download_all_episodes", extra="episodios", show=show))
        itemlist.append(Item(channel='pordede', title="Marcar como Pendiente", tipo="serie", idtemp=idserie, valor="1",
                             action="pordede_check", show=show))
        itemlist.append(Item(channel='pordede', title="Marcar como Siguiendo", tipo="serie", idtemp=idserie, valor="2",
                             action="pordede_check", show=show))
        itemlist.append(Item(channel='pordede', title="Marcar como Finalizada", tipo="serie", idtemp=idserie, valor="3",
                             action="pordede_check", show=show))
        itemlist.append(Item(channel='pordede', title="Marcar como Favorita", tipo="serie", idtemp=idserie, valor="4",
                             action="pordede_check", show=show))
        itemlist.append(Item(channel='pordede', title="Quitar marca", tipo="serie", idtemp=idserie, valor="0",
                             action="pordede_check", show=show))

    return itemlist
Example #11
def get_temporadas(item):
    logger.info()

    itemlist = []
    infoLabels = {}

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  httptools.downloadpage(item.url).data)
    patron = 'vars.title =(.*?)};'
    try:
        data_dict = jsontools.load(scrapertools.get_match(data, patron) + '}')
    except:
        return itemlist  # Return an empty list

    if item.extra == "serie_add":
        itemlist = get_episodios(item)

    else:
        if len(data_dict["season"]) == 1:
            # If there is only one season ...
            item.infoLabels['season'] = data_dict["season"][0]["number"]
            itemlist = get_episodios(item)

        else:  # ... or if there is more than one season
            item.viewcontent = "seasons"
            data_dict["season"].sort(
                key=lambda x:
                (x['number']))  # sort by season number
            for season in data_dict["season"]:
                # filter links by season
                enlaces = filter(lambda l: l["season"] == season['number'],
                                 data_dict["link"])
                if enlaces:
                    item.infoLabels['season'] = season['number']
                    title = '%s Temporada %s' % (item.title, season['number'])

                    itemlist.append(
                        item.clone(action="get_episodios",
                                   title=title,
                                   text_color="0xFFFFCE9C",
                                   viewmode="movie_with_plot"))

                    # Fetch the data of all the seasons using multiple threads
                    tmdb.set_infoLabels(itemlist)

        if config.get_videolibrary_support() and itemlist:
            infoLabels = {
                'tmdb_id': item.infoLabels['tmdb_id'],
                'tvdb_id': item.infoLabels['tvdb_id'],
                'imdb_id': item.infoLabels['imdb_id']
            }
            itemlist.append(
                Item(
                    channel=item.channel,
                    title="Añadir esta serie a la videoteca",
                    text_color="0xFFe5ffcc",
                    action="add_serie_to_library",
                    extra='get_episodios###serie_add',
                    url=item.url,
                    contentSerieName=data_dict["title"],
                    infoLabels=infoLabels,
                    thumbnail=
                    'https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png'
                ))

    return itemlist
Example #12
def findvideos(item):
    logger.info()
    if (item.extra and item.extra != "findvideos") or item.path:
        return epienlaces(item)

    itemlist = []
    item.text_color = color3

    data = get_data(item.url)
    item.plot = scrapertools.find_single_match(
        data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(
        data,
        '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year:
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    old_format = False
    # Torrent pattern, old format
    if "Enlaces de descarga</div>" in data:
        old_format = True
        matches = scrapertools.find_multiple_matches(
            data, 'class="separate3 magnet".*?href="([^"]+)"')
        for scrapedurl in matches:
            scrapedurl = scrapertools.find_single_match(
                scrapedurl, '(magnet.*)')
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
            title = "[Torrent] "
            title += urllib.unquote(
                scrapertools.find_single_match(scrapedurl,
                                               'dn=(.*?)(?i)WWW.DescargasMix'))
            itemlist.append(
                item.clone(action="play",
                           server="torrent",
                           title=title,
                           url=scrapedurl,
                           text_color="green"))

    # Online pattern
    data_online = scrapertools.find_single_match(
        data, 'Ver online</div>(.*?)<div class="section-box related-posts">')
    if data_online:
        title = "Enlaces Online"
        if '"l-latino2"' in data_online:
            title += " [LAT]"
        elif '"l-esp2"' in data_online:
            title += " [ESP]"
        elif '"l-vose2"' in data_online:
            title += " [VOSE]"

        patron = 'make_links.*?,[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for i, code in enumerate(matches):
            enlace = mostrar_enlaces(code)
            enlaces = servertools.findvideos(data=enlace[0])
            if enlaces and "peliculas.nu" not in enlaces:
                if i == 0:
                    extra_info = scrapertools.find_single_match(
                        data_online, '<span class="tooltiptext">(.*?)</span>')
                    size = scrapertools.find_single_match(
                        data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()

                    if size:
                        title += " [%s]" % size
                    new_item = item.clone(title=title,
                                          action="",
                                          text_color=color1)
                    if extra_info:
                        extra_info = scrapertools.htmlclean(extra_info)
                        new_item.infoLabels["plot"] = extra_info
                        new_item.title += " +INFO"
                    itemlist.append(new_item)

                title = "   Ver vídeo en " + enlaces[0][2]
                itemlist.append(
                    item.clone(action="play",
                               server=enlaces[0][2],
                               title=title,
                               url=enlaces[0][1]))
    scriptg = scrapertools.find_single_match(
        data, "<script type='text/javascript'>str='([^']+)'")
    if scriptg:
        gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
        url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"')
        if url:
            itemlist.append(
                item.clone(
                    action="play",
                    server="directo",
                    url=url,
                    extra=item.url,
                    title="   Ver vídeo en Googlevideo (Máxima calidad)"))

    # Download pattern
    patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \
             '(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-' \
             'posts">)'
    bloques_descarga = scrapertools.find_multiple_matches(data, patron)
    for title_bloque, bloque in bloques_descarga:
        if title_bloque == "Ver online":
            continue
        if '"l-latino2"' in bloque:
            title_bloque += " [LAT]"
        elif '"l-esp2"' in bloque:
            title_bloque += " [ESP]"
        elif '"l-vose2"' in bloque:
            title_bloque += " [VOSE]"

        extra_info = scrapertools.find_single_match(
            bloque, '<span class="tooltiptext">(.*?)</span>')
        size = scrapertools.find_single_match(bloque,
                                              '(?i)TAMAÑO:\s*(.*?)<').strip()

        if size:
            title_bloque += " [%s]" % size
        new_item = item.clone(title=title_bloque, action="", text_color=color1)
        if extra_info:
            extra_info = scrapertools.htmlclean(extra_info)
            new_item.infoLabels["plot"] = extra_info
            new_item.title += " +INFO"
        itemlist.append(new_item)

        if '<div class="subiendo">' in bloque:
            itemlist.append(
                item.clone(title="   Los enlaces se están subiendo",
                           action=""))
            continue
        patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedserver, scrapedurl in matches:
            if scrapedserver == "ul" or scrapedserver == "uploaded":
                scrapedserver = "uploadedto"
            titulo = unicode(scrapedserver,
                             "utf-8").capitalize().encode("utf-8")
            if titulo == "Magnet" and old_format:
                continue
            elif titulo == "Magnet" and not old_format:
                title = "   Enlace Torrent"
                scrapedurl = scrapertools.find_single_match(
                    scrapedurl, '(magnet.*)')
                scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '',
                                                   scrapedurl))
                itemlist.append(
                    item.clone(action="play",
                               server="torrent",
                               title=title,
                               url=scrapedurl,
                               text_color="green"))
                continue
            if servertools.is_server_enabled(scrapedserver):
                try:
                    servers_module = __import__("servers." + scrapedserver)
                    # Get the number of links
                    urls = mostrar_enlaces(scrapedurl)
                    numero = str(len(urls))
                    titulo = "   %s - Nº enlaces: %s" % (titulo, numero)
                    itemlist.append(
                        item.clone(action="enlaces",
                                   title=titulo,
                                   extra=scrapedurl,
                                   server=scrapedserver))
                except:
                    pass

    itemlist.append(
        item.clone(channel="trailertools",
                   title="Buscar Tráiler",
                   action="buscartrailer",
                   context="",
                   text_color="magenta"))
    if item.extra != "findvideos" and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir a la videoteca",
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 url=item.url,
                 infoLabels={'title': item.fulltitle},
                 fulltitle=item.fulltitle,
                 text_color="green"))

    return itemlist
Example #13
def episodios(item):
    logger.info()

    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "",
                  data)

    patron = '<li class="clearfix gutterVertical20"><a href="([^"]+)".*?><small>(.*?)</small>.*?' \
             '<span class.+?>(.*?)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)

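    # Build one item per episode, keeping only the requested season when it is already known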
    for scrapedurl, scrapedtitle, scrapedname in matches:
        # logger.info("scrap {}".format(scrapedtitle))
        patron = 'Season\s+(\d+),\s+Episode\s+(\d+)'
        match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
        season, episode = match[0]

        if 'season' in item.infoLabels and int(
                item.infoLabels['season']) != int(season):
            continue

        title = "%sx%s: %s" % (season, episode.zfill(2),
                               scrapertools.unescape(scrapedname))
        new_item = item.clone(title=title,
                              url=scrapedurl,
                              action="findvideos",
                              text_color=color3,
                              fulltitle=title,
                              contentType="episode")
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}

        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)

        itemlist.append(new_item)

    # TODO: do not do this when adding to the video library
    if not item.extra:
        # Fetch the data of all the episodes of the season using multiple threads
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, add it to the item title
                i.title = "%sx%s %s" % (i.infoLabels['season'],
                                        i.infoLabels['episode'],
                                        i.infoLabels['title'])
            if 'poster_path' in i.infoLabels:
                # If the episode has its own image, use it instead of the poster
                i.thumbnail = i.infoLabels['poster_path']

    itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                  reverse=config.get_setting('orden_episodios', __channel__))

    # Option "Añadir esta serie a la videoteca"
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=__channel__,
                 title="Añadir esta serie a la videoteca",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show,
                 category="Series",
                 text_color=color1,
                 thumbnail=thumbnail_host,
                 fanart=fanart_host))

    return itemlist
Example #14
def getmainlist():
    logger.info()
    itemlist = list()

    # Add the channels that make up the main menu
    itemlist.append(
        Item(title=config.get_localized_string(30130),
             channel="news",
             action="mainlist",
             thumbnail=config.get_thumb("thumb_news.png"),
             category=config.get_localized_string(30119),
             viewmode="thumbnails",
             context=[{
                 "title": "Configurar Novedades",
                 "channel": "news",
                 "action": "menu_opciones",
                 "goto": True
             }]))

    itemlist.append(
        Item(title=config.get_localized_string(30118),
             channel="channelselector",
             action="getchanneltypes",
             thumbnail=config.get_thumb("thumb_channels.png"),
             category=config.get_localized_string(30119),
             viewmode="thumbnails"))

    itemlist.append(
        Item(title=config.get_localized_string(30103),
             channel="search",
             action="mainlist",
             thumbnail=config.get_thumb("thumb_search.png"),
             category=config.get_localized_string(30119),
             viewmode="list",
             context=[{
                 "title": "Configurar Buscador",
                 "channel": "search",
                 "action": "opciones",
                 "goto": True
             }]))

    itemlist.append(
        Item(title=config.get_localized_string(30102),
             channel="favorites",
             action="mainlist",
             thumbnail=config.get_thumb("thumb_favorites.png"),
             category=config.get_localized_string(30102),
             viewmode="thumbnails"))

    if config.get_videolibrary_support():
        itemlist.append(
            Item(title=config.get_localized_string(30131),
                 channel="videolibrary",
                 action="mainlist",
                 thumbnail=config.get_thumb("thumb_videolibrary.png"),
                 category=config.get_localized_string(30119),
                 viewmode="thumbnails",
                 context=[{
                     "title": "Configurar Videoteca",
                     "channel": "videolibrary",
                     "action": "channel_config"
                 }]))

    itemlist.append(
        Item(title=config.get_localized_string(30101),
             channel="downloads",
             action="mainlist",
             thumbnail=config.get_thumb("thumb_downloads.png"),
             viewmode="list",
             context=[{
                 "title": "Configurar Descargas",
                 "channel": "setting",
                 "config": "downloads",
                 "action": "channel_config"
             }]))

    thumb_configuracion = "thumb_setting_%s.png" % 0  # config.get_setting("plugin_updates_available")

    itemlist.append(
        Item(title=config.get_localized_string(30100),
             channel="setting",
             action="mainlist",
             thumbnail=config.get_thumb(thumb_configuracion),
             category=config.get_localized_string(30100),
             viewmode="list"))
    # TODO: REVIEW THE HELP OPTION
    # itemlist.append(Item(title=config.get_localized_string(30104), channel="help", action="mainlist",
    #                      thumbnail=config.get_thumb("thumb_help.png"),
    #                      category=config.get_localized_string(30104), viewmode="list"))
    return itemlist
Example #15
def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)

    capitulos = []
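    # Flatten the JSON into [season, episode_data, season_id] triplets (empty fields when there are no seasons)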
    if data.get("b"):
        for child in data["b"]:
            for child2 in child["a"]:
                capitulos.append([child["season"], child2, child["id"]])
    else:
        for child in data.get("a", []):
            capitulos.append(['', child, ''])

    for season, child, id_season in capitulos:
        infoLabels = item.infoLabels.copy()

        if child.get('runtime'):
            try:
                infoLabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if not season or not season.isdigit():
            season = scrapertools.find_single_match(child['name'], '(\d+)x\d+')
        try:
            infoLabels['season'] = int(season)
        except:
            infoLabels['season'] = 0

        if not child['episode']:
            episode = scrapertools.find_single_match(child['name'],
                                                     '\d+x(\d+)')
            if not episode:
                episode = "0"
            infoLabels['episode'] = int(episode)
        else:
            infoLabels['episode'] = int(child['episode'])
        infoLabels['mediatype'] = "episode"

        url = host % "movie/%s/movie.js" % child["id"]
        thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"]
        if id_season:
            fanart = host % "list/%s/background_1080.jpg" % id_season
        else:
            fanart = item.fanart

        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        try:
            title = fulltitle = child['name'].rsplit(
                " ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
        except:
            title = fulltitle = child['id'].replace("-", " ")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 contentTitle=fulltitle,
                 viewmode="movie",
                 show=item.show,
                 infoLabels=infoLabels,
                 video_urls=video_urls,
                 extra="episodios",
                 text_color=color3))

    itemlist.sort(key=lambda it:
                  (it.infoLabels["season"], it.infoLabels["episode"]),
                  reverse=True)
    if itemlist and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la videoteca",
                 text_color=color5,
                 url=item.url,
                 action="add_serie_to_library",
                 infoLabels=item.infoLabels,
                 show=item.show,
                 extra="episodios"))

    return itemlist
Example #16
def findvideos(item):
    logger.info()
    itemlist = []

    datas = httptools.downloadpage(item.url).data
    datas = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", datas)
    # logger.info(data)
    patron = '<a style="cursor:pointer; cursor: hand;" rel="([^"]+)".*?'
    patron += 'clearfix colores title_calidad">.*?<span>([^<]+)</span></a>'

    matches = re.compile(patron, re.DOTALL).findall(datas)

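    # pelispp/ultrapelis links expose direct file/label pairs; other (non-YouTube) links are resolved through servertools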
    for scrapedurl, servidores, in matches:
        if 'pelispp.com' in scrapedurl or 'ultrapelis' in scrapedurl:
            data = httptools.downloadpage(scrapedurl, headers=headers).data
            patronr = 'file: "([^"]+)",label:"([^"]+)",type'
            matchesr = re.compile(patronr, re.DOTALL).findall(data)
            for scrapedurl, label in matchesr:
                url = scrapedurl.replace('\\', '')
                language = 'latino'
                quality = label.decode('cp1252').encode('utf8')
                title = item.contentTitle + ' (' + str(label) + ')'
                thumbnail = item.thumbnail
                fanart = item.fanart
                itemlist.append(
                    item.clone(
                        action="play",
                        title=title,
                        url=url,
                        server='directo',
                        thumbnail=thumbnail,
                        fanart=fanart,
                        extra='directo',
                        quality=quality,
                        language=language,
                    ))
                itemlist.sort(key=lambda it: it.title, reverse=True)

        if 'youtube' not in scrapedurl:
            quality = scrapertools.find_single_match(
                datas,
                '<p class="hidden-xs hidden-sm">.*?class="magnet-download">([^<]+)p</a>'
            )
            title = "[COLOR green]%s[/COLOR] [COLOR yellow][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
                item.contentTitle, quality.upper(), servidores.capitalize())
            url = scrapedurl.replace('\\', '')
            thumbnail = item.thumbnail
            server = servertools.get_server_from_url(url)

            itemlist.append(
                item.clone(action='play',
                           title=title,
                           url=url,
                           quality=quality,
                           server=server,
                           text_color=color3,
                           thumbnail=thumbnail))

    for videoitem in itemlist:
        videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        videoitem.action = 'play'
        videoitem.fulltitle = item.title

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(
                channel=item.channel,
                title=
                '[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                url=item.url,
                action="add_pelicula_to_library",
                thumbnail=
                'https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/libreria.png',
                extra="findvideos",
                contentTitle=item.contentTitle))

    return itemlist
Example #17
0
def findvideos(item):
    logger.info()
    itemlist = []

    if not item.video_urls:
        data = httptools.downloadpage(item.url)
        if not data.sucess:
            itemlist.append(
                item.clone(title="Película no disponible", action=""))
            return itemlist
        data = jsontools.load(data.data)

        item.video_urls = []
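        # Build [url_template, height] pairs; the trailing "?%s" gets filled with the decoded token below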
        for k, v in data.get("video", {}).items():
            for vid in v:
                item.video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

    if item.video_urls:
        import random
        import base64
        item.video_urls.sort(key=lambda it: (it[1], random.random()),
                             reverse=True)
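        # Highest resolution first; random() shuffles mirrors that share the same height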
        i = 0
        calidad_actual = ""
        for vid, calidad in item.video_urls:
            title = "Ver vídeo en %sp" % calidad
            if calidad != calidad_actual:
                i = 0
                calidad_actual = calidad

            if i % 2 == 0:
                title += " [COLOR purple]Mirror %s[/COLOR] - %s" % (
                    str(i + 1), item.fulltitle)
            else:
                title += " [COLOR green]Mirror %s[/COLOR] - %s" % (
                    str(i + 1), item.fulltitle)
            url = vid % "%s" % base64.b64decode(
                "dHQ9MTQ4MDE5MDQ1MSZtbT1NRzZkclhFand6QmVzbmxSMHNZYXhBJmJiPUUwb1dVVVgx"
                "WTBCQTdhWENpeU9paUE=")
            itemlist.append(
                item.clone(title=title,
                           action="play",
                           url=url,
                           server="directo",
                           video_urls=""))
            i += 1

        if itemlist and item.extra == "" and config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir enlaces a la videoteca",
                     text_color=color5,
                     contentTitle=item.fulltitle,
                     url=item.url,
                     action="add_pelicula_to_library",
                     infoLabels={'title': item.fulltitle},
                     extra="findvideos",
                     fulltitle=item.fulltitle))

    return itemlist
Example #18
0
def findvideos(item):
    logger.info()
    itemlist = []
    dic_langs = {
        'esp': 'Español',
        'english': 'Ingles',
        'japo': 'Japones',
        'argentina': 'Latino',
        'ntfof': ''
    }
    dic_servers = {
        'ntfof': 'Servidor Desconocido',
        'stramango': 'streamango',
        'flasht': 'flashx'
    }

    data1 = downloadpage(item.url)
    patron = 'onclick="redir\(([^\)]+).*?'
    patron += '<img style="float:left" src="./[^/]+/([^\.]+).+?'
    patron += '<span[^>]+>([^<]+).*?'
    patron += '<img(.*?)onerror'
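    # Captures per link: redir() arguments, server icon name, quality text and the language-flag images block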

    if "Descarga:</h1>" in data1:
        list_showlinks = [('Online:', 'Online:</h1>(.*?)Descarga:</h1>'),
                          ('Download:', 'Descarga:</h1>(.*?)</section>')]
    else:
        list_showlinks = [('Online:', 'Online:</h1>(.*?)</section>')]

    for t in list_showlinks:
        data = scrapertools.find_single_match(data1, t[1])

        if data:
            itemlist.append(
                Item(title=t[0],
                     text_color=color3,
                     text_bold=True,
                     folder=False,
                     thumbnail=thumbnail_host))

            for redir, server, quality, langs in scrapertools.find_multiple_matches(
                    data, patron):  # , server, quality, langs
                redir = redir.split(",")
                url = redir[0][1:-1]
                id = redir[1][1:-1]
                # type = redir[2][1:-1]
                # url = url.split("','")[0] # [2] = 0 movies, [2] = 1 tvshows

                langs = scrapertools.find_multiple_matches(
                    langs, 'src="./images/([^\.]+)')
                idioma = dic_langs.get(langs[0], langs[0])
                subtitulos = dic_langs.get(langs[1], langs[1])
                if subtitulos:
                    idioma = "%s (Sub: %s)" % (idioma, subtitulos)

                if server in dic_servers: server = dic_servers[server]

                itemlist.append(
                    item.clone(url=url,
                               action="play",
                               language=idioma,
                               contentQuality=quality,
                               server=server,
                               title="    %s: %s [%s]" %
                               (server.capitalize(), idioma, quality)))

    if itemlist and config.get_videolibrary_support() and "library" not in item.extra:
        if item.contentType == 'movie':
            itemlist.append(
                item.clone(title="Añadir película a la videoteca",
                           action="add_pelicula_to_library",
                           text_color=color1,
                           contentTitle=item.contentTitle,
                           extra="library",
                           thumbnail=thumbnail_host))
        else:
            # http://www.wopelis.com/serie.php?id=275641
            item.url = "http://www.wopelis.com/serie.php?id=" + id
            item.contentSeason = 0
            item.contentEpisodeNumber = 0
            # logger.error(item)
            itemlist.append(
                item.clone(title="Añadir esta serie a la videoteca",
                           action="add_serie_to_library",
                           extra="episodios###library",
                           text_color=color1,
                           thumbnail=thumbnail_host))

    return itemlist
Example #19
0
def findvideos(item):
    logger.info()
    itemlist = list()

    try:
        filtro_idioma = config.get_setting("filterlanguages", item.channel)
        filtro_enlaces = config.get_setting("filterlinks", item.channel)
    except:
        filtro_idioma = 3
        filtro_enlaces = 2

    dict_idiomas = {'Castellano': 2, 'Latino': 1, 'Subtitulada': 0}
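    # Numeric codes for each language, passed to bloque_enlaces together with the filter setting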

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)

    if not item.infoLabels["tmdb_id"]:
        year = scrapertools.find_single_match(data, 'Lanzamiento.*?(\d{4})')

        if year != "":
            item.infoLabels['filtro'] = ""
            item.infoLabels['year'] = int(year)

            # Extend metadata from TMDB
            try:
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass

    if not item.infoLabels['plot']:
        plot = scrapertools.find_single_match(data,
                                              '<p class="plot">(.*?)</p>')
        item.infoLabels['plot'] = plot

    if filtro_enlaces != 0:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas,
                                      "Ver Online", item)
        if list_enlaces:
            itemlist.append(
                item.clone(action="",
                           title="Enlaces Online",
                           text_color=color1,
                           text_bold=True))
            itemlist.extend(list_enlaces)
    if filtro_enlaces != 1:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas,
                                      "Descarga Directa", item)
        if list_enlaces:
            itemlist.append(
                item.clone(action="",
                           title="Enlaces Descarga",
                           text_color=color1,
                           text_bold=True))
            itemlist.extend(list_enlaces)

    # Option "Add this movie to the XBMC video library"
    if itemlist and item.contentType == "movie":
        contextual = config.is_xbmc()
        itemlist.append(
            item.clone(channel="trailertools",
                       title="Buscar Tráiler",
                       action="buscartrailer",
                       context="",
                       text_color="magenta",
                       contextual=contextual))
        if item.extra != "findvideos":
            if config.get_videolibrary_support():
                itemlist.append(
                    Item(channel=item.channel,
                         title="Añadir enlaces a la videoteca",
                         text_color="green",
                         filtro=True,
                         action="add_pelicula_to_library",
                         fulltitle=item.fulltitle,
                         extra="findvideos",
                         url=item.url,
                         infoLabels=item.infoLabels,
                         contentType=item.contentType,
                         contentTitle=item.contentTitle,
                         show=item.show))
    elif not itemlist and item.contentType == "movie":
        itemlist.append(
            item.clone(title="Película sin enlaces disponibles",
                       action="",
                       text_color=color3))

    return itemlist
Example #20
0
def findvideos(item):
    logger.info()
    itemlist = []

    try:
        filtro_idioma = config.get_setting("filterlanguages", item.channel)
        filtro_enlaces = config.get_setting("filterlinks", item.channel)
    except:
        filtro_idioma = 3
        filtro_enlaces = 2
    dict_idiomas = {'Español': 2, 'Latino': 1, 'Subtitulado': 0}

    # Look up the synopsis
    data = httptools.downloadpage(item.url).data
    year = scrapertools.find_single_match(item.title, "\(([0-9]+)")

    if year and item.extra != "library":
        item.infoLabels['year'] = int(year)
        # Extend metadata from TMDB
        if not item.infoLabels['plot']:
            try:
                tmdb.set_infoLabels(item, __modo_grafico__)
            except:
                pass

    if not item.infoLabels.get('plot'):
        plot = scrapertools.find_single_match(
            data, '<div class="sinopsis"><p>(.*?)</p>')
        item.infoLabels['plot'] = plot

    if filtro_enlaces != 0:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas,
                                      "online", item)
        if list_enlaces:
            itemlist.append(
                item.clone(action="",
                           title="Enlaces Online",
                           text_color=color1,
                           text_bold=True))
            itemlist.extend(list_enlaces)
    if filtro_enlaces != 1:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas,
                                      "descarga", item)
        if list_enlaces:
            itemlist.append(
                item.clone(action="",
                           title="Enlaces Descarga",
                           text_color=color1,
                           text_bold=True))
            itemlist.extend(list_enlaces)

    if itemlist:
        itemlist.append(
            item.clone(channel="trailertools",
                       title="Buscar Tráiler",
                       action="buscartrailer",
                       context="",
                       text_color="magenta"))
        # Option "Add this movie to the video library"
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(
                    Item(channel=item.channel,
                         title="Añadir a la videoteca",
                         text_color="green",
                         filtro=True,
                         action="add_pelicula_to_library",
                         url=item.url,
                         infoLabels={'title': item.fulltitle},
                         fulltitle=item.fulltitle,
                         extra="library"))

    else:
        itemlist.append(
            item.clone(title="No hay enlaces disponibles",
                       action="",
                       text_color=color3))

    return itemlist
Example #21
0
def menu_info(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    item.infoLabels["tmdb_id"] = scrapertools.find_single_match(data, '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
    item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})')
    item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>')
    item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(data,
                                                                            '<a itemprop="genre"[^>]+>([^<]+)</a>'))
    if __modo_grafico__:
        tmdb.set_infoLabels_item(item, __modo_grafico__)

    action = "findvideos"
    title = "Ver enlaces"
    if item.contentType == "tvshow":
        action = "episodios"
        title = "Ver capítulos"
    itemlist.append(item.clone(action=action, title=title))

    carpeta = "CINE"
    tipo = "película"
    action = "add_pelicula_to_library"
    extra = ""
    if item.contentType == "tvshow":
        carpeta = "SERIES"
        tipo = "serie"
        action = "add_serie_to_library"
        extra = "episodios###library"

    library_path = config.get_videolibrary_path()
    if config.get_videolibrary_support():
        title = "Añadir %s a la videoteca" % tipo
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
            try:
                from core import filetools
                path = filetools.join(library_path, carpeta)
                files = filetools.walk(path)
                for dirpath, dirname, filename in files:
                    if item.infoLabels["imdb_id"] in dirpath:
                        namedir = dirpath.replace(path, '')[1:]
                        for f in filename:
                            if f != namedir + ".nfo" and f != "tvshow.nfo":
                                continue
                            from core import videolibrarytools
                            head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "playmax" in canales:
                                canales.pop(canales.index("playmax"))
                                canales.insert(0, "[COLOR red]playmax[/COLOR]")
                            title = "%s ya en tu videoteca. [%s] ¿Añadir?" % (tipo.capitalize(), ",".join(canales))
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())
                pass

        itemlist.append(item.clone(action=action, title=title, text_color=color5, extra=extra))

    token_auth = config.get_setting("token_trakt", "tvmoviedb")
    if token_auth and item.infoLabels["tmdb_id"]:
        extra = "movie"
        if item.contentType != "movie":
            extra = "tv"
        itemlist.append(item.clone(channel="tvmoviedb", title="[Trakt] Gestionar con tu cuenta", action="menu_trakt",
                                   extra=extra))
    itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                               text_color="magenta", context=""))

    itemlist.append(item.clone(action="", title=""))
    ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
    if not ficha:
        ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')

    itemlist.extend(acciones_fichas(item, sid, ficha, season=True))
    itemlist.append(item.clone(action="acciones_cuenta", title="Añadir a una lista", text_color=color3, ficha=ficha))

    return itemlist
Example #22
0
def findvideos(item):
    logger.info()
    url_list = []
    itemlist = []
    duplicados = []
    data = get_source(item.url)
    src = data
    patron = 'id=(?:div|player)(\d+)>.*?<iframe src=.*? data-lazy-src=(.*?) marginheight'
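    # Each player tab: its option number and the lazy-loaded iframe URL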
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, videoitem in matches:
        lang = scrapertools.find_single_match(src,
                                              '<a href=#(?:div|player)%s.*?>.*?(Doblado|Subtitulado)<\/a>' % option)
        data = get_source(videoitem)
        if 'play' in videoitem:
            url = scrapertools.find_single_match(data, '<span>Ver Online<.*?<li><a href=(.*?)><span class=icon>')
        else:
            url = scrapertools.find_single_match(data, '<iframe src=(.*?) scrolling=')

        url_list.append([url, lang])

    for video_url in url_list:
        language = video_url[1]
        if 'jw.miradetodo' in video_url[0]:
            data = get_source('http:' + video_url[0])
            patron = 'label:.*?(.*?),.*?file:.*?(.*?)&app.*?\}'
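            # JW Player setup entries: quality label and file URL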
            matches = re.compile(patron, re.DOTALL).findall(data)

            for quality, scrapedurl in matches:
                title = item.contentTitle + ' (%s) %s' % (quality, language)
                server = 'directo'
                url = scrapedurl
                url = url.replace('\/', '/')
                subtitle = scrapertools.find_single_match(data, "tracks: \[\{file: '.*?linksub=(.*?)',label")
                if url not in duplicados:
                    itemlist.append(item.clone(title=title,
                                               action='play',
                                               url=url,
                                               quality=quality,
                                               server=server,
                                               subtitle=subtitle,
                                               language=language
                                               ))
                    duplicados.append(url)
        elif video_url[0] != '':
            itemlist.extend(servertools.find_video_items(data=video_url[0]))

        for videoitem in itemlist:
            if videoitem.server != 'directo':

                quality = item.quality
                title = item.contentTitle + ' (%s) %s' % (videoitem.server, language)
                if item.quality != '':
                    title = item.contentTitle + ' (%s) %s' % (quality, language)
                videoitem.title = title
                videoitem.channel = item.channel
                videoitem.thumbnail = config.get_thumb("server_%s.png" % videoitem.server)
                videoitem.quality = item.quality

    if item.infoLabels['mediatype'] == 'movie':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(Item(channel=item.channel,
                                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                                 url=item.url,
                                 action="add_pelicula_to_library",
                                 extra="findvideos",
                                 contentTitle=item.contentTitle
                                 ))

    return itemlist
Example #23
0
def findvideos(item):
    logger.info()
    itemlist = []

    if not "|" in item.extra and not __menu_info__:
        data = httptools.downloadpage(item.url, add_referer=True).data
        year = scrapertools.find_single_match(
            data, '<div class="media-summary">.*?release.*?>(\d+)<')
        if year != "" and not "tmdb_id" in item.infoLabels:
            try:
                from core import tmdb
                item.infoLabels["year"] = year
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass

        if item.infoLabels["plot"] == "":
            sinopsis = scrapertools.find_single_match(
                data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
            item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    id = scrapertools.find_single_match(item.url, '/(\d+)/')
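    # Numeric media id taken from the URL, used to query the sources list endpoint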
    if "|" in item.extra or not __menu_info__:
        extra = item.extra
        if "|" in item.extra:
            extra = item.extra[:-1]
        url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra,
                                                               "streaming")
        itemlist.extend(get_enlaces(item, url, "Online"))
        url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra,
                                                               "download")
        itemlist.extend(get_enlaces(item, url, "de Descarga"))

        if extra == "media":
            data_trailer = httptools.downloadpage(
                host + "/media/trailer?idm=%s&mediaType=1" % id).data
            trailer_url = jsontools.load(data_trailer)["video"]["url"]
            if trailer_url != "":
                item.infoLabels["trailer"] = trailer_url

            title = "Ver enlaces %s - [" + item.contentTitle + "]"
            itemlist.append(
                item.clone(channel="trailertools",
                           action="buscartrailer",
                           title="Buscar Tráiler",
                           text_color="magenta",
                           context=""))

            if config.get_videolibrary_support() and not "|" in item.extra:
                itemlist.append(
                    Item(channel=item.channel,
                         action="add_pelicula_to_library",
                         text_color=color5,
                         title="Añadir película a la videoteca",
                         url=item.url,
                         thumbnail=item.thumbnail,
                         fanart=item.fanart,
                         fulltitle=item.fulltitle,
                         extra="media|"))
    else:
        url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, item.extra,
                                                               item.type)
        type = item.type.replace("streaming",
                                 "Online").replace("download", "de Descarga")
        itemlist.extend(get_enlaces(item, url, type))

    return itemlist
Example #24
0
def mainlist(item):
    logger.info()

    itemlist = list()
    itemlist.append(
        Item(channel=CHANNELNAME,
             title="Preferencias",
             action="settings",
             folder=False,
             thumbnail=config.get_thumb("thumb_setting_0.png")))

    # if config.get_setting("plugin_updates_available") == 0:
    #     nuevas = ""
    # elif config.get_setting("plugin_updates_available") == 1:
    #     nuevas = " (1 nueva)"
    # else:
    #     nuevas = " (%s nuevas)" % config.get_setting("plugin_updates_available")
    #
    # thumb_configuracion = "thumb_setting_%s.png" % config.get_setting("plugin_updates_available")
    #
    # itemlist.append(Item(channel=CHANNELNAME, title="Descargar e instalar otras versiones" + nuevas,
    #                      action="get_all_versions", folder=True,
    #                      thumbnail=config.get_thumb(thumb_configuracion)))

    itemlist.append(
        Item(channel=CHANNELNAME,
             title="",
             action="",
             folder=False,
             thumbnail=config.get_thumb("thumb_setting_0.png")))

    itemlist.append(
        Item(channel=CHANNELNAME,
             title="Ajustes especiales",
             action="",
             folder=False,
             thumbnail=config.get_thumb("thumb_setting_0.png")))
    itemlist.append(
        Item(channel=CHANNELNAME,
             title="   Ajustes de Canales",
             action="menu_channels",
             folder=True,
             thumbnail=config.get_thumb("thumb_channels.png")))
    itemlist.append(
        Item(channel=CHANNELNAME,
             title="   Ajustes de Servidores",
             action="menu_servers",
             folder=True,
             thumbnail=config.get_thumb("thumb_channels.png")))
    itemlist.append(
        Item(channel="news",
             title="   Ajustes de la sección 'Novedades'",
             action="menu_opciones",
             folder=True,
             thumbnail=config.get_thumb("thumb_news.png")))
    itemlist.append(
        Item(channel="search",
             title="   Ajustes del buscador global",
             action="opciones",
             folder=True,
             thumbnail=config.get_thumb("thumb_search.png")))
    itemlist.append(
        Item(channel=CHANNELNAME,
             title="   Ajustes de descargas",
             action="channel_config",
             config="downloads",
             folder=True,
             thumbnail=config.get_thumb("thumb_downloads.png")))

    if config.get_videolibrary_support():
        itemlist.append(
            Item(channel="videolibrary",
                 title="   Ajustes de la videoteca",
                 action="channel_config",
                 folder=True,
                 thumbnail=config.get_thumb("thumb_videolibrary.png")))

    if config.is_xbmc():
        itemlist.append(
            Item(channel=CHANNELNAME,
                 title="   Ajustes de cliente Torrent",
                 action="setting_torrent",
                 folder=True,
                 thumbnail=config.get_thumb("thumb_channels_torrent.png")))

    # itemlist.append(Item(channel=CHANNELNAME, title="   Añadir o Actualizar canal/conector desde una URL",
    #                      action="menu_addchannels"))
    itemlist.append(
        Item(channel=CHANNELNAME,
             action="",
             title="",
             folder=False,
             thumbnail=config.get_thumb("thumb_setting_0.png")))
    itemlist.append(
        Item(channel=CHANNELNAME,
             title="Otras herramientas",
             action="submenu_tools",
             folder=True,
             thumbnail=config.get_thumb("thumb_setting_0.png")))

    return itemlist
Example #25
0
def entradasconlistas(item):
    logger.info()
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load(data)
    head = header_string + get_cookie_value()
    # If there is any list
    contentSerie = False
    contentList = False
    if data['playListChilds']:
        itemlist.append(
            Item(channel=item.channel,
                 title="**LISTAS**",
                 action="",
                 text_color="red",
                 text_bold=True,
                 folder=False))
        for child in data['sortedPlaylistChilds']:
            infolabels = {}

            infolabels['plot'] = "Contiene:\n" + "\n".join(
                child['playListChilds']) + "\n".join(child['repoChilds'])
            if child['seasonNumber'] and not contentList and re.search(
                    r'(?i)temporada', child['id']):
                infolabels['season'] = child['seasonNumber']
                contentSerie = True
            else:
                contentSerie = False
                contentList = True
            title = child['id'].replace(
                '-', ' ').capitalize() + " ([COLOR gold]" + str(
                    child['number']) + "[/COLOR])"
            url = "http://tv-vip.com/json/playlist/%s/index.json" % child["id"]
            thumbnail = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child[
                "id"]
            if child['hashBackground']:
                fanart = "http://tv-vip.com/json/playlist/%s/background.jpg" % child[
                    "id"]
            else:
                fanart = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child[
                    "id"]

            thumbnail += head
            fanart += head
            itemlist.append(
                Item(channel=item.channel,
                     action="entradasconlistas",
                     title=bbcode_kodi2html(title),
                     url=url,
                     thumbnail=thumbnail,
                     fanart=fanart,
                     fulltitle=child['id'],
                     infoLabels=infolabels,
                     viewmode="movie_with_plot"))
    else:
        contentList = True
    if data["sortedRepoChilds"] and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="**VÍDEOS**",
                 action="",
                 text_color="blue",
                 text_bold=True,
                 folder=False))

    for child in data["sortedRepoChilds"]:
        infolabels = {}

        infolabels['plot'] = child['description']
        infolabels['year'] = data['year']
        if child['tags']:
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rate'].replace(',', '.')
        infolabels['votes'] = child['rateCount']
        infolabels['duration'] = child['duration']
        if child['cast']: infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']
        url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
        # Fanart
        if child['hashBackground']:
            fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child[
                "id"]
        else:
            fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child[
                "id"]
        # Thumbnail
        if child['hasPoster']:
            thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child[
                "id"]
        else:
            thumbnail = fanart
        thumbnail += head
        fanart += head
        if child['height'] < 720:
            quality = "[B]  [SD][/B]"
        elif child['height'] < 1080:
            quality = "[B]  [720p][/B]"
        elif child['height'] >= 1080:
            quality = "[B]  [1080p][/B]"
        fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
            .decode("utf-8")
        if child['name'] == "":
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        if child['year']:
            title += " (" + child['year'] + ")"
        title += quality

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=bbcode_kodi2html(title),
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 infoLabels=infolabels,
                 contentTitle=fulltitle,
                 context="05",
                 viewmode="movie_with_plot",
                 folder=True))

    # Item to add the video list to the library
    if data['sortedRepoChilds'] and len(itemlist) > 0 and contentList:
        if config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     text_color="green",
                     title="Añadir esta lista a la videoteca",
                     url=item.url,
                     action="listas"))
    elif contentSerie:
        if config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir esta serie a la videoteca",
                     url=item.url,
                     action="series_library",
                     fulltitle=data['name'],
                     show=data['name'],
                     text_color="green"))

    return itemlist
Example #26
0
def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")

    plot = scrapertools.find_single_match(data, '<p><p>(.*?)</p>')
    item.plot = scrapertools.htmlclean(plot)
    bloque = scrapertools.find_multiple_matches(
        data, '<td data-th="Temporada"(.*?)</div>')
    for match in bloque:
        matches = scrapertools.find_multiple_matches(
            match, '.*?href="([^"]+)".*?title="([^"]+)"')
        for scrapedurl, scrapedtitle in matches:
            try:
                season, episode = scrapertools.find_single_match(
                    scrapedtitle, '(\d+)(?:×|x)(\d+)')
                item.infoLabels['season'] = season
                item.infoLabels['episode'] = episode
                contentType = "episode"
            except:
                try:
                    episode = scrapertools.find_single_match(
                        scrapedtitle,
                        '(?i)(?:Capitulo|Capítulo|Episodio)\s*(\d+)')
                    item.infoLabels['season'] = "1"
                    item.infoLabels['episode'] = episode
                    contentType = "episode"
                except:
                    contentType = "tvshow"

            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle) + "  "
            scrapedtitle = scrapedtitle.replace('Temporada', '')
            if "ES.png" in match:
                scrapedtitle += "[CAST]"
            if "SUB.png" in match:
                scrapedtitle += "[VOSE]"
            if "LA.png" in match:
                scrapedtitle += "[LAT]"
            if "EN.png" in match:
                scrapedtitle += "[V.O]"

            itemlist.append(
                item.clone(action="findvideos",
                           title=scrapedtitle,
                           url=scrapedurl,
                           fulltitle=scrapedtitle,
                           contentType=contentType))

    itemlist.reverse()
    if itemlist and item.extra != "episodios":
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass
        itemlist.append(
            item.clone(channel="trailertools",
                       title="Buscar Tráiler",
                       action="buscartrailer",
                       context="",
                       text_color="magenta"))
        if item.category != "" and config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir esta temporada a la videoteca",
                     url=item.url,
                     action="add_serie_to_library",
                     extra="episodios",
                     text_color="green",
                     show=item.show))

    return itemlist
Example #27
0
def episodios(item):
    logger.info()
    logger.info("categoriaaa es " + item.tostring())
    itemlist = []
    # Redirect used when updating the video library
    if item.extra == "series_library":
        itemlist = series_library(item)
        return itemlist

    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load(data)
    head = header_string + get_cookie_value()
    # Try one method or the other because some series are not listed properly
    if data["sortedRepoChilds"]:
        for child in data["sortedRepoChilds"]:
            if item.infoLabels:
                item.infoLabels['duration'] = str(child['duration'])
                item.infoLabels['season'] = str(data['seasonNumber'])
                item.infoLabels['episode'] = str(child['episode'])
                item.infoLabels['mediatype'] = "episode"
            contentTitle = item.fulltitle + "|" + str(
                data['seasonNumber']) + "|" + str(child['episode'])
            # If coming from the "new episodes" section, redirect to series() to show the rest
            if item.title == "Nuevos Capítulos":
                url = "http://tv-vip.com/json/playlist/%s/index.json" % child[
                    "season"]
                action = "series"
                extra = "new"
            else:
                url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
                action = "findvideos"
                extra = ""
            if child['hasPoster']:
                thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child[
                    "id"]
            else:
                thumbnail = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child[
                    "id"]
            thumbnail += head
            try:
                title = fulltitle = child['name'].rsplit(
                    " ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
            except:
                title = fulltitle = child['id']
            itemlist.append(
                item.clone(action=action,
                           server="",
                           title=title,
                           url=url,
                           thumbnail=thumbnail,
                           fanart=item.fanart,
                           fulltitle=fulltitle,
                           contentTitle=contentTitle,
                           context="35",
                           viewmode="movie",
                           extra=extra,
                           show=item.fulltitle,
                           folder=True))
    else:
        for child in data["repoChilds"]:
            url = "http://tv-vip.com/json/repo/%s/index.json" % child
            if data['hasPoster']:
                thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child
            else:
                thumbnail = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child
            thumbnail += head
            title = fulltitle = child.capitalize().replace('_', ' ')
            itemlist.append(
                item.clone(action="findvideos",
                           server="",
                           title=title,
                           url=url,
                           thumbnail=thumbnail,
                           fanart=item.fanart,
                           fulltitle=fulltitle,
                           contentTitle=item.fulltitle,
                           context="25",
                           show=item.fulltitle,
                           folder=True))

    # Option to add single-season series to the video library
    if len(itemlist) > 0 and "---" not in item.title and item.title != "Nuevos Capítulos":
        if config.get_videolibrary_support() and item.show == "":
            if "-" in item.title:
                show = item.title.split('-')[0]
            else:
                show = item.title.split('(')[0]
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir esta serie a la videoteca",
                     text_color="green",
                     url=item.url,
                     action="add_serie_to_library",
                     show=show,
                     extra="series_library"))
    return itemlist
Example #28
0
def entradasconlistas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)

    # If there is any list
    contentSerie = False
    contentList = False
    if data.get('b'):
        for child in data['b']:
            infolabels = {}

            infolabels['originaltitle'] = child['originalTitle']
            infolabels['plot'] = child['description']
            infolabels['year'] = data['year']
            if child.get('tags'):
                infolabels['genre'] = ', '.join(
                    [x.strip() for x in child['tags']])
            infolabels['rating'] = child['rateHuman'].replace(',', '.')
            infolabels['votes'] = child['rateCount']
            if child.get('runtime'):
                try:
                    infolabels['duration'] = int(child['runtime'].replace(
                        " min.", "")) * 60
                except:
                    pass
            if child.get('cast'): infolabels['cast'] = child['cast'].split(",")
            infolabels['director'] = child['director']
            season = child.get('season', '')
            if season.isdigit() and not contentList:
                contentSerie = True
                action = "episodios"
            else:
                contentSerie = False
                contentList = True
                action = "entradasconlistas"

            url = host % "list/%s" % child["id"] + ext
            title = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['name'])
            fulltitle = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['name'])
            if not title:
                title = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['id'])
                fulltitle = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['id'])
            title = unicode(title, "utf-8").capitalize().encode("utf-8")
            fulltitle = unicode(fulltitle,
                                "utf-8").capitalize().encode("utf-8")
            show = ""
            if contentSerie:
                title += " (Serie TV)"
                show = fulltitle
            thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"]
            fanart = host % "list/%s/background_1080.jpg" % child["id"]

            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     thumbnail=thumbnail,
                     fanart=fanart,
                     fulltitle=fulltitle,
                     show=show,
                     infoLabels=infolabels,
                     contentTitle=fulltitle,
                     viewmode="movie_with_plot",
                     text_color=color3))
    else:
        contentList = True

    if contentSerie and itemlist:
        itemlist.sort(key=lambda it: it.infoLabels['season'], reverse=True)

    if itemlist:
        itemlist.insert(
            0,
            Item(channel=item.channel,
                 title="**LISTAS**",
                 action="",
                 text_color=color4,
                 text_bold=True,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))

    if data.get("a") and itemlist:
        itemlist.append(
            Item(channel=item.channel,
                 title="**VÍDEOS**",
                 action="",
                 text_color=color6,
                 text_bold=True,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))

    for child in data.get("a", []):
        infolabels = {}

        infolabels['originaltitle'] = child['originalTitle']
        infolabels['plot'] = child['description']
        infolabels['year'] = data['year']
        if child.get('tags'):
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rateHuman'].replace(',', '.')
        infolabels['votes'] = child['rateCount']
        if child.get('runtime'):
            try:
                infolabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if child.get('cast'): infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']
        url = host % "movie/%s/movie.js" % child["id"]
        # Fanart
        fanart = host % "movie/%s/background_1080.jpg" % child["id"]
        if child.get("episode"):
            thumbnail = host % "movie/%s/thumbnail.jpg" % child["id"]
        else:
            thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"]

        if child['height'] < 720:
            quality = "[B]  [SD][/B]"
        elif child['height'] < 1080:
            quality = "[B]  [720p][/B]"
        elif child['height'] >= 1080:
            quality = "[B]  [1080p][/B]"
        fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
            .decode("utf-8")
        if not child['name']:
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        if child['year']:
            title += " (" + child['year'] + ")"
        title += quality

        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=url,
                 video_urls=video_urls,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 infoLabels=infolabels,
                 contentTitle=fulltitle,
                 viewmode="movie_with_plot",
                 text_color=color3))

    # Item to add the video list to the library
    if data.get('a') and itemlist and contentList and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 text_color=color5,
                 title="Añadir esta lista a la videoteca",
                 url=item.url,
                 action="listas"))
    elif contentSerie and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la videoteca",
                 text_color=color5,
                 url=item.url,
                 action="add_serie_to_library",
                 show=item.show,
                 fulltitle=item.fulltitle,
                 extra="episodios"))

    return itemlist
Example #29
0
def episodios(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    if not item.infoLabels["tmdb_id"]:
        item.infoLabels["tmdb_id"] = scrapertools.find_single_match(data,
                                                                    '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
        item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})')
    if not item.infoLabels["genre"]:
        item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(data,
                                                                                '<a itemprop="genre"[^>]+>([^<]+)</a>'))
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>')

    dc = scrapertools.find_single_match(data, "var dc_ic = '\?dc=([^']+)'")
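    # "dc" token required by c_enlaces_n.php to list the episode links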
    patron = '<div class="f_cl_l_c f_cl_l_c_id[^"]+" c_id="([^"]+)" .*?c_num="([^"]+)" c_name="([^"]+)"' \
             '.*?load_f_links\(\d+\s*,\s*(\d+).*?<div class="([^"]+)" onclick="marcar_capitulo'
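    # Captures per episode: c_id, number (e.g. 1x01), title, ficha id and the watched-status class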
    matches = scrapertools.find_multiple_matches(data, patron)
    lista_epis = []
    for c_id, episodio, title, ficha, status in matches:
        episodio = episodio.replace("X", "x")
        if episodio in lista_epis:
            continue
        lista_epis.append(episodio)
        url = "https://playmax.mx/c_enlaces_n.php?ficha=%s&c_id=%s&dc=%s" % (ficha, c_id, dc)
        title = "%s - %s" % (episodio, title)
        if "_mc a" in status:
            title = "[COLOR %s]%s[/COLOR] %s" % (color5, u"\u0474".encode('utf-8'), title)

        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
                        fanart=item.fanart, show=item.show, infoLabels=item.infoLabels, text_color=color2,
                        referer=item.url, contentType="episode")
        try:
            new_item.infoLabels["season"], new_item.infoLabels["episode"] = episodio.split('x', 1)
        except:
            pass
        itemlist.append(new_item)

    itemlist.sort(key=lambda it: (it.infoLabels["season"], it.infoLabels["episode"]), reverse=True)
    if __modo_grafico__:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    library_path = config.get_videolibrary_path()
    if config.get_videolibrary_support() and not item.extra:
        title = "Añadir serie a la videoteca"
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
            try:
                from core import filetools
                path = filetools.join(library_path, "SERIES")
                files = filetools.walk(path)
                for dirpath, dirname, filename in files:
                    if item.infoLabels["imdb_id"] in dirpath:
                        for f in filename:
                            if f != "tvshow.nfo":
                                continue
                            from core import videolibrarytools
                            head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "playmax" in canales:
                                canales.pop(canales.index("playmax"))
                                canales.insert(0, "[COLOR red]playmax[/COLOR]")
                            title = "Serie ya en tu videoteca. [%s] ¿Añadir?" % ",".join(canales)
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())
                pass

        itemlist.append(item.clone(action="add_serie_to_library", title=title, text_color=color5,
                                   extra="episodios###library"))
    if itemlist and not __menu_info__:
        ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
        itemlist.extend(acciones_fichas(item, sid, ficha))

    return itemlist
Example #30
0
def findvideos(item):
    logger.info()

    itemlist = []
    langs = dict()

    data = httptools.downloadpage(item.url).data
    logger.debug('data: %s' % data)
    patron = '<a onclick="return (play\d+).*?;"> (.*?) <\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for key, value in matches:
        langs[key] = value.strip()

    patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);'
    matches = re.compile(patron, re.DOTALL).findall(data)
    title = item.title
    enlace = scrapertools.find_single_match(
        data,
        'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"'
    )

    for scrapedlang, encurl in matches:

        if 'e20fb34' in encurl:
            url = dec(encurl)
            url = url + enlace

        else:
            url = dec(encurl)
        title = ''
        server = ''
        servers = {
            '/opl': 'openload',
            '/your': 'yourupload',
            '/sen': 'senvid',
            '/face': 'netutv',
            '/vk': 'vk'
        }
        server_id = re.sub(r'.*?embed|\.php.*', '', url)
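        # The path fragment between "embed" and ".php" identifies the hosting server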
        if server_id and server_id in servers:
            server = servers[server_id]
        logger.debug('server_id: %s' % server_id)

        if langs[scrapedlang] in list_language:
            language = IDIOMAS[langs[scrapedlang]]
        else:
            language = 'Latino'
        if langs[scrapedlang] == 'Latino':
            idioma = '[COLOR limegreen]LATINO[/COLOR]'
        elif langs[scrapedlang] == 'Sub Español':
            idioma = '[COLOR red]SUB[/COLOR]'
        else:
            idioma = ''

        if item.extra == 'peliculas':
            title = item.contentTitle + ' (' + server + ') ' + idioma
            plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>')
        else:
            title = item.contentSerieName + ' (' + server + ') ' + idioma
            plot = item.plot

        thumbnail = servertools.guess_server_thumbnail(title)

        if 'player' not in url and 'php' in url:
            itemlist.append(
                item.clone(title=title,
                           url=url,
                           action="play",
                           plot=plot,
                           thumbnail=thumbnail,
                           server=server,
                           quality='',
                           language=language))
        logger.debug('url: %s' % url)
    # Required by FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist