Esempio n. 1
0
def busqueda(item):
    """
    Run a search against the playmax XML API and build the result menu.

    Parses the fichas returned for item.url, decorates them with TMDB data
    when graphics mode is on, and appends a "next page" entry while more
    results remain (20 results per page).
    """
    logger.info()
    itemlist = []

    data = json.xmlTojson(None, httptools.downloadpage(item.url).data)

    for ficha in data["Data"]["Fichas"]["Ficha"]:
        # Strip the CDATA wrapper the API puts around titles.
        ficha['Title'] = scrapertools.find_single_match(
            ficha['Title'], '<!\[CDATA\[(.+?)\]\]>')

        if ficha["IsSerie"] == "1":
            tipo, show = "tvshow", ficha["Title"]
        else:
            tipo, show = "movie", ""

        # menu_info takes precedence; otherwise series open their episodes
        # and movies go straight to the links.
        if __menu_info__:
            action = "menu_info"
        elif tipo == "tvshow":
            action = "episodios"
        else:
            action = "findvideos"

        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 title="%s  (%s)" % (ficha["Title"], ficha["Year"]),
                 url="%s/ficha.php?f=%s" % (host, ficha["Id"]),
                 text_color=color2,
                 contentTitle=ficha["Title"],
                 show=show,
                 contentType=tipo,
                 infoLabels={'year': ficha["Year"]},
                 thumbnail=ficha["Poster"]))

    if __modo_grafico__:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    # Pagination: the API serves 20 fichas per request.
    total = int(data["Data"]["totalResultsFichas"])
    actualpage = int(scrapertools.find_single_match(item.url, "start=(\d+)"))
    if actualpage + 20 < total:
        itemlist.append(
            Item(channel=item.channel,
                 action="busqueda",
                 title=">> Página Siguiente",
                 url=item.url.replace("start=%s" % actualpage,
                                      "start=%s" % (actualpage + 20)),
                 thumbnail=item.thumbnail))

    return itemlist
Esempio n. 2
0
def acciones_cuenta(item):
    """
    Dispatch account-related actions selected by item.title / item.post.

    Depending on the incoming item this either:
      - builds the "Tus fichas" sub-menu,
      - lists the user's lists so the current ficha can be added to one,
      - creates a new list (asking for name and privacy),
      - follows a list (fire-and-forget GET),
      - posts a ficha to a list,
      - or, as fall-through, scrapes https://playmax.mx/tusfichas.php and
        lists the entries of the block whose CSS id is carried in item.url.

    Returns a list of Items for menu branches, or None for the
    notification-only branches.
    """
    logger.info()
    itemlist = []

    if "Tus fichas" in item.title:
        # Static sub-menu; each url holds the CSS block id that the
        # fall-through scraping branch below looks up on the page.
        itemlist.append(
            item.clone(title="Capítulos",
                       url="tf_block_c a",
                       contentType="tvshow"))
        itemlist.append(
            item.clone(title="Series", url="tf_block_s", contentType="tvshow"))
        itemlist.append(item.clone(title="Películas", url="tf_block_p"))
        itemlist.append(item.clone(title="Documentales", url="tf_block_d"))
        return itemlist
    elif "Añadir a una lista" in item.title:
        # Fetch the user's lists from the XML API.
        data = httptools.downloadpage(host + "/c_listas.php?apikey=%s&sid=%s" %
                                      (apikey, sid)).data
        data = json.xmlTojson(None, data)
        itemlist.append(item.clone(title="Crear nueva lista", folder=False))
        # "\t" appears to be the API's marker for "no lists" — TODO confirm.
        if data["Data"]["TusListas"] != "\t":
            import random
            data = data["Data"]["TusListas"]["Item"]
            # A single entry is returned as a dict; normalize to a list.
            if type(data) is not list:
                data = [data]
            for child in data:
                image = ""

                # Strip the CDATA wrapper from the title.
                child['Title'] = scrapertools.find_single_match(
                    child['Title'], '<!\[CDATA\[(.+?)\]\]>')
                title = "%s (%s fichas)" % (child["Title"],
                                            child["FichasInList"])
                # Collect the non-placeholder posters (upscaled 100 -> 400)
                # and pick one at random as the thumbnail.
                images = []
                for i in range(1, 5):
                    if "sinimagen.png" not in child["Poster%s" % i]:
                        images.append(child["Poster%s" % i].replace(
                            "/100/", "/400/"))
                if images:
                    image = images[random.randint(0, len(images) - 1)]
                url = host + "/data.php?mode=add_listas&apikey=%s&sid=%s&ficha_id=%s" % (
                    apikey, sid, item.ficha)
                post = "lista_id[]=%s" % child["Id"]
                itemlist.append(
                    item.clone(title=title,
                               url=url,
                               post=post,
                               thumbnail=image,
                               folder=False))

        return itemlist
    elif "Crear nueva lista" in item.title:
        from platformcode import platformtools
        nombre = platformtools.dialog_input(
            "", "Introduce un nombre para la lista")
        if nombre:
            dict_priv = {0: 'Pública', 1: 'Privada'}
            priv = platformtools.dialog_select("Privacidad de la lista",
                                               ['Pública', 'Privada'])
            if priv != -1:  # -1: user cancelled the privacy dialog
                url = host + "/data.php?mode=create_list&apikey=%s&sid=%s" % (
                    apikey, sid)
                post = "name=%s&private=%s" % (nombre, priv)
                data = httptools.downloadpage(url, post)
                platformtools.dialog_notification(
                    "Lista creada correctamente",
                    "Nombre: %s - %s" % (nombre, dict_priv[priv]))
                platformtools.itemlist_refresh()
        return
    elif re.search(r"(?i)Seguir Lista", item.title):
        from platformcode import platformtools
        # Fire-and-forget: the GET itself performs the "follow" action.
        data = httptools.downloadpage(item.url)
        platformtools.dialog_notification("Operación realizada con éxito",
                                          "Lista: %s" % item.lista)
        return
    elif item.post:
        # Add the ficha to the previously selected list.
        from platformcode import platformtools
        data = httptools.downloadpage(item.url, item.post).data
        platformtools.dialog_notification("Ficha añadida a la lista",
                                          "Lista: %s" % item.title)
        platformtools.itemlist_refresh()
        return

    # Fall-through: scrape the user's fichas page. Collapse whitespace and
    # entities so the regexes below can match across the whole block.
    data = httptools.downloadpage("https://playmax.mx/tusfichas.php").data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    # Isolate the block whose CSS id is in item.url, then split it into
    # (category header, category contents) pairs.
    bloque = scrapertools.find_single_match(
        data,
        item.url + '">(.*?)(?:<div class="tf_blocks|<div class="tf_o_move">)')
    matches = scrapertools.find_multiple_matches(
        bloque, '<div class="tf_menu_mini">([^<]+)<(.*?)<cb></cb></div>')
    for category, contenido in matches:
        # Non-clickable category header (action="").
        itemlist.append(
            item.clone(action="", title=category, text_color=color3))

        patron = '<div class="c_fichas_image">.*?href="\.([^"]+)".*?src="\.([^"]+)".*?serie="([^"]*)".*?' \
                 '<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
        entradas = scrapertools.find_multiple_matches(contenido, patron)
        for scrapedurl, scrapedthumbnail, serie, episodio, scrapedtitle in entradas:
            tipo = "movie"
            scrapedurl = host + scrapedurl
            scrapedthumbnail = host + scrapedthumbnail
            action = "findvideos"
            if __menu_info__:
                action = "menu_info"
            if serie:
                tipo = "tvshow"
            if episodio:
                # Normalize episode notation, e.g. "1X02" -> "1x02".
                title = "      %s - %s" % (episodio.replace("X",
                                                            "x"), scrapedtitle)
            else:
                title = "      " + scrapedtitle

            new_item = Item(channel=item.channel,
                            action=action,
                            title=title,
                            url=scrapedurl,
                            thumbnail=scrapedthumbnail,
                            contentTitle=scrapedtitle,
                            contentType=tipo,
                            text_color=color2)
            if new_item.contentType == "tvshow":
                new_item.show = scrapedtitle
                if not __menu_info__:
                    new_item.action = "episodios"

            itemlist.append(new_item)

    return itemlist
Esempio n. 3
0
def acciones_fichas(item, sid, ficha, season=False):
    """
    Build the list of "mark as ..." action items for a ficha.

    Queries the playmax ficha API and produces entries to toggle the
    following/favorite/viewed/pending status, mark the current episode as
    viewed, and (optionally, when season=True and the item is a tvshow)
    mark whole seasons as viewed.

    @param item: item being displayed; its contentType drives the wording
    @param sid: session id for the API
    @param ficha: ficha id to query
    @param season: include per-season "mark as viewed" entries
    @return: list of cloned Items with action "marcar" (or "" when already
             marked)
    """
    marcarlist = []
    new_item = item.clone()
    new_item.infoLabels.pop("duration", None)
    # API status keys mapped to the Spanish labels used in the titles below.
    estados = [{
        'following': 'seguir'
    }, {
        'favorite': 'favorita'
    }, {
        'view': 'vista'
    }, {
        'slope': 'pendiente'
    }]
    url = "https://playmax.mx/ficha.php?apikey=%s&sid=%s&f=%s" % (apikey, sid,
                                                                  ficha)
    data = httptools.downloadpage(url).data
    data = json.xmlTojson(None, data)

    # Episode entry: offer to toggle the viewed flag of the exact episode.
    try:
        marked = data["Data"]["User"]["Marked"]
        if new_item.contentType == "episode":
            for epi in data["Data"]["Episodes"][
                    "Season_%s" % new_item.infoLabels["season"]]["Item"]:
                if int(epi["Episode"]) == new_item.infoLabels["episode"]:
                    epi_marked = epi["EpisodeViewed"].replace("yes", "ya")
                    epi_id = epi["Id"]
                    marcarlist.append(
                        new_item.clone(action="marcar",
                                       title="Capítulo %s visto. ¿Cambiar?" %
                                       epi_marked,
                                       text_color=color3,
                                       epi_id=epi_id))
                    break
    except:
        pass

    # Status entries (follow/favorite/viewed/pending).
    # NOTE(review): `marked` is assigned in the previous try block; if that
    # block failed before the assignment, the NameError here silently aborts
    # this whole section via the bare except — confirm this is intended.
    try:
        tipo = new_item.contentType.replace("movie", "Película").replace(
            "episode", "Serie").replace("tvshow", "Serie")
        for status in estados:
            for k, v in status.items():
                if k != marked:
                    title = "Marcar %s como %s" % (tipo.lower(), v)
                    action = "marcar"
                else:
                    title = "%s marcada como %s" % (tipo, v)
                    action = ""
                if k == "following" and tipo == "Película":
                    # Movies cannot be "followed".
                    continue
                elif k == "following" and tipo == "Serie":
                    title = title.replace("seguir", "seguida")
                    if k != marked:
                        title = "Seguir serie"
                        action = "marcar"
                    # The follow entry is pinned near the top of the menu.
                    marcarlist.insert(
                        1,
                        new_item.clone(action=action,
                                       title=title,
                                       text_color=color4,
                                       ficha=ficha,
                                       folder=False))
                    continue

                marcarlist.append(
                    new_item.clone(action="marcar",
                                   title=title,
                                   text_color=color3,
                                   ficha=ficha,
                                   folder=False))
    except:
        pass

    # Optional per-season toggles for tv shows.
    try:
        if season and item.contentType == "tvshow":
            seasonlist = []
            for k, v in data["Data"]["Episodes"].items():
                vistos = False
                # Keys look like "Season_N"; keep only the number.
                season = k.rsplit("_", 1)[1]
                if type(v) is str:
                    continue
                elif type(v["Item"]) is not list:
                    # Single episode comes back as a dict; normalize.
                    v["Item"] = [v["Item"]]

                # Any unviewed episode -> offer "mark season as viewed".
                for epi in v["Item"]:
                    if epi["EpisodeViewed"] == "no":
                        vistos = True
                        seasonlist.append(
                            new_item.clone(
                                action="marcar",
                                title="Marcar temporada %s como vista" %
                                season,
                                text_color=color1,
                                season=int(season),
                                ficha=ficha,
                                folder=False))
                        break

                if not vistos:
                    seasonlist.append(
                        new_item.clone(
                            action="marcar",
                            title="Temporada %s ya vista. ¿Revertir?" % season,
                            text_color=color1,
                            season=int(season),
                            ficha=ficha,
                            folder=False))

            # Newest season first.
            seasonlist.sort(key=lambda it: it.season, reverse=True)
            marcarlist.extend(seasonlist)
    except:
        pass
    return marcarlist
Esempio n. 4
0
def findvideos(item):
    """
    List the playable/downloadable links of a ficha (movie or episode).

    For movies it first scrapes the ficha page to complete missing
    infoLabels, then queries the c_enlaces XML API for the links, groups
    them by section (Download/Online/other), sorts them and appends the
    "mark as ..." actions. When a series has no links yet, it falls back
    to scraping upcoming release dates.
    """
    logger.info()
    itemlist = []

    if item.contentType == "movie":
        # Download the page
        data = httptools.downloadpage(item.url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

        # Fill in tmdb id / year from the page when missing.
        if not item.infoLabels["tmdb_id"]:
            item.infoLabels["tmdb_id"] = scrapertools.find_single_match(
                data, '<a href="https://www.themoviedb.org/'
                '[^/]+/(\d+)')
            item.infoLabels["year"] = scrapertools.find_single_match(
                data, 'class="e_new">(\d{4})')

        if __modo_grafico__:
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        # Scraped fallbacks when TMDB did not provide plot/genre.
        if not item.infoLabels["plot"]:
            item.infoLabels["plot"] = scrapertools.find_single_match(
                data, 'itemprop="description">([^<]+)</div>')
        if not item.infoLabels["genre"]:
            item.infoLabels["genre"] = ", ".join(
                scrapertools.find_multiple_matches(
                    data, '<a itemprop="genre"[^>]+>'
                    '([^<]+)</a>'))

        # Movie urls carry the ficha id either as "-fNNN-" or "f=NNN".
        ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
        if not ficha:
            ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')
        cid = "0"
    else:
        # Episode urls carry both the ficha and the chapter id.
        ficha, cid = scrapertools.find_single_match(item.url,
                                                    'ficha=(\d+)&c_id=(\d+)')

    url = "https://playmax.mx/c_enlaces_n.php?apikey=%s&sid=%s&ficha=%s&cid=%s" % (
        apikey, sid, ficha, cid)
    data = httptools.downloadpage(url).data
    data = json.xmlTojson(None, data)

    for k, v in data["Data"].items():
        try:
            if type(v) is dict:
                # order is used later to sort sections: Online > other > Download.
                if k == "Online":
                    order = 1
                elif k == "Download":
                    order = 0
                else:
                    order = 2

                # Non-clickable section header.
                itemlist.append(
                    item.clone(action="",
                               title=k,
                               text_color=color3,
                               order=order))
                if type(v["Item"]) is str:
                    continue
                elif type(v["Item"]) is dict:
                    # Single link comes back as a dict; normalize to a list.
                    v["Item"] = [v["Item"]]
                for it in v["Item"]:
                    try:
                        thumbnail = "%s/styles/prosilver/imageset/%s.png" % (
                            host, it['Host'])
                        title = "   %s - %s/%s" % (it['Host'].capitalize(),
                                                   it['Quality'], it['Lang'])
                        # Numeric quality (e.g. 720 from "720p") for sorting.
                        calidad = int(
                            scrapertools.find_single_match(
                                it['Quality'], '(\d+)p'))
                        calidadaudio = it['QualityA'].replace("...", "")
                        subtitulos = it['Subtitles'].replace(
                            "Sin subtítulos", "")
                        if subtitulos:
                            title += " (%s)" % subtitulos
                        if calidadaudio:
                            title += "  [Audio:%s]" % calidadaudio

                        # Net likes, also used as a sort key.
                        likes = 0
                        if it["Likes"] != "0" or it["Dislikes"] != "0":
                            likes = int(it["Likes"]) - int(it["Dislikes"])
                            title += "  (%s ok, %s ko)" % (it["Likes"],
                                                           it["Dislikes"])
                        # Multi-part links come as a dict of urls.
                        if type(it["Url"]) is dict:
                            for i, enlace in enumerate(it["Url"]["Item"]):
                                titulo = title + "  (Parte %s)" % (i + 1)
                                itemlist.append(
                                    item.clone(title=titulo,
                                               url=enlace,
                                               action="play",
                                               calidad=calidad,
                                               thumbnail=thumbnail,
                                               order=order,
                                               like=likes,
                                               ficha=ficha,
                                               cid=cid,
                                               folder=False))
                        else:
                            url = it["Url"]
                            itemlist.append(
                                item.clone(title=title,
                                           url=url,
                                           action="play",
                                           calidad=calidad,
                                           thumbnail=thumbnail,
                                           order=order,
                                           like=likes,
                                           ficha=ficha,
                                           cid=cid,
                                           folder=False))
                    except:
                        # Skip malformed link entries.
                        pass
        except:
            pass

    # Sort either by section/quality/likes or by section only, per setting.
    if not config.get_setting("order_web", "playmax"):
        itemlist.sort(key=lambda it: (it.order, it.calidad, it.like),
                      reverse=True)
    else:
        itemlist.sort(key=lambda it: it.order, reverse=True)
    if itemlist:
        itemlist.extend(acciones_fichas(item, sid, ficha))

    # No links for a series: fall back to scraping upcoming air dates.
    if not itemlist and item.contentType != "movie":
        url = url.replace("apikey=%s&" % apikey, "")
        data = httptools.downloadpage(url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

        patron = '<div id="f_fde_c"[^>]+>(.*?update_fecha\(\d+\)">)</div>'
        estrenos = scrapertools.find_multiple_matches(data, patron)
        for info in estrenos:
            info = "Estreno en " + scrapertools.htmlclean(info)
            itemlist.append(item.clone(action="", title=info))

    if not itemlist:
        itemlist.append(
            item.clone(action="", title="No hay enlaces disponibles"))

    return itemlist
Esempio n. 5
0
def listas(item):
    """
    Browse user lists from the playmax XML API.

    With item.extra == "listas" it builds the sub-menu of list orderings
    plus the user's own/followed lists; otherwise it renders the entries
    of the selected collection, with pagination 20 at a time.
    """
    logger.info()
    itemlist = []

    data = json.xmlTojson(None, httptools.downloadpage(item.url).data)

    if item.extra == "listas":
        # Static ordering sub-menus (most followed / biggest / random).
        for titulo, orden in (("Listas más seguidas", "&orden=1"),
                              ("Listas con más fichas", "&orden=2"),
                              ("Listas aleatorias", "&orden=3")):
            itemlist.append(
                Item(channel=item.channel,
                     title=titulo,
                     action="listas",
                     text_color=color1,
                     url=item.url + orden,
                     extra="listas_plus"))
        # "\t" marks an empty collection in the API response.
        if data["Data"]["ListasSiguiendo"] != "\t":
            itemlist.append(
                Item(channel=item.channel,
                     title="Listas que sigo",
                     action="listas",
                     text_color=color1,
                     url=item.url,
                     extra="sigo"))
        if data["Data"]["TusListas"] != "\t":
            itemlist.append(
                Item(channel=item.channel,
                     title="Mis listas",
                     action="listas",
                     text_color=color1,
                     url=item.url,
                     extra="mislistas"))

        return itemlist

    if item.extra == "sigo":
        data = data["Data"]["ListasSiguiendo"]["Item"]
    elif item.extra == "mislistas":
        data = data["Data"]["TusListas"]["Item"]
    else:
        data = data["Data"]["Listas"]["Item"]

    # A single entry comes back as a dict; normalize to a list.
    if type(data) is not list:
        data = [data]
    import random
    for child in data:
        # Strip the CDATA wrapper from the title.
        child['Title'] = scrapertools.find_single_match(
            child['Title'], '<!\[CDATA\[(.+?)\]\]>')

        # Pick one random non-placeholder poster, upscaled 100 -> 400.
        posters = [child["Poster%s" % i].replace("/100/", "/400/")
                   for i in range(1, 5)
                   if "sinimagen.png" not in child["Poster%s" % i]]
        if posters:
            image = posters[random.randint(0, len(posters) - 1)]
        else:
            image = ""

        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 url=host + "/l%s" % child["Id"],
                 text_color=color3,
                 thumbnail=image,
                 title="%s (%s fichas)" % (child["Title"],
                                           child["FichasInList"]),
                 extra=item.extra))

    # A full page (20 entries) means there may be more results.
    if len(itemlist) == 20:
        start = scrapertools.find_single_match(item.url, 'start=(\d+)')
        url = re.sub(r'start=%s' % start, 'start=%s' % (int(start) + 20),
                     item.url)
        itemlist.append(item.clone(title=">> Página Siguiente", url=url))

    return itemlist
Esempio n. 6
0
def get_server_parameters(server):
    """
    Load and cache the configuration of a server (or debrider).

    Looks up servers/<name>.xml, then servers/debriders/<name>.xml, parses
    it and normalizes the 'premium', 'id', 'find_videos' and 'settings'
    entries so callers always receive lists where the XML may have yielded
    a single dict/str. Results are cached in dict_servers_parameters.

    @param server: server name (anything after the first '.' is ignored)
    @type server: str

    @return: server parameters, or {} when the name is empty or loading fails
    @rtype: dict
    """
    global dict_servers_parameters
    server = server.split('.')[0]
    if not server:
        return {}

    if server not in dict_servers_parameters:
        try:
            # Servers
            if os.path.isfile(
                    os.path.join(config.get_runtime_path(), "servers",
                                 server + ".xml")):
                JSONFile = jsontools.xmlTojson(
                    os.path.join(config.get_runtime_path(), "servers",
                                 server + ".xml"))["server"]
            # Debriders
            elif os.path.isfile(
                    os.path.join(config.get_runtime_path(), "servers",
                                 "debriders", server + ".xml")):
                JSONFile = jsontools.xmlTojson(
                    os.path.join(config.get_runtime_path(), "servers",
                                 "debriders", server + ".xml"))["server"]
            # If neither file exists, JSONFile is unbound and the NameError
            # below is reported through the except branch.

            # Normalize 'premium' and 'id': missing/empty -> [],
            # attribute dict -> its value, bare string -> one-element list.
            for k in ['premium', 'id']:
                if k not in JSONFile or JSONFile[k] == "":
                    JSONFile[k] = []
                elif type(JSONFile[k]) == dict:
                    JSONFile[k] = JSONFile[k]["value"]
                if type(JSONFile[k]) == str:
                    JSONFile[k] = [JSONFile[k]]

            if 'find_videos' in JSONFile:
                # A single pattern comes back as a dict; normalize to list.
                if type(JSONFile['find_videos']['patterns']) == dict:
                    JSONFile['find_videos']['patterns'] = [
                        JSONFile['find_videos']['patterns']
                    ]

                if not JSONFile['find_videos'].get("ignore_urls", ""):
                    JSONFile['find_videos']["ignore_urls"] = []
                # Bug fix: the original read type(x == dict), i.e. the type
                # of a bool — always truthy — so this branch fired for any
                # non-empty value and crashed on plain strings.
                elif type(JSONFile['find_videos']["ignore_urls"]) == dict:
                    JSONFile['find_videos']["ignore_urls"] = JSONFile[
                        'find_videos']["ignore_urls"]["value"]
                if type(JSONFile['find_videos']["ignore_urls"]) == str:
                    JSONFile['find_videos']["ignore_urls"] = [
                        JSONFile['find_videos']["ignore_urls"]
                    ]

            if 'settings' in JSONFile:
                if type(JSONFile['settings']) == dict:
                    JSONFile['settings'] = [JSONFile['settings']]
                JSONFile['has_settings'] = bool(len(JSONFile['settings']))
            else:
                JSONFile['has_settings'] = False

            dict_servers_parameters[server] = JSONFile

        except Exception:
            mensaje = "Error al cargar el servidor: %s\n" % server
            import traceback
            logger.error(mensaje + traceback.format_exc())
            return {}

    return dict_servers_parameters[server]