Example #1
def login():
    logger.info()

    try:
        user = config.get_setting("playmaxuser", "playmax")
        password = config.get_setting("playmaxpassword", "playmax")
        if user == "" and password == "":
            return False, "Para ver los enlaces de este canal es necesario registrarse en playmax.mx"
        elif user == "" or password == "":
            return False, "Usuario o contraseña en blanco. Revisa tus credenciales"

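        # Fetch the login page; if the username already appears in it, we are logged in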
        data = httptools.downloadpage(
            "https://playmax.mx/ucp.php?mode=login").data
        if re.search(r'(?i)class="hb_user_data" title="%s"' % user, data):
            if not config.get_setting("sid_playmax", "playmax"):
                sid_ = scrapertools.find_single_match(data, 'sid=([^"]+)"')
                if not sid_:
                    sid_ = scrapertools.find_single_match(
                        config.get_cookie_data(),
                        r'playmax.*?_sid\s*([A-Za-z0-9]+)')
                config.set_setting("sid_playmax", sid_, "playmax")
            return True, ""

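        # Not logged in yet: extract the hidden form tokens and POST the credentials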
        confirm_id = scrapertools.find_single_match(
            data, 'name="confirm_id" value="([^"]+)"')
        sid_log = scrapertools.find_single_match(data,
                                                 'name="sid" value="([^"]+)"')
        post = "username=%s&password=%s&autologin=on&agreed=true&change_lang=0&confirm_id=%s&login=&sid=%s" \
               "&redirect=index.php&login=Entrar" % (user, password, confirm_id, sid_log)
        data = httptools.downloadpage("https://playmax.mx/ucp.php?mode=login",
                                      post=post).data
        if "contraseña incorrecta" in data:
            logger.error("Error en el login")
            return False, "Contraseña errónea. Comprueba tus credenciales"
        elif "nombre de usuario incorrecto" in data:
            logger.error("Error en el login")
            return False, "Nombre de usuario no válido. Comprueba tus credenciales"
        else:
            logger.info("Login correcto")
            sid_ = scrapertools.find_single_match(data, 'sid=([^"]+)"')
            if not sid_:
                sid_ = scrapertools.find_single_match(
                    config.get_cookie_data(), r'playmax.*?_sid\s*([A-Za-z0-9]+)')
            config.set_setting("sid_playmax", sid_, "playmax")
            # On the first login, enable global search and the "newest" sections
            if not config.get_setting("primer_log", "playmax"):
                config.set_setting("include_in_global_search", True, "playmax")
                config.set_setting("include_in_newest_peliculas", True,
                                   "playmax")
                config.set_setting("include_in_newest_series", True, "playmax")
                config.set_setting("include_in_newest_infantiles", True,
                                   "playmax")
                config.set_setting("primer_log", False, "playmax")
            return True, ""
    except Exception:
        import traceback
        logger.error(traceback.format_exc())
        return False, "Error en el login. Comprueba tus credenciales o si la web está operativa"
Example #2
File: vimpleru.py Project: Reunion90/raiz
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("(page_url=%s)" % page_url)

    data = httptools.downloadpage(page_url).data

    media_url = scrapertools.find_single_match(
        data, '"video"[^,]+,"url":"([^"]+)"').replace('\\', '')
    data_cookie = config.get_cookie_data()
    cfduid = scrapertools.find_single_match(data_cookie, '.vimple.ru.*?(__cfduid\t[a-f0-9]+)') \
        .replace('\t', '=')
    univid = scrapertools.find_single_match(data_cookie, '.vimple.ru.*?(UniversalUserID\t[a-f0-9]+)') \
        .replace('\t', '=')

    media_url += "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0" \
                 "&Cookie=%s; %s" % (cfduid, univid)

    video_urls = []
    video_urls.append([
        scrapertools.get_filename_from_url(media_url)[-4:] + " [vimple.ru]",
        media_url
    ])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #3
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("(page_url='%s')" % page_url)

    video_urls = []
    if config.get_setting("premium", server="onefichier"):
        user = config.get_setting("user", server="onefichier")
        password = config.get_setting("password", server="onefichier")

        url = "https://1fichier.com/login.pl"
        logger.info("url=" + url)
        post_parameters = {
            "mail": user,
            "pass": password,
            "lt": "on",
            "purge": "on",
            "valider": "Send"
        }
        post = urllib.urlencode(post_parameters)
        logger.info("post=" + post)

        data = scrapertools.cache_page(url, post=post)
        # logger.info("data="+data)

        cookies = config.get_cookie_data()
        logger.info("cookies=" + cookies)

        # 1fichier.com   TRUE    /   FALSE   1443553315  SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
        sid_cookie_value = scrapertools.find_single_match(
            cookies, r"1fichier.com.*?SID\s+([A-Za-z0-9+=]+)")
        logger.info("sid_cookie_value=" + sid_cookie_value)

        # .1fichier.com  TRUE    /   FALSE   1443553315  SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
        cookie = urllib.urlencode({"SID": sid_cookie_value})

        # Find out the real file name
        headers = []
        headers.append([
            'User-Agent',
            'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12'
        ])
        headers.append(['Cookie', cookie])
        filename = scrapertools.get_header_from_response(
            page_url, header_to_get="Content-Disposition", headers=headers)
        logger.info("filename=" + filename)

        # Build the final URL for Kodi
        location = page_url + "|Cookie=" + cookie
        logger.info("location=" + location)

        video_urls.append([filename[-4:] + " (Premium) [1fichier]", location])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #4
def check_cookie(domain, cname):
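    """Return True when cookie `cname` for `domain` exists and its value looks valid."""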
    from platformcode import config
    # load the addon's cookie file
    cookies = config.get_cookie_data()
    # look up the value of the cookie "cname" for the domain "domain"
    cookie_value = scrapertools.find_single_match(
        cookies, domain + r".*?" + cname + r"\s+([A-Za-z0-9+=%_]+)")
    # treat the cookie as valid only when its value is longer than 6 characters
    if cookie_value:
        return len(cookie_value) > 6
    return False
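
A quick usage sketch (the domain and cookie name here are illustrative only, not from the original code):

# True only when a "sid" cookie for the given domain exists and looks valid
if check_cookie("playmax.mx", "sid"):
    print("session cookie found")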
Example #5
def findvideos(item):
    logger.info()
    itemlist = []

    headers = {
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'
    }

    if item.extra:
        data = httptools.downloadpage(item.url, headers=headers).data
        blocco = scrapertools.find_single_match(data,
                                                r'%s(.*?)</tr>' % item.extra)
        item.url = scrapertools.find_single_match(blocco,
                                                  r'<a href="([^"]+)"[^>]+>')

    patron = r'http:\/\/link[^a]+animesubita[^o]+org\/[^\/]+\/.*?(episodio\d*)[^p]+php(\?.*)'
    for phpfile, scrapedurl in re.findall(patron, item.url, re.DOTALL):
        url = "%s/%s.php%s" % (host, phpfile, scrapedurl)
        headers['Referer'] = url
        data = httptools.downloadpage(url, headers=headers).data
        # ------------------------------------------------
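        # Rebuild the Cookie header from the addon's cookie file: each entry is a
        # tab-separated line (Netscape format), with the cookie name at index 5
        # and its value at index 6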
        cookies = ""
        matches = re.compile(
            '(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""),
            re.DOTALL).findall(config.get_cookie_data())
        for cookie in matches:
            name = cookie.split('\t')[5]
            value = cookie.split('\t')[6]
            cookies += name + "=" + value + ";"
        headers['Cookie'] = cookies[:-1]
        # ------------------------------------------------
        scrapedurl = scrapertools.find_single_match(
            data, r'<source src="([^"]+)"[^>]+>')
        url = scrapedurl + '|' + urllib.urlencode(headers)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 text_color="azure",
                 title="[%s] %s" %
                 (support.color("Diretto", "orange"), item.title),
                 fulltitle=item.fulltitle,
                 url=url,
                 thumbnail=item.thumbnail,
                 fanart=item.thumbnail,
                 plot=item.plot))

    return itemlist
Example #6
def dettaglio(item):
    log("animetubeita", "dettaglio", item.channel)

    itemlist = []
    headers = {
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'
    }

    episodio = 1
    patron = r'<a href="http:\/\/link[^a]+animetubeita[^c]+com\/[^\/]+\/[^s]+(stream|strm)[^p]+php(\?.*?)"'
    for phpfile, scrapedurl in scrapedAll(item.url, patron):
        title = "Episodio " + str(episodio)
        episodio += 1
        url = "%s/%s.php%s" % (host, phpfile, scrapedurl)
        headers['Referer'] = url
        data = httptools.downloadpage(url, headers=headers).data
        # ------------------------------------------------
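        # Rebuild the Cookie header from the addon's cookie file: tab-separated
        # Netscape format, with the cookie name at index 5 and value at index 6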
        cookies = ""
        matches = re.compile('(.animetubeita.com.*?)\n',
                             re.DOTALL).findall(config.get_cookie_data())
        for cookie in matches:
            name = cookie.split('\t')[5]
            value = cookie.split('\t')[6]
            cookies += name + "=" + value + ";"
        headers['Cookie'] = cookies[:-1]
        # ------------------------------------------------
        url = scrapertools.find_single_match(
            data, """<source src="([^"]+)" type='video/mp4'>""")
        url += '|' + urllib.urlencode(headers)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=url,
                 thumbnail=item.thumbnail,
                 fanart=item.thumbnail,
                 plot=item.plot))

    return itemlist