Example #1
def set_cookie():
    dict_cookie = {'domain': '.youtube.com',
                   'name': 'c_locale',
                   'value': '0',
                   'expires': 1}
    
    httptools.set_cookies(dict_cookie, clear=True)
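The cookie-dict format above is inferred from the examples in this listing rather than from httptools documentation: 'expires' appears to be a lifetime in seconds, an entry carrying only 'domain' and 'expires': 0 purges that domain's cookies (see the logout examples below), and clear=True appears to wipe the stored cookies first. A minimal sketch, assuming the Alfa-style import path core.httptools:

from core import httptools  # assumed import path; adjust to your addon layout

# set a cookie with a 7-day lifetime (the pattern of Example #3)
httptools.set_cookies({'domain': '.example.com', 'name': 'c_locale',
                       'value': 'enUS', 'expires': 604800})

# delete every cookie stored for a domain (the pattern of Examples #5-#7)
httptools.set_cookies({'domain': '.example.com', 'expires': 0})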
Example #2
    def _unshorten_vcrypt(self, uri):
        httptools.set_cookies({
            'domain': 'vcrypt.net',
            'name': 'saveMe',
            'value': '1'
        })
        httptools.set_cookies({
            'domain': 'vcrypt.pw',
            'name': 'saveMe',
            'value': '1'
        })
        try:
            headers = {}
            if 'myfoldersakstream.php' in uri or '/verys/' in uri:
                return uri, 0
            r = None

            if 'shield' in uri.split('/')[-2]:
                uri = decrypt_aes(uri.split('/')[-1], b"naphajU2usWUswec")
            else:
                spl = uri.split('/')
                spl[0] = 'http:'

                if 'sb/' in uri or 'akv/' in uri or 'wss/' in uri:
                    import datetime, hashlib
                    from base64 import b64encode
                    # ip = urlopen('https://api.ipify.org/').read()
                    ip = b'31.220.1.77'
                    day = datetime.date.today().strftime('%Y%m%d')
                    if PY3: day = day.encode()
                    headers = {
                        "Cookie":
                        hashlib.md5(ip + day).hexdigest() + "=1;saveMe=1"
                    }
                    spl[3] += '1'
                    if spl[3] in ['wss1', 'sb1']:
                        spl[4] = b64encode(
                            spl[4].encode('utf-8')).decode('utf-8')

                uri = '/'.join(spl)
                r = httptools.downloadpage(uri,
                                           timeout=self._timeout,
                                           headers=headers,
                                           follow_redirects=False,
                                           verify=False)
                if 'Wait 1 hour' in r.data:
                    uri = ''
                    logger.error('IP bannato da vcrypt, aspetta un ora')
                else:
                    uri = r.headers['location']
            return uri, r.code if r else 200
        except Exception as e:
            logger.error(e)
            return uri, 0
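Example #2 builds an anti-bot cookie whose name is md5(client IP + current date in YYYYMMDD form). A standalone sketch of just that computation; the hardcoded IP mirrors the example, whose commented-out urlopen call is the live variant:

import datetime
import hashlib

ip = b'31.220.1.77'  # Example #2 hardcodes this instead of querying api.ipify.org
day = datetime.date.today().strftime('%Y%m%d').encode()

# cookie name is md5(ip + day); its value is always "1", with saveMe=1 appended
cookie_header = hashlib.md5(ip + day).hexdigest() + '=1;saveMe=1'
print(cookie_header)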
Example #3
def set_weblang():

    # for the language we use the c_locale cookie and set_cookies
    langs = ['deDE', 'ptPT', 'frFR', 'itIT', 'enUS', 'esLA', 'esES']
    lang = langs[config.get_setting("crunchyrollidioma", canonical['channel'])]

    # create the c_locale cookie and give it a 7-day lifetime
    dict_cookie = {
        'domain': '.%s' % domain,
        'name': 'c_locale',
        'value': lang,
        'expires': 604800
    }

    httptools.set_cookies(dict_cookie, clear=False)
Example #4
File: hdmario.py Project: Muzic98/addon
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    global page, data
    page_url = page_url.replace('?', '')
    logger.debug("url=" + page_url)

    if 'unconfirmed' in page.url:
        id = page_url.split('/')[-1]
        mailbox = Gmailnator()
        postData = {'email': mailbox.address, 'hls_video_id': id}
        httptools.downloadpage(page.url, post=postData)
        mail = mailbox.waitForMail()
        logger.debug(mail)
        if mail:
            code = mail.subject.split(' - ')[0]
            page = httptools.downloadpage(page_url + '?code=' + code)
            data = page.data

    if '/unauthorized' in page.url or '/not-active' in page.url:
        httptools.set_cookies({'domain': 'hdmario.live'},
                              True)  # clear cookies
        if not registerOrLogin(page_url):
            return []
        page = httptools.downloadpage(page_url)
        data = page.data

    logger.debug(data)
    from lib import jsunpack_js2py
    unpacked = jsunpack_js2py.unpack(
        scrapertools.find_single_match(
            data, r'<script type="text/javascript">\n*\s*\n*(eval.*)'))
    # p,a,c,k,e,d data -> xhr.setRequestHeader
    secureProof = scrapertools.find_single_match(
        unpacked, r"""X-Secure-Proof['"]\s*,\s*['"]([^"']+)""")
    logger.debug('X-Secure-Proof=' + secureProof)

    data = httptools.downloadpage(
        baseUrl + '/pl/' + page_url.split('/')[-1].replace('?', '') + '.m3u8',
        headers=[['X-Secure-Proof', secureProof]]).data
    filetools.write(xbmc.translatePath('special://temp/hdmario.m3u8'), data,
                    'w')

    video_urls = [['.m3u8 [HDmario]', 'special://temp/hdmario.m3u8']]

    return video_urls
Example #5
def logout(item):
    logger.info()
    dict_cookie = {"domain": "hdfull.me", 'expires': 0}
    # delete the hdfull.me cookies
    httptools.set_cookies(dict_cookie)

    # clear the stored login
    config.set_setting("hdfulluser", "", channel="hdfull")
    config.set_setting("hdfullpassword", "", channel="hdfull")
    config.set_setting("logged", False, channel="hdfull")
    
    # notify the user, unless silenced
    if not _silence:
        platformtools.dialog_notification("Deslogueo completo", 
                                          "Reconfigure su cuenta",
                                          sound=False,)
    # and send the user to the channel settings
    return settingCanal(item)
Example #6
File: gamovideo.py Project: zapan/addon
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    

    data = alfaresolver.get_data(page_url, False)
    if not "|mp4|" in data:
        dict_cookie = {'domain': '.gamovideo.com', 'expires': 0}
        httptools.set_cookies(dict_cookie)
        data = alfaresolver.get_data(page_url, False)
    
    global DATA
    DATA = data
    if "images/proced.png" in data:
        return False, "[Gamovideo] El archivo no existe o ha sido borrado"
    if "File was deleted" in data or ("Not Found"  in data and not "|mp4|" in data) or "File was locked by administrator" in data:
        return False, "[Gamovideo] El archivo no existe o ha sido borrado"
    if "Video is processing now" in data:
        return False, "[Gamovideo] El video está procesándose en estos momentos. Inténtelo mas tarde."
    if "File is awaiting for moderation" in data:
        return False, "[Gamovideo] El video está esperando por moderación."

    return True, ""
Example #7
def logout(item):
    global account
    logger.info()
    
    domain = urlparse.urlparse(host).netloc
    dict_cookie = {"domain": domain, 'expires': 0}
    # delete the hdfull cookies
    httptools.set_cookies(dict_cookie)

    # clear the stored login
    config.set_setting("hdfulluser", "", channel=canonical['channel'])
    config.set_setting("hdfullpassword", "", channel=canonical['channel'])
    config.set_setting("logged", False, channel=canonical['channel'])
    account = False
    
    # notify the user, unless silenced
    if not _silence:
        platformtools.dialog_notification("Deslogueo completo", 
                                          "Reconfigure su cuenta",
                                          sound=False,)
    platformtools.itemlist_refresh()
    return item
Example #8
File: cliver.py Project: roliverosc/addon
def js2py_conversion(data, domain=".cliver.to"):
    logger.info()
    from lib import js2py
    import base64

    patron = ",\s*S='([^']+)'"
    data_new = scrapertools.find_single_match(data, patron)

    if not data_new:
        logger.error('js2py_conversion: NO data_new')

    js2py_code = ''
    try:
        for x in range(10):  # up to 10 decode passes, or until decoding fails
            data_end = base64.b64decode(data_new).decode('utf-8')
            data_new = data_end
    except Exception:
        # the last layer that decoded successfully is the JS payload
        js2py_code = data_new
    else:
        # never hit a decode error: keep the result anyway, but flag it
        js2py_code = data_new
        logger.error('js2py_conversion: base64 data_new NO Funciona: ' +
                     str(data_new))

    if not js2py_code:
        logger.error('js2py_conversion: NO js2py_code BASE64')

    js2py_code = js2py_code.replace('document',
                                    'window').replace(" location.reload();",
                                                      "")
    js2py.disable_pyimport()
    context = js2py.EvalJs({'atob': atob})
    new_cookie = context.eval(js2py_code)

    logger.info('new_cookie: ' + new_cookie)

    dict_cookie = {
        'domain': domain,
    }

    if ';' in new_cookie:
        new_cookie = new_cookie.split(';')[0].strip()
        namec, valuec = new_cookie.split('=')
        dict_cookie['name'] = namec.strip()
        dict_cookie['value'] = valuec.strip()
    zanga = httptools.set_cookies(dict_cookie)
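Example #8 evaluates browser-style JavaScript in js2py while exposing a Python atob() shim, so the snippet can base64-decode inside the sandbox. A minimal self-contained sketch of that pattern, using the pip js2py package directly instead of the copy the addon bundles under lib/:

import base64
import js2py  # pip install js2py

def atob(s):
    # Python stand-in for the browser's atob(), as Example #8 assumes
    return base64.b64decode(s).decode('utf-8')

js2py.disable_pyimport()  # keep the JS sandbox from importing Python modules
context = js2py.EvalJs({'atob': atob})
print(context.eval("atob('Zm9vPWJhcg==')"))  # -> foo=bar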
Example #9
def get_cl(resp, timeout=20, debug=False, extraPostDelay=15, retry=False, blacklist=True, retryIfTimeout=True, **kwargs):
    blacklist_clear = True
    if 'hideproxy' in resp.url or 'webproxy' in resp.url or kwargs.get('proxies'):
        blacklist_clear = False
        blacklist = False
    
    if timeout < 15: timeout = 20
    if timeout + extraPostDelay > 35: timeout = 20

    domain_full = urlparse.urlparse(resp.url).netloc
    domain = domain_full
    if blacklist and not retry: 
        blacklist_clear = check_blacklist(domain_full)
    
    if blacklist_clear:
        host = config.get_system_platform()[:1]
        
        freequent_data = [domain, 'CF2,0.0.0,0,%s0,NoApp' % host]
        
        check_assistant = alfa_assistant.open_alfa_assistant(getWebViewInfo=True, retry=retry)
        if not isinstance(check_assistant, dict) and retry:
            alfa_assistant.close_alfa_assistant()
            time.sleep(2)
            check_assistant = alfa_assistant.open_alfa_assistant(getWebViewInfo=True, retry=True)
            if not check_assistant:
                time.sleep(10)
                check_assistant = alfa_assistant.get_generic_call('getWebViewInfo', timeout=2, alfa_s=True)
            
        if check_assistant and isinstance(check_assistant, dict):

            if check_assistant.get('assistantLatestVersion') and check_assistant.get('assistantVersion'):
                installed_version = check_assistant['assistantVersion'].split('.')
                available_version = check_assistant['assistantLatestVersion'].split('.')
                newer = False
                for i, ver in enumerate(available_version):
                    if int(ver) > int(installed_version[i]):
                        newer = True
                        break
                    if int(ver) < int(installed_version[i]):
                        break
                if newer:
                    help_window.show_info('cf_2_02', wait=False)

            ua = get_ua(check_assistant)
            
            try:
                vers = int(scrapertools.find_single_match(ua, r"Android\s*(\d+)"))
            except:
                vers = 0

            wvbVersion = check_assistant.get('wvbVersion', '0.0.0').split('.')[0]
            if len(wvbVersion) > 3: wvbVersion = wvbVersion[:2]
            freequent_data[1] = 'CF2,%s,%s,%s%s,' % (check_assistant.get('assistantVersion', '0.0.0'), wvbVersion, host, vers)

            if vers:
                dan = {'User-Agent': ua}
                resp.headers.update(dict(dan))
                ua = None
            else:
                ua = httptools.get_user_agent()

            logger.debug("UserAgent: %s || Android Vrs: %s" % (ua, vers))

            jscode = get_jscode(1, 'KEYCODE_ENTER', 1)

            url_cf = scrapertools.find_single_match(resp.url, r'(http.*\:\/\/(?:www\S*.)?\w+\.\w+(?:\.\w+)?)(?:\/)?') + '|cf_clearance'

            data_assistant = alfa_assistant.get_urls_by_page_finished(resp.url, timeout=timeout, getCookies=True, userAgent=ua,
                                                                        disableCache=True, debug=debug, jsCode=jscode,
                                                                        extraPostDelay=extraPostDelay, clearWebCache=True, 
                                                                        removeAllCookies=True, returnWhenCookieNameFound=url_cf,
                                                                        retryIfTimeout=retryIfTimeout
                                                                        )
            logger.debug("data assistant: %s" % data_assistant)

            domain_ = domain
            split_lst = domain.split(".")

            if len(split_lst) > 2:
                domain = domain.replace(split_lst[0], "")
            
            if not domain.startswith('.'):
                domain = "."+domain
            
            get_ua(data_assistant)

            if isinstance(data_assistant, dict) and data_assistant.get("cookies", None):

                logger.debug("Lista cookies: %s" % data_assistant.get("cookies", []))
                for cookie in data_assistant["cookies"]:
                    cookieslist = cookie.get("cookiesList", None)
                    val = scrapertools.find_single_match(cookieslist, 'cf_clearance=([A-Za-z0-9_-]+)')
                    dom = cookie.get("urls", None)
                    logger.debug("dominios: %s" % dom[0])

                    if 'cf_clearance' in cookieslist and val:
                        
                        dict_cookie = {'domain': domain,
                                       'name': 'cf_clearance',
                                       'value': val}
                        if domain_ in dom[0]:
                            httptools.set_cookies(dict_cookie)
                            rin = {'Server': 'Alfa'}

                            resp.headers.update(dict(rin))
                            logger.debug("cf_clearence=%s" % val)
                            
                            if not retry:
                                freequent_data[1] += 'OK'
                            else:
                                freequent_data[1] += 'OK_R'
                            freequency(freequent_data)

                            return resp

                    else:
                        logger.error("No cf_clearance")
                else:
                    freequent_data[1] += 'NO-CFC'
            else:
                freequent_data[1] += 'ERR'
                logger.error("No Cookies o Error en conexión con Alfa Assistant")

            if not retry:
                config.set_setting('cf_assistant_ua', '')
                logger.debug("No se obtuvieron resultados, reintentando...")
                return get_cl(resp, timeout=timeout-5, extraPostDelay=extraPostDelay, \
                            retry=True, blacklist=True, retryIfTimeout=False, **kwargs)
        elif host == 'a':
            help_window.show_info('cf_2_01')
        
        freequency(freequent_data)
        
        if filetools.exists(PATH_BL):
            bl_data = jsontools.load(filetools.read(PATH_BL))
        else:
            bl_data = {}
        bl_data[domain_full] = time.time()
        filetools.write(PATH_BL, jsontools.dump(bl_data))

    msg = 'Detected a Cloudflare version 2 Captcha challenge,\
        This feature is not available in the opensource (free) version.'
    resp.status_code = msg
    
    raise CloudflareChallengeError(msg)
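Examples #9 and #11 decide whether the assistant is outdated by comparing dotted version strings field by field. A standalone sketch of that comparison; the helper name is mine:

def is_newer(available, installed):
    # walk both dotted versions left to right, as in Examples #9/#11
    for a, i in zip(available.split('.'), installed.split('.')):
        if int(a) > int(i):
            return True
        if int(a) < int(i):
            return False
    return False

print(is_newer('2.3.1', '2.3.0'))  # True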
Example #10
def get_cl(resp, timeout=20, debug=False, extraPostDelay=15, retry=True):
    check_assistant = alfa_assistant.open_alfa_assistant(getWebViewInfo=True)
    if check_assistant and isinstance(check_assistant, dict):
        
        ua = get_ua(check_assistant)
        
        try:
            vers = int(scrapertools.find_single_match(ua, r"Android\s*(\d+)"))
        except:
            vers = 0

        if vers:
            dan = {'User-Agent': ua}
            resp.headers.update(dict(dan))
            ua = None
        else:
            ua = httptools.get_user_agent()

        logger.debug("UserAgent: %s || Android Vrs: %s" % (ua, vers))
        
        jscode = get_jscode(1, 'KEYCODE_ENTER', 1)

        data_assistant = alfa_assistant.get_source_by_page_finished(resp.url, timeout=timeout, getCookies=True, userAgent=ua,
                                                                    disableCache=True, debug=debug, jsCode=jscode,
                                                                    extraPostDelay=extraPostDelay, clearWebCache=True, 
                                                                    removeAllCookies=True
                                                                    )
        
        logger.debug("data assistant: %s" % data_assistant)
        
        domain = urlparse(resp.url).netloc
        domain_ = domain
        split_lst = domain.split(".")

        if len(split_lst) > 2:
            domain = domain.replace(split_lst[0], "")
        
        if not domain.startswith('.'):
            domain = "."+domain
        
        get_ua(data_assistant)

        if isinstance(data_assistant, dict) and data_assistant.get("cookies", None):
            
            for cookie in data_assistant["cookies"]:
                cookieslist = cookie.get("cookiesList", None)
                val = scrapertools.find_single_match(cookieslist, 'cf_clearance=([A-Za-z0-9_-]+)')
                dom = cookie.get("urls", None)
                #logger.debug("dominios: %s" % dom[0])
                #logger.debug("Lista cookies: %s" % cookieslist)

                if 'cf_clearance' in cookieslist and val:
                    
                    dict_cookie = {'domain': domain,
                                   'name': 'cf_clearance',
                                   'value': val}
                    if domain_ in dom[0]:
                        httptools.set_cookies(dict_cookie)
                        rin = {'Server': 'Alfa'}

                        resp.headers.update(dict(rin))
                        #logger.debug("cf_clearence=%s" %s val)

                        return resp
                    else:
                        logger.error("No cf_clearance for %s" % domain_)

                else: 
                    logger.error("No cf_clearance")
        else:
            logger.error("No Cookies o Error en conexión con Alfa Assistant")

        if retry:
            config.set_setting('cf_assistant_ua', '')
            logger.debug("No se obtuvieron resultados, reintentando...")
            return get_cl(resp, timeout=timeout-5, extraPostDelay=extraPostDelay, retry=False)

    msg = 'Detected a Cloudflare version 2 Captcha challenge,\
        This feature is not available in the opensource (free) version.'
    
    resp.status_code = msg

    logger.error('Detected a Cloudflare version 2 Hcaptcha challenge')
    
    return False
Example #11
def get_source(url,
               resp,
               timeout=5,
               debug=False,
               extraPostDelay=5,
               retry=False,
               blacklist=True,
               headers=None,
               retryIfTimeout=True,
               cache=False,
               clearWebCache=False,
               mute=True,
               alfa_s=False,
               elapsed=0,
               **kwargs):
    blacklist_clear = True
    data = ''
    source = False
    if not elapsed: elapsed = time.time()
    elapsed_max = 40
    expiration = config.get_setting('cf_assistant_bl_expiration',
                                    default=30) * 60
    expiration_final = 0
    security_error_blackout = (5 * 60) - expiration

    if debug: alfa_s = False

    if not resp:
        resp = {'status_code': 429, 'headers': {}}
        resp = type('HTTPResponse', (), resp)

    if not alfa_s: logger.debug("ERROR de descarga: %s" % resp.status_code)

    opt = kwargs.get('opt', {})

    domain_full = urlparse.urlparse(url).netloc
    domain = domain_full
    pcb = base64.b64decode(
        config.get_setting('proxy_channel_bloqued')).decode('utf-8')
    if 'hideproxy' in url or 'webproxy' in url or 'hidester' in url or '__cpo=' in url  \
                          or httptools.TEST_ON_AIR or domain in pcb:
        blacklist_clear = False
        blacklist = False

    if timeout + extraPostDelay > 35: timeout = 20

    if blacklist and not retry:
        blacklist_clear = check_blacklist(domain_full)

    host = config.get_system_platform()[:1]
    freequent_data = [domain, 'Cha,0.0.0,0,%s0,BlakL' % host]
    if blacklist_clear:
        freequent_data = [domain, 'Cha,0.0.0,0,%s0,App' % host]
        if not retry:
            freequent_data[1] += 'KO'
        else:
            freequent_data[1] += 'KO_R'

        check_assistant = alfa_assistant.open_alfa_assistant(
            getWebViewInfo=True, retry=True, assistantLatestVersion=False)
        if not isinstance(check_assistant, dict) and not retry:
            alfa_assistant.close_alfa_assistant()
            time.sleep(2)
            check_assistant = alfa_assistant.open_alfa_assistant(
                getWebViewInfo=True, retry=True, assistantLatestVersion=False)
            logger.debug("Reintento en acceder al Assistant: %s - %s" \
                         % ('OK' if isinstance(check_assistant, dict) else 'ERROR', time.time() - elapsed))

        if check_assistant and isinstance(check_assistant, dict):

            if check_assistant.get(
                    'assistantLatestVersion') and check_assistant.get(
                        'assistantVersion'):
                installed_version = check_assistant['assistantVersion'].split(
                    '.')
                available_version = check_assistant[
                    'assistantLatestVersion'].split('.')
                newer = False
                for i, ver in enumerate(available_version):
                    if int(ver) > int(installed_version[i]):
                        newer = True
                        break
                    if int(ver) < int(installed_version[i]):
                        break
                if newer:
                    help_window.show_info('cf_2_02', wait=False)

            ua = get_ua(check_assistant)

            try:
                vers = int(
                    scrapertools.find_single_match(ua, r"Android\s*(\d+)"))
            except:
                vers = 0

            wvbVersion = check_assistant.get('wvbVersion',
                                             '0.0.0').split('.')[0]
            if len(wvbVersion) > 3: wvbVersion = wvbVersion[:2]
            freequent_data[1] = 'Cha,%s,%s,%s%s,' % (check_assistant.get(
                'assistantVersion', '0.0.0'), wvbVersion, host, vers)
            if not retry:
                freequent_data[1] += 'Src'
            else:
                freequent_data[1] += 'Src_R'

            if vers:
                dan = {'User-Agent': ua}
                resp.headers.update(dict(dan))
                ua = None
            else:
                ua = httptools.get_user_agent()

            if not alfa_s:
                logger.debug("UserAgent: %s || Android Vrs: %s" % (ua, vers))

            jscode = None

            url_cf = scrapertools.find_single_match(
                url, r'(http.*\:\/\/(?:www\S*.)?\w+\.\w+(?:\.\w+)?)(?:\/)?'
            ) + '|cf_clearance'

            data_assistant = alfa_assistant.get_source_by_page_finished(
                url,
                timeout=timeout,
                getCookies=True,
                userAgent=ua,
                disableCache=cache,
                debug=debug,
                jsCode=jscode,
                extraPostDelay=extraPostDelay,
                clearWebCache=clearWebCache,
                removeAllCookies=True,
                returnWhenCookieNameFound=url_cf,
                retryIfTimeout=retryIfTimeout,
                useAdvancedWebView=True,
                headers=headers,
                mute=mute,
                alfa_s=alfa_s)
            if not alfa_s: logger.debug("data assistant: %s" % data_assistant)

            if isinstance(data_assistant, dict) and data_assistant.get('htmlSources', []) \
                                                and data_assistant['htmlSources'][0].get('source', ''):
                try:
                    data = base64.b64decode(data_assistant['htmlSources'][0]
                                            ['source']).decode('utf-8')
                    source = True
                except:
                    pass

                if source and 'accessing a cross-origin frame' in data:
                    source = False
                    retry = True
                    expiration_final = security_error_blackout
                    freequent_data[1] = 'Cha,%s,%s,%s%s,' % (
                        check_assistant.get('assistantVersion',
                                            '0.0.0'), wvbVersion, host, vers)
                    freequent_data[1] += 'KO_SecE'
                    logger.error('Error SEGURIDAD: %s - %s' %
                                 (expiration_final, data[:100]))

                if source:
                    freequent_data[1] = 'Cha,%s,%s,%s%s,' % (
                        check_assistant.get('assistantVersion',
                                            '0.0.0'), wvbVersion, host, vers)
                    if not retry:
                        freequent_data[1] += 'OK'
                    else:
                        freequent_data[1] += 'OK_R'

            if not source and not retry:
                config.set_setting('cf_assistant_ua', '')
                logger.debug("No se obtuvieron resultados, reintentando...")
                timeout = -1 if timeout < 0 else timeout * 2
                extraPostDelay = -1 if extraPostDelay < 0 else extraPostDelay * 2
                return get_source(url,
                                  resp,
                                  timeout=timeout,
                                  debug=debug,
                                  extraPostDelay=extraPostDelay,
                                  retry=True,
                                  blacklist=blacklist,
                                  retryIfTimeout=retryIfTimeout,
                                  cache=cache,
                                  clearWebCache=clearWebCache,
                                  alfa_s=False,
                                  headers=headers,
                                  mute=mute,
                                  elapsed=elapsed,
                                  **kwargs)

            domain_ = domain
            split_lst = domain.split(".")

            if len(split_lst) > 2:
                domain = domain.replace(split_lst[0], "")

            if not domain.startswith('.'):
                domain = "." + domain

            get_ua(data_assistant)

            if isinstance(data_assistant, dict) and data_assistant.get(
                    "cookies", None):

                if not alfa_s:
                    logger.debug("Lista cookies: %s" %
                                 data_assistant.get("cookies", []))
                for cookie in data_assistant["cookies"]:
                    cookieslist = cookie.get("cookiesList", None)
                    val = scrapertools.find_single_match(
                        cookieslist, r'cf_clearance=([A-Za-z0-9_\-\.]+)')
                    #val = scrapertools.find_single_match(cookieslist, 'cf_clearance=([^;]+)')
                    dom = cookie.get("urls", None)
                    if not alfa_s: logger.debug("dominios: %s" % dom[0])

                    if 'cf_clearance' in cookieslist and val:

                        dict_cookie = {
                            'domain': domain,
                            'name': 'cf_clearance',
                            'value': val
                        }
                        if domain_ in dom[0]:
                            httptools.set_cookies(dict_cookie)
                            rin = {'Server': 'Alfa'}

                            resp.headers.update(dict(rin))
                            freequent_data[1] += 'C'
                            if not alfa_s:
                                logger.debug("cf_clearence=%s" % val)

        elif host == 'a':
            help_window.show_info('cf_2_01')

    freequency(freequent_data)

    if blacklist_clear and (not source or time.time() - elapsed > elapsed_max):
        if filetools.exists(PATH_BL):
            bl_data = jsontools.load(filetools.read(PATH_BL))
        else:
            bl_data = {}
        if time.time() - elapsed > elapsed_max:
            bl_data[domain_full] = time.time() + elapsed_max * 10 * 60
        else:
            bl_data[domain_full] = time.time() + expiration_final
        if not debug and not httptools.TEST_ON_AIR:
            filetools.write(PATH_BL, jsontools.dump(bl_data))
    if not source:
        resp.status_code = 429
    else:
        resp.status_code = 200

    return data, resp
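Examples #9, #10 and #11 all normalize the host to a cookie domain before storing cf_clearance: drop the first label of a three-or-more-label host and force a leading dot. A standalone sketch; the helper name is mine:

def cookie_domain(netloc):
    # ".example.com" matches the bare domain and all its subdomains
    parts = netloc.split('.')
    if len(parts) > 2:
        netloc = netloc.replace(parts[0], '')
    if not netloc.startswith('.'):
        netloc = '.' + netloc
    return netloc

print(cookie_domain('www.example.com'))  # -> .example.com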
Example #12
    def _unshorten_vcrypt(self, uri):
        httptools.set_cookies({'domain': 'vcrypt.net', 'name': 'saveMe', 'value': '1'})
        httptools.set_cookies({'domain': 'vcrypt.pw', 'name': 'saveMe', 'value': '1'})
        try:
            headers = {}
            if 'myfoldersakstream.php' in uri or '/verys/' in uri:
                return uri, 0
            r = None

            def decrypt(enc):  # renamed from "str" to avoid shadowing the builtin
                try:
                    from Cryptodome.Cipher import AES
                except ImportError:
                    from Crypto.Cipher import AES

                # undo the URL-safe escaping applied by vcrypt
                enc = enc.replace("_ppl_", "+").replace("_eqq_", "=").replace("_sll_", "/")
                iv = b"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
                key = b"naphajU2usWUswec"
                decoded = b64decode(enc)
                # pad to a 16-byte boundary (AES block size)
                decoded = decoded + b'\0' * ((16 - len(decoded) % 16) % 16)
                crypt_object = AES.new(key, AES.MODE_CBC, iv)
                decrypted = b''
                for p in range(0, len(decoded), 16):
                    decrypted += crypt_object.decrypt(decoded[p:p + 16]).replace(b'\0', b'')
                return decrypted.decode('ascii')
            if 'shield' in uri.split('/')[-2]:
                uri = decrypt(uri.split('/')[-1])
            else:
                if 'sb/' in uri or 'akv/' in uri or 'wss/' in uri or 'wsd/' in uri:
                    import datetime, hashlib
                    from base64 import b64encode
                    ip = urlopen('https://api.ipify.org/').read()
                    day = datetime.date.today().strftime('%Y%m%d')
                    if PY3: day = day.encode()
                    headers = {
                        "Cookie": hashlib.md5(ip+day).hexdigest() + "=1"
                    }
                    spl = uri.split('/')
                    spl[3] += '1'
                    if spl[3] == 'wss1':
                        spl[4] = b64encode(spl[4].encode('utf-8')).decode('utf-8')
                        uri = '/'.join(spl)
                r = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False, verify=False)
                if 'Wait 1 hour' in r.data:
                    uri = ''
                    logger.error('IP bannato da vcrypt, aspetta un ora')
                else:
                    prev_uri = uri
                    uri = r.headers['location']
                    if uri == prev_uri:
                        logger.info('Use Cloudscraper')
                        uri = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False, cf=True).headers['location']
            if "snip." in uri:
                if 'out_generator' in uri:
                    uri = re.findall('url=(.*)$', uri)[0]
                elif '/decode/' in uri:
                    scheme, netloc, path, query, fragment = urlsplit(uri)
                    splitted = path.split('/')
                    splitted[1] = 'outlink'
                    new_uri = httptools.downloadpage(uri, follow_redirects=False, post={'url': splitted[2]}).headers['location']
                    if new_uri and new_uri != uri:
                        uri = new_uri
                    else:
                        uri = httptools.downloadpage(scheme + '://' + netloc + "/".join(splitted) + query + fragment, follow_redirects=False, post={'url': splitted[2]}).headers['location']
                    # uri = decrypt(uri.split('/')[-1])

            return uri, r.code if r else 200
        except Exception as e:
            logger.error(e)
            return uri, 0
Example #13
    def _unshorten_vcrypt(self, uri):
        httptools.set_cookies({
            'domain': 'vcrypt.net',
            'name': 'saveMe',
            'value': '1'
        })
        httptools.set_cookies({
            'domain': 'vcrypt.pw',
            'name': 'saveMe',
            'value': '1'
        })
        try:
            headers = {}
            if 'myfoldersakstream.php' in uri or '/verys/' in uri:
                return uri, 0
            r = None

            def decrypt(enc):  # renamed from "str" to avoid shadowing the builtin
                try:
                    from Cryptodome.Cipher import AES
                except ImportError:
                    from Crypto.Cipher import AES

                # undo the URL-safe escaping applied by vcrypt
                enc = enc.replace("_ppl_",
                                  "+").replace("_eqq_",
                                               "=").replace("_sll_", "/")
                iv = b"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
                key = b"naphajU2usWUswec"
                decoded = b64decode(enc)
                # pad to a 16-byte boundary (AES block size)
                decoded = decoded + b'\0' * ((16 - len(decoded) % 16) % 16)
                crypt_object = AES.new(key, AES.MODE_CBC, iv)
                decrypted = b''
                for p in range(0, len(decoded), 16):
                    decrypted += crypt_object.decrypt(
                        decoded[p:p + 16]).replace(b'\0', b'')
                return decrypted.decode('ascii')

            if 'shield' in uri.split('/')[-2]:
                uri = decrypt(uri.split('/')[-1])
            else:
                if 'sb/' in uri or 'akv/' in uri or 'wss/' in uri or 'wsd/' in uri:
                    import datetime, hashlib
                    from base64 import b64encode
                    ip = urlopen('https://api.ipify.org/').read()
                    day = datetime.date.today().strftime('%Y%m%d')
                    if PY3: day = day.encode()
                    headers = {
                        "Cookie":
                        hashlib.md5(ip + day).hexdigest() + "=1;saveMe=1"
                    }
                    spl = uri.split('/')
                    spl[3] += '1'
                    if spl[3] == 'wss1':
                        spl[4] = b64encode(
                            spl[4].encode('utf-8')).decode('utf-8')
                        uri = '/'.join(spl)
                r = httptools.downloadpage(uri,
                                           timeout=self._timeout,
                                           headers=headers,
                                           follow_redirects=False,
                                           verify=False)
                if 'Wait 1 hour' in r.data:
                    uri = ''
                    logger.error('IP bannato da vcrypt, aspetta un ora')
                else:
                    uri = r.headers['location']
            return uri, r.code if r else 200
        except Exception as e:
            logger.error(e)
            return uri, 0