Example #1
def obtener_cripto(password, plaintext):
    import os, base64, json, binascii
    salt = os.urandom(8)

    # Encode first, then apply PKCS#7 padding on the byte length, so
    # multi-byte characters cannot break the 16-byte block alignment.
    # PKCS#7 always pads, even when the input is already block-aligned,
    # so the receiver can strip the padding unambiguously.
    data = plaintext.encode('utf-8')
    paddingLength = 16 - (len(data) % 16)
    paddedPlaintext = data + bytes([paddingLength]) * paddingLength

    kdf = evpKDF(password, salt)
    iv = kdf['iv']

    try:  # Try the system AES library first
        from Crypto.Cipher import AES
        cipherSpec = AES.new(kdf['key'], AES.MODE_CBC, iv)
    except ImportError:  # If that fails, use the addon's bundled library
        import jscrypto
        cipherSpec = jscrypto.new(kdf['key'], jscrypto.MODE_CBC, iv)
    ciphertext = cipherSpec.encrypt(paddedPlaintext)

    return json.dumps(
        {
            'ct': base64.b64encode(ciphertext).decode('ascii'),
            'iv': binascii.hexlify(iv).decode('ascii'),
            's': binascii.hexlify(salt).decode('ascii')
        },
        sort_keys=True,
        separators=(',', ':'))
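This example (and Example #2 below) leaves the `evpKDF` helper undefined. Judging by the `ct`/`iv`/`s` JSON layout, it is meant to mimic CryptoJS's OpenSSL-style key derivation (`EVP_BytesToKey` with MD5, one iteration); a minimal sketch under that assumption, returning the `key` and `iv` entries consumed above, could look like this:

import hashlib

def evpKDF(password, salt, key_size=32, iv_size=16):
    # OpenSSL EVP_BytesToKey with MD5: keep hashing
    # (previous block + password + salt) until there are enough
    # bytes for both the key and the IV.
    if isinstance(password, str):
        password = password.encode('utf-8')
    derived, block = b'', b''
    while len(derived) < key_size + iv_size:
        block = hashlib.md5(block + password + salt).digest()
        derived += block
    return {'key': derived[:key_size],
            'iv': derived[key_size:key_size + iv_size]}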
Example #2
def obtener_cripto(password, plaintext):
    import os, base64, json, binascii
    SALT_LENGTH = 8
    BLOCK_SIZE = 16
    KEY_SIZE = 32  # evpKDF is expected to derive a 32-byte (AES-256) key

    salt = os.urandom(SALT_LENGTH)
    iv = os.urandom(BLOCK_SIZE)  # random IV, serialized with the ciphertext

    # PKCS#7 padding applied to the encoded byte length
    data = plaintext.encode('utf-8')
    paddingLength = BLOCK_SIZE - (len(data) % BLOCK_SIZE)
    paddedPlaintext = data + bytes([paddingLength]) * paddingLength

    kdf = evpKDF(password, salt)

    try:  # Try the system AES library first
        from Crypto.Cipher import AES
        cipherSpec = AES.new(kdf['key'], AES.MODE_CBC, iv)
    except ImportError:  # If that fails, use the addon's bundled library
        import jscrypto
        cipherSpec = jscrypto.new(kdf['key'], jscrypto.MODE_CBC, iv)
    ciphertext = cipherSpec.encrypt(paddedPlaintext)

    return json.dumps(
        {
            'ct': base64.b64encode(ciphertext).decode('ascii'),
            'iv': binascii.hexlify(iv).decode('ascii'),
            's': binascii.hexlify(salt).decode('ascii')
        },
        sort_keys=True,
        separators=(',', ':'))
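For a round trip, a hypothetical decrypt counterpart (not part of the original) under the same assumptions, using PyCryptodome and the `evpKDF` sketch above:

import base64, binascii, json
from Crypto.Cipher import AES

def descifrar_cripto(password, payload):  # hypothetical helper
    obj = json.loads(payload)
    salt = binascii.unhexlify(obj['s'])
    iv = binascii.unhexlify(obj['iv'])
    ciphertext = base64.b64decode(obj['ct'])
    kdf = evpKDF(password, salt)
    padded = AES.new(kdf['key'], AES.MODE_CBC, iv).decrypt(ciphertext)
    return padded[:-padded[-1]].decode('utf-8')  # strip PKCS#7 padding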
Example #3
def aes_cbc_decrypt(self, data, key):
    # AES-CBC decryption with an all-zero IV; returns the padded plaintext
    try:
        from Crypto.Cipher import AES
        decryptor = AES.new(key, AES.MODE_CBC, b'\0' * 16)
        # decryptor = aes.AESModeOfOperationCBC(key, iv=b'\0' * 16)
    except ImportError:
        import jscrypto
        decryptor = jscrypto.new(key, jscrypto.MODE_CBC, b'\0' * 16)
    return decryptor.decrypt(data)
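Note that `decrypt` returns the plaintext with its padding still attached; stripping it is left to the caller. A hypothetical Python 3 usage sketch, assuming the payload uses PKCS#7 padding (`ciphertext` and `key` assumed):

padded = self.aes_cbc_decrypt(ciphertext, key)
plaintext = padded[:-padded[-1]]  # last byte encodes the pad length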
Example #4
def decrypt_subs(iv, data, id):
    import base64, struct, zlib, jscrypto
    from hashlib import sha1
    data = base64.b64decode(data.encode('utf-8'))
    iv = base64.b64decode(iv.encode('utf-8'))
    id = int(id)

    def obfuscate_key_aux(count, modulo, start):
        # Fibonacci-style sequence seeded with `start`; each term is later
        # reduced mod `modulo` and shifted into printable ASCII (+33).
        output = list(start)
        for _ in range(count):
            output.append(output[-1] + output[-2])
        # cut off start values
        output = output[2:]
        output = list(map(lambda x: x % modulo + 33, output))
        return output

    def obfuscate_key(key):
        from math import pow, sqrt, floor
        num1 = int(floor(pow(2, 25) * sqrt(6.9)))
        num2 = (num1 ^ key) << 5
        num3 = key ^ num1
        num4 = num3 ^ (num3 >> 3) ^ num2
        prefix = obfuscate_key_aux(20, 97, (1, 2))
        prefix = struct.pack('B' * len(prefix), *prefix)
        shaHash = sha1(prefix + str(num4).encode('ascii')).digest()
        # bytearray() yields ints on Python 2 and 3 alike; iterating the
        # digest and calling ord() on each byte breaks on Python 3.
        decshaHash = list(bytearray(shaHash))
        # Extend the 160-bit SHA-1 digest to 256 bits (an AES-256 key)
        return decshaHash + [0] * 12

    key = obfuscate_key(id)
    key = struct.pack('B' * len(key), *key)

    decryptor = jscrypto.new(key, jscrypto.MODE_CBC, iv)  # mode 2 == MODE_CBC
    decrypted_data = decryptor.decrypt(data)
    data = zlib.decompress(decrypted_data)

    import xml.etree.ElementTree as ET
    raiz = ET.fromstring(data)

    ass_sub = convert_to_ass(raiz)
    file_sub = filetools.join(config.get_data_path(), 'crunchyroll_sub.ass')
    filetools.write(file_sub, ass_sub)
    return file_sub
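`obfuscate_key` zero-extends the 20-byte SHA-1 digest to 32 bytes, so the decryption runs as AES-256. Assuming PyCryptodome is available, the jscrypto call above could equivalently be written as this sketch:

from Crypto.Cipher import AES
decryptor = AES.new(key, AES.MODE_CBC, iv)  # same 32-byte key and IV as above
decrypted_data = decryptor.decrypt(data)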
Example #5
def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url, canonical=canonical).data
    data2 = data.replace("\n", "")
    idiomas = scrapertools.find_single_match(data, r'Subtitulo:\s*(.*?) \[')
    idiomas = idiomas.replace("Español Latino",
                              "Latino").replace("Español España", "Castellano")
    ty = scrapertools.find_single_match(data,
                                        '720p: <a href=(.*?)1080p: <a href="')
    # If the 720p link appears before the 1080p one, keep that order;
    # otherwise assume the page lists 1080p first.
    if ty:
        calidades = ['720p', '1080p']
    else:
        calidades = ['1080p', '720p']
    torrentes = scrapertools.find_multiple_matches(
        data,
        '<a href="((?:https://www.frozen-layer.com/descargas[^"]+|https://nyaa.si/view/[^"]+|https://anidex.info/torrent/[^"]+))"'
    )
    if torrentes:
        for i, enlace in enumerate(torrentes):
            title = "Ver por Torrent %s" % idiomas
            if "720p" in data and "1080p" in data2:
                title = "[%s] %s" % (calidades[i], title)
            if "anidex.info" in enlace:
                enlace = enlace.replace("/torrent/", "/dl/")
                itemlist.append(
                    item.clone(title=title,
                               action="play",
                               url=enlace,
                               server="torrent"))
            elif "nyaa" in enlace:
                data1 = httptools.downloadpage(enlace).data
                enlace = "https://nyaa.si" + scrapertools.find_single_match(
                    data1, 'a href="(/do[^"]+)')
                itemlist.append(
                    item.clone(title=title,
                               action="play",
                               url=enlace,
                               server="torrent"))
                enlace = scrapertools.find_single_match(
                    data1, '<a href="(magnet[^"]+)')
                itemlist.append(
                    item.clone(title=title + " [magnet]",
                               action="play",
                               url=enlace,
                               server="torrent"))
            #itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
    onefichier = scrapertools.find_multiple_matches(
        data, '<a href="(https://1fichier.com/[^"]+)"')
    if onefichier:
        for i, enlace in enumerate(onefichier):
            title = "Ver por 1fichier   %s" % idiomas
            if "720p" in data and "1080p" in data2:
                try:
                    title = "[%s] %s" % (calidades[i], title)
                except IndexError:  # more links than quality labels
                    pass
            itemlist.append(
                item.clone(title=title,
                           action="play",
                           url=enlace,
                           server="onefichier"))
    puyaenc = scrapertools.find_multiple_matches(
        data, '<a href="(%senc/[^"]+)"' % host)
    if puyaenc:
        import base64, os, jscrypto
        action = "play"
        for i, enlace in enumerate(puyaenc):
            data_enc = httptools.downloadpage(enlace).data
            jk, encryp = scrapertools.find_single_match(
                data_enc, r" return '(\d+)'.*?crypted\" VALUE=\"(.*?)\"")

            # The page does not expose the IV; a random one corrupts only
            # the first CBC block, which is thrown away below anyway.
            iv = os.urandom(16)
            jk = base64.b16decode(jk)
            encryp = base64.b64decode(encryp)

            crypto = jscrypto.new(jk, jscrypto.MODE_CBC, iv)
            decryp = crypto.decrypt(encryp)
            if PY3:
                decryp = decryp.decode('utf-8',
                                       errors='replace').replace('\x00', '')
            else:
                decryp = decryp.replace('\0', '')
            # Everything before '#' (including the garbled first block) is
            # replaced with the Mega URL prefix.
            link = decryp.split('#')
            link = decryp.replace(link[0], "https://mega.nz/")

            title = "Ver por Mega   %s" % idiomas
            if "720p" in data and "1080p" in data2:
                try:
                    title = "[%s] %s" % (calidades[i], title)
                except IndexError:
                    pass
            if "/#F!" in link:
                action = "carpeta"
            itemlist.append(
                item.clone(title=title, action=action, url=link,
                           server="mega"))
    safelink = scrapertools.find_multiple_matches(
        data, '<a href="(http(?:s|)://.*?safelinking.net/[^"]+)"')
    domain = ""
    server = ""
    if safelink:
        for i, safe in enumerate(safelink):
            headers = {'Content-Type': 'application/json'}
            hash = safe.rsplit("/", 1)[1]
            post = jsontools.dump({"hash": hash})
            data_sf = httptools.downloadpage(
                "https://safelinking.net/v1/protected",
                post=post,
                headers=headers).json
            try:
                for link in data_sf.get("links"):
                    enlace = link["url"]
                    action = "play"
                    if "tinyurl" in enlace:
                        header = httptools.downloadpage(
                            enlace, follow_redirects=False).headers
                        enlace = header['location']
                    elif "mega." in enlace:
                        server = "mega"
                        domain = "Mega"
                        if "/#F!" in enlace:
                            action = "carpeta"
                    elif "1fichier." in enlace:
                        server = "onefichier"
                        domain = "1fichier"
                        if "/dir/" in enlace:
                            action = "carpeta"
                    elif "google." in enlace:
                        server = "gvideo"
                        domain = "Gdrive"
                        if "/folders/" in enlace:
                            action = "carpeta"
                    title = "Ver por %s" % domain
                    if idiomas:
                        title += " [Subs: %s]" % idiomas
                    if "720p" in data and "1080p" in data2:
                        try:
                            title = "[%s]  %s" % (calidades[i], title)
                        except IndexError:
                            pass
                    itemlist.append(
                        item.clone(title=title,
                                   action=action,
                                   url=enlace,
                                   server=server))
            except Exception:  # malformed or empty safelinking response
                pass
    return itemlist
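The random IV in the Mega branch looks like a bug but is survivable: in CBC mode the IV only influences the first 16-byte block, and the code discards everything before the '#' separator anyway. A self-contained sketch of that property, using PyCryptodome:

import os
from Crypto.Cipher import AES

key = os.urandom(16)
plaintext = b'A' * 48
ciphertext = AES.new(key, AES.MODE_CBC, os.urandom(16)).encrypt(plaintext)

# Decrypting with a different random IV garbles only block 0; every
# later block is chained off the previous *ciphertext* block instead.
recovered = AES.new(key, AES.MODE_CBC, os.urandom(16)).decrypt(ciphertext)
assert recovered[16:] == plaintext[16:]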