def __new__(cls, x):
    """Build a Point from an integer or a 32-byte little-endian string.

    Integral arguments are serialized to 32 little-endian bytes; any
    argument that is neither an integer nor bytes is rejected.
    """
    if isinstance(x, Integral):
        encoded = int2bytes(x, length=32, endian='little')
    elif isinstance(x, bytes):
        encoded = x
    else:
        raise TypeError(
            "Can only instantiate Point instances from integers or bytes")
    return super(Point, cls).__new__(cls, encoded)
def decode_unordered(w, compact=False, checksum=True, permissive=False):
    """From an unordered set of words, or a whitespace-separated string of
    words, produce the original byte string that was encoded.

    w: the list of words, or whitespace delimited words to be decoded
    compact: compact encoding was used instead of length encoding
    checksum: encoded string had a checksum appended before encoding
    permissive: if there are spelling errors, correct them instead of
        throwing an error (will still throw ValueError if spelling can't
        be corrected)

    Raises ValueError if the encoding is invalid.
    """
    if isinstance(w, bytes):
        w = w.split()
    # Map each word back to its digit; with permissive=True also accept
    # any word within Damerau-Levenshtein distance 1 of a known word.
    digits = []
    for word in w:
        digit = rwords.get(word)
        if digit is None and permissive:
            for candidate in dldist(word, 1):
                if candidate in rwords:
                    digit = rwords[candidate]
                    break
        if digit is None:
            raise ValueError('Unrecognized word %s' % repr(word))
        digits.append(digit)
    # Sorting recovers the canonical digit order; the combinatorial number
    # system then maps the digit set back to its integer rank.
    digits.sort()
    n = get_rank_length_offset(len(digits))
    for position, digit in enumerate(digits):
        n += nCk(digit, position + 1)
    return unpad_and_checksum(
        int2bytes(n, endian='little'), compact, checksum)
def decode_unordered(w, compact=False, checksum=True, permissive=False):
    """From an unordered set of words, or a whitespace-separated string of
    words, produce the original byte string that was encoded.

    w: the list of words, or whitespace delimited words to be decoded
    compact: compact encoding was used instead of length encoding
    checksum: encoded string had a checksum appended before encoding
    permissive: if there are spelling errors, correct them instead of
        throwing an error (will still throw ValueError if spelling can't
        be corrected)

    Raises ValueError if the encoding is invalid.
    """
    if isinstance(w, bytes):
        w = w.split()

    def lookup(word):
        # Exact match first; permissive mode additionally tries every
        # string within edit distance 1 of the given word.
        if word in rwords:
            return rwords[word]
        if permissive:
            for alt in dldist(word, 1):
                if alt in rwords:
                    return rwords[alt]
        raise ValueError('Unrecognized word %s' % repr(word))

    # Order does not matter for an unordered encoding: sort the digits and
    # invert the combinatorial-number-system ranking.
    digits = sorted(lookup(word) for word in w)
    rank = get_rank_length_offset(len(digits))
    for k, digit in enumerate(digits, 1):
        rank += nCk(digit, k)
    return unpad_and_checksum(
        int2bytes(rank, endian='little'), compact, checksum)
def oaep_keccak(m, label='', out_len=None, hash_len=32, random=random,
                keccak_args=None):
    """Perform OAEP (as specified by PKCS#1v2.1) with Keccak as the one-way
    function and mask-generating function

    All lengths specified in *bytes*

    m: message to be padded
    label: (optional) to be associated with the message, the default is the
        empty string
    out_len: (optional) the length of the message after padding, the default
        is len(m) + 2*hash_len + 2
    hash_len: (optional) the length of the output of the hash algorithm, the
        default is 32
    random: (optional) source of entropy for the random seed generation, the
        default is python's random module
    keccak_args: (optional) dict of parameters for the Keccak sponge
        function, the defaults are the Keccak defaults

    Raises ValueError if the message is too long for the requested output
    and hash lengths.
    """
    # None sentinel instead of a mutable default argument shared across calls.
    if keccak_args is None:
        keccak_args = {}
    if out_len is not None and len(m) > out_len - 2 * hash_len - 2:
        raise ValueError(
            "Message too long to specified output and hash lengths")
    # hash the label
    k = Keccak(**keccak_args)
    k.absorb(label)
    lhash = k.squeeze(hash_len)
    if out_len is not None:
        pad_string = '\x00' * (out_len - len(m) - 2 * hash_len - 2)
    else:
        pad_string = ''
    # pad m
    padded = lhash + pad_string + '\x01' + m
    # generate rand_seed, a hash_len-byte random string.  The explicit
    # length= is required: without it, leading zero bytes of the random
    # integer would be dropped and the seed could come up short, corrupting
    # the OAEP layout (PKCS#1 requires exactly hLen seed bytes).
    rand_seed = int2bytes(random.getrandbits(hash_len * 8), length=hash_len)
    # expand rand_seed to the length of padded
    k = Keccak(**keccak_args)
    k.absorb(rand_seed)
    mask = k.squeeze(len(padded))
    # XOR the message with the expanded seed
    masked = ''.join(imap(chr, imap(xor, imap(ord, padded), imap(ord, mask))))
    # hash masked to generate the seed mask
    k = Keccak(**keccak_args)
    k.absorb(masked)
    seed_mask = k.squeeze(len(rand_seed))
    # mask the seed
    masked_seed = ''.join(
        imap(chr, imap(xor, imap(ord, rand_seed), imap(ord, seed_mask))))
    # concatenate the two together
    return '\x00' + masked_seed + masked
def __new__(cls, x):
    """Build an Element, reducing the argument modulo the field prime.

    Accepts an integer, or a byte string of exactly 32 bytes; the value is
    reduced mod p and stored as 32 little-endian bytes.
    """
    p_int = bytes2int(p, endian='little')
    if isinstance(x, Integral):
        value = x % p_int
    elif isinstance(x, bytes):
        if len(x) != 32:
            raise ValueError(
                "When instantiating Element from bytes, argument must be of length 32"
            )
        value = bytes2int(x, endian='little') % p_int
    else:
        raise TypeError(
            "Can only instantiate Element instances from integers or bytes"
        )
    return super(Element, cls).__new__(
        cls, int2bytes(value, length=32, endian='little'))
def decode(w, compact=False, permissive=False):
    """From a list of words, or a whitespace-separated string of words,
    produce the original string that was encoded.

    w: the list of words, or whitespace delimited words to be decoded
    compact: compact encoding was used instead of length encoding
    permissive: if there are spelling errors, correct them instead of
        throwing an error (will still throw ValueError if spelling can't
        be corrected)

    Raises ValueError if the encoding is invalid.
    """
    if isinstance(w, bytes):
        w = w.split()
    # Translate each word to its index in the word list, optionally
    # accepting close misspellings (edit distance 1).
    indexes = []
    for word in w:
        index = rwords.get(word)
        if index is None and permissive:
            for alt in dldist(word, 1):
                if alt in rwords:
                    index = rwords[alt]
                    break
        if index is None:
            raise ValueError('Unrecognized word %s' % repr(word))
        indexes.append(index)
    # The encoder emitted running sums, so successive differences modulo
    # the word-list size recover the original base-len(words) mantissas.
    values = []
    previous = 0
    for index in indexes:
        values.append((index - previous) % len(words))
        previous = index
    i = sum(mantissa * len(words)**radix
            for radix, mantissa in enumerate(values))
    # we don't need to worry about truncating null bytes because of the
    # encoded length on the end
    s = int2bytes(i)
    if compact:
        # compact mode: the final byte carries the checksum length
        checksum_length = ord(s[-1])
        consumed = 1
        length = len(s) - checksum_length - consumed
    else:
        # length-prefixed mode: varint length at the end, checksum length
        # derived from the payload length
        (length, consumed) = decode_varint(s, endian='little')
        checksum_length = max(1, (length - 1).bit_length())
    s = s[:-consumed]
    s, checksum = s[:-checksum_length], s[-checksum_length:]
    if len(s) != length:
        raise ValueError("Invalid length")
    k = Keccak()
    k.absorb(s)
    if k.squeeze(checksum_length) != checksum:
        raise ValueError("Invalid checksum")
    return s
def decode(w, compact=False, permissive=False):
    """From a list of words, or a whitespace-separated string of words,
    produce the original string that was encoded.

    w: the list of words, or whitespace delimited words to be decoded
    compact: compact encoding was used instead of length encoding
    permissive: if there are spelling errors, correct them instead of
        throwing an error (will still throw ValueError if spelling can't
        be corrected)

    Raises ValueError if the encoding is invalid.
    """
    if isinstance(w, bytes):
        w = w.split()

    def lookup(word):
        # Exact dictionary hit, or (permissive only) a word within
        # Damerau-Levenshtein distance 1 of a known word.
        if word in rwords:
            return rwords[word]
        if permissive:
            for alt in dldist(word, 1):
                if alt in rwords:
                    return rwords[alt]
        raise ValueError('Unrecognized word %s' % repr(word))

    indexes = [lookup(word) for word in w]
    # Undo the running-sum encoding: each mantissa is the difference of
    # consecutive indexes modulo the word-list size, weighted by its radix.
    base = len(words)
    i = 0
    weight = 1
    previous = 0
    for index in indexes:
        i += ((index - previous) % base) * weight
        weight *= base
        previous = index
    # we don't need to worry about truncating null bytes because of the
    # encoded length on the end
    s = int2bytes(i)
    if compact:
        # compact mode: the final byte stores the checksum length
        checksum_length = ord(s[-1])
        consumed = 1
        length = len(s) - checksum_length - consumed
    else:
        # length-prefixed mode: trailing varint length, checksum sized
        # from the length
        (length, consumed) = decode_varint(s, endian='little')
        checksum_length = max(1, (length - 1).bit_length())
    s = s[:-consumed]
    s, checksum = s[:-checksum_length], s[-checksum_length:]
    if len(s) != length:
        raise ValueError("Invalid length")
    k = Keccak()
    k.absorb(s)
    if k.squeeze(checksum_length) != checksum:
        raise ValueError("Invalid checksum")
    return s
def oaep_keccak(m, label='', out_len=None, hash_len=32, random=random,
                keccak_args=None):
    """Perform OAEP (as specified by PKCS#1v2.1) with Keccak as the one-way
    function and mask-generating function

    All lengths specified in *bytes*

    m: message to be padded
    label: (optional) to be associated with the message, the default is the
        empty string
    out_len: (optional) the length of the message after padding, the default
        is len(m) + 2*hash_len + 2
    hash_len: (optional) the length of the output of the hash algorithm, the
        default is 32
    random: (optional) source of entropy for the random seed generation, the
        default is python's random module
    keccak_args: (optional) dict of parameters for the Keccak sponge
        function, the defaults are the Keccak defaults

    Raises ValueError if the message is too long for the requested output
    and hash lengths.
    """
    # Use a None sentinel rather than a mutable default argument.
    if keccak_args is None:
        keccak_args = {}
    if out_len is not None and len(m) > out_len - 2 * hash_len - 2:
        raise ValueError(
            "Message too long to specified output and hash lengths")
    # hash the label
    k = Keccak(**keccak_args)
    k.absorb(label)
    lhash = k.squeeze(hash_len)
    if out_len is not None:
        pad_string = '\x00' * (out_len - len(m) - 2 * hash_len - 2)
    else:
        pad_string = ''
    # pad m
    padded = lhash + pad_string + '\x01' + m
    # generate rand_seed, a hash_len-byte random string.  length= is
    # required here: otherwise leading zero bytes of the random integer are
    # dropped and the seed can be shorter than hash_len, corrupting the
    # OAEP layout (PKCS#1 mandates exactly hLen seed bytes).
    rand_seed = int2bytes(random.getrandbits(hash_len * 8), length=hash_len)
    # expand rand_seed to the length of padded
    k = Keccak(**keccak_args)
    k.absorb(rand_seed)
    mask = k.squeeze(len(padded))
    # XOR the message with the expanded seed
    masked = ''.join(imap(chr, imap(xor, imap(ord, padded), imap(ord, mask))))
    # hash masked to generate the seed mask
    k = Keccak(**keccak_args)
    k.absorb(masked)
    seed_mask = k.squeeze(len(rand_seed))
    # mask the seed
    masked_seed = ''.join(
        imap(chr, imap(xor, imap(ord, rand_seed), imap(ord, seed_mask))))
    # concatenate the two together
    return '\x00' + masked_seed + masked
def __new__(cls, x):
    """Build a SubElement (Curve25519 secret scalar).

    Integers are serialized to 32 little-endian bytes; byte strings must
    already be exactly 32 bytes.  Either form is passed through
    _curve25519.make_seckey (presumably applying the standard Curve25519
    key clamping -- confirm in the _curve25519 module).
    """
    if isinstance(x, Integral):
        raw = int2bytes(x, length=32, endian='little')
    elif isinstance(x, bytes):
        if len(x) != 32:
            raise ValueError(
                "When instantiating SubElement from bytes, argument must be of length 32"
            )
        raw = x
    else:
        raise TypeError(
            "Can only instantiate SubElement instances from integers or bytes"
        )
    return super(SubElement, cls).__new__(cls, _curve25519.make_seckey(raw))
def string_to_element(s):
    """Pack a byte string (or integer) into a field Element.

    Layout (little-endian): payload bytes, null padding, then the
    varint-encoded payload length in the final bytes.

    Raises ValueError if the argument cannot fit into an element.
    """
    if not isinstance(s, bytes):
        s = int2bytes(s, endian='little')
    if len(s) > 30:
        raise ValueError("Argument too large to fit into an element")
    encoded_length = encode_varint(len(s), endian='little')
    null_padding = '\x00' * (31 - len(encoded_length) - len(s))
    s += null_padding
    s += encoded_length
    s = bytes2int(s, endian='little')
    # p is stored at module level as 32 little-endian *bytes*; comparing
    # the integer s directly against those bytes never triggers in
    # Python 2 (ints always order before strings), so decode p first.
    if s >= bytes2int(p, endian='little'):
        raise ValueError("Argument too large to fit into an element")
    s = Element(s)
    return s
def runTest(self):
    """Round-trip and tamper-detection test for the authenticated cipher.

    For each of 1000 iterations: encrypt a random slice of the lorem text
    in randomly-sized chunks, decrypt it in (independently) randomly-sized
    chunks and check the plaintext survives; then flip one ciphertext byte
    and check that MAC verification raises ValueError.

    NOTE(review): every statement below draws from the shared self.random
    stream, so the exact statement order is significant.
    """
    for i in xrange(1000):
        # Pick a random substring of the fixture text as the plaintext.
        ptext_start = self.random.randint(0, len(lorem))
        ptext_end = self.random.randint(ptext_start, len(lorem))
        ptext = lorem[ptext_start:ptext_end]
        # 128-bit random nonce (128 / 8 relies on Python 2 int division).
        nonce = int2bytes(self.random.getrandbits(128), length=128 / 8)
        ctext = ''
        c = self.get_encryption_cipher(nonce)
        # Feed the plaintext to the cipher in random-sized chunks to
        # exercise incremental encryption.
        chunk_start = 0
        chunk_end = 0
        while chunk_start < len(ptext):
            chunk_start = chunk_end
            chunk_end = self.random.randint(chunk_start, len(ptext))
            ctext += c.encrypt(ptext[chunk_start:chunk_end])
        ctext += c.emit_mac()
        ptext_ = ''
        d = self.get_decryption_cipher(nonce)
        # Decrypt with a different random chunking.
        chunk_start = 0
        chunk_end = 0
        while chunk_start < len(ctext):
            chunk_start = chunk_end
            chunk_end = self.random.randint(chunk_start, len(ctext))
            ptext_ += d.decrypt(ctext[chunk_start:chunk_end])
        ptext_ += d.verify_mac()
        self.assertEqual(ptext, ptext_,
            'Message was not identical after an encryption/decryption round. key: %s, nonce: %s, round %d' \
            % (repr(self.key), repr(nonce), i)
        )
        # Corrupt one random ciphertext byte (XOR with a nonzero value
        # guarantees an actual change) and require MAC verification to fail.
        ptext_ = ''
        d = self.get_decryption_cipher(nonce)
        changed_byte = self.random.randint(0, len(ctext) - 1)
        ctext = ctext[:changed_byte] + chr(
            self.random.randint(1, 255) ^ ord(ctext[changed_byte])) + ctext[changed_byte + 1:]
        chunk_start = 0
        chunk_end = 0
        while chunk_start < len(ctext):
            chunk_start = chunk_end
            chunk_end = self.random.randint(chunk_start, len(ctext))
            ptext_ += d.decrypt(ctext[chunk_start:chunk_end])
        with self.assertRaises(ValueError):
            ptext_ += d.verify_mac()
def runTest(self):
    """Round-trip and tamper-detection test for the authenticated cipher.

    Encrypts a random slice of the lorem text in randomly-sized chunks,
    decrypts it in independently-sized chunks, checks equality; then
    corrupts one ciphertext byte and checks MAC verification raises
    ValueError.  Repeated 1000 times.

    NOTE(review): every statement draws from the shared self.random
    stream; statement order determines the draw sequence.
    """
    for i in xrange(1000):
        # Random plaintext slice of the fixture text.
        ptext_start = self.random.randint(0, len(lorem))
        ptext_end = self.random.randint(ptext_start, len(lorem))
        ptext = lorem[ptext_start:ptext_end]
        # 128-bit nonce (128/8 relies on Python 2 integer division).
        nonce = int2bytes(self.random.getrandbits(128), length=128/8)
        ctext = ''
        c = self.get_encryption_cipher(nonce)
        # Incremental encryption in random-sized chunks.
        chunk_start = 0
        chunk_end = 0
        while chunk_start < len(ptext):
            chunk_start = chunk_end
            chunk_end = self.random.randint(chunk_start, len(ptext))
            ctext += c.encrypt(ptext[chunk_start:chunk_end])
        ctext += c.emit_mac()
        ptext_ = ''
        d = self.get_decryption_cipher(nonce)
        # Incremental decryption with a different random chunking.
        chunk_start = 0
        chunk_end = 0
        while chunk_start < len(ctext):
            chunk_start = chunk_end
            chunk_end = self.random.randint(chunk_start, len(ctext))
            ptext_ += d.decrypt(ctext[chunk_start:chunk_end])
        ptext_ += d.verify_mac()
        self.assertEqual(ptext, ptext_,
            'Message was not identical after an encryption/decryption round. key: %s, nonce: %s, round %d' \
            % (repr(self.key), repr(nonce), i)
        )
        # Flip one random byte (XOR with nonzero guarantees a change) and
        # require the MAC check to fail.
        ptext_ = ''
        d = self.get_decryption_cipher(nonce)
        changed_byte = self.random.randint(0, len(ctext)-1)
        ctext = ctext[:changed_byte] + chr(self.random.randint(1,255) ^ ord(ctext[changed_byte])) + ctext[changed_byte+1:]
        chunk_start = 0
        chunk_end = 0
        while chunk_start < len(ctext):
            chunk_start = chunk_end
            chunk_end = self.random.randint(chunk_start, len(ctext))
            ptext_ += d.decrypt(ctext[chunk_start:chunk_end])
        with self.assertRaises(ValueError):
            ptext_ += d.verify_mac()
def decode(w, compact=False, checksum=True, permissive=False):
    """From a list of words, or a whitespace-separated string of words,
    produce the original byte string that was encoded.

    w: the list of words, or whitespace delimited words to be decoded
    compact: compact encoding was used instead of length encoding
    checksum: encoded string had a checksum appended before encoding
    permissive: if there are spelling errors, correct them instead of
        throwing an error (will still throw ValueError if spelling can't
        be corrected)

    Raises ValueError if the encoding is invalid.
    """
    if isinstance(w, bytes):
        w = w.split()
    # Resolve each word to its index, optionally forgiving small typos
    # (Damerau-Levenshtein distance 1).
    indexes = []
    for word in w:
        index = rwords.get(word)
        if index is None and permissive:
            for candidate in dldist(word, 1):
                if candidate in rwords:
                    index = rwords[candidate]
                    break
        if index is None:
            raise ValueError('Unrecognized word %s' % repr(word))
        indexes.append(index)
    # The encoder emitted running sums; successive differences modulo the
    # word-list size recover the base-len(words) mantissas.
    values = []
    previous = 0
    for index in indexes:
        values.append((index - previous) % len(words))
        previous = index
    i = sum(mantissa * len(words)**radix
            for radix, mantissa in enumerate(values))
    # truncated null bytes are not a concern: the payload carries its
    # encoded length at the end
    return unpad_and_checksum(
        int2bytes(i, endian='little'), compact, checksum)
def decode(w, compact=False, checksum=True, permissive=False):
    """From a list of words, or a whitespace-separated string of words,
    produce the original byte string that was encoded.

    w: the list of words, or whitespace delimited words to be decoded
    compact: compact encoding was used instead of length encoding
    checksum: encoded string had a checksum appended before encoding
    permissive: if there are spelling errors, correct them instead of
        throwing an error (will still throw ValueError if spelling can't
        be corrected)

    Raises ValueError if the encoding is invalid.
    """
    if isinstance(w, bytes):
        w = w.split()

    def lookup(word):
        # Exact match, or (permissive only) any word within edit
        # distance 1 of a known word.
        if word in rwords:
            return rwords[word]
        if permissive:
            for alt in dldist(word, 1):
                if alt in rwords:
                    return rwords[alt]
        raise ValueError('Unrecognized word %s' % repr(word))

    indexes = [lookup(word) for word in w]
    # Invert the running-sum encoding: each base-len(words) mantissa is
    # the difference of consecutive indexes, weighted by its radix.
    base = len(words)
    i = 0
    weight = 1
    previous = 0
    for index in indexes:
        i += ((index - previous) % base) * weight
        weight *= base
        previous = index
    # we don't need to worry about truncating null bytes because of the
    # encoded length on the end
    s = unpad_and_checksum(int2bytes(i, endian='little'), compact, checksum)
    return s
def fromLaneToString(lane, w):
    """Convert a w-bit lane value to its (w/8)-byte string representation."""
    byte_count = w // 8
    return int2bytes(lane, byte_count)
def __str__(self):
    """Serialize self.c as a byte string via int2bytes."""
    # NOTE(review): self.c is presumably an integer value -- confirm
    # against the class constructor (not visible here).
    encoded = int2bytes(self.c)
    return encoded
def __str__(self):
    """Strip the trailing 0x01 padding byte and return the payload.

    Raises ValueError if the final byte is not the expected padding
    marker.
    """
    raw = int2bytes(self)
    if raw[-1] == '\x01':
        return raw[:-1]
    raise ValueError('Invalid padding for conversion to str')
s = int2bytes(n, endian='little') return unpad_and_checksum(s, compact, checksum) __all__ = ['encode', 'decode', 'randomart', 'encode_unordered', 'decode_unordered'] if __name__ == '__main__': import random try: iterations = int(sys.argv[1]) except: iterations = 1000 print >>sys.stderr, "\nTesting encode/decode ordered, not compact, with checksum" for _ in xrange(iterations): n = random.getrandbits(1024) s = int2bytes(n, endian='little') r = encode(s, compact=False, checksum=True) s = decode(r, compact=False, checksum=True) m = bytes2int(s, endian='little') assert n == m, (n, r, m) print >>sys.stderr, '.', sys.stderr.flush() print >>sys.stderr, "\nTesting encode/decode ordered, compact, with checksum" for _ in xrange(iterations): n = random.getrandbits(1024) s = int2bytes(n, endian='little') r = encode(s, compact=True, checksum=True) s = decode(r, compact=True, checksum=True) m = bytes2int(s, endian='little') assert n == m, (n, r, m)
raise TypeError( "Can only instantiate SubElement instances from integers or bytes" ) return super(SubElement, cls).__new__(cls, x) p = 2**255 - 19 # curve is defined over the field Z_p q = 2**252 + 27742317777372353535851937790883648493 # order of the group generated by the base base = 9 base_y = 14781619447589544791020593568409986887264606134616475288964881837755586237401 bad_public_keys = [ 0, 1, 325606250916557431795983626356110631294008115727848805560023387167927233504, 39382357235489614581723060781553021112529911719440698176882885853963445705823, p - 1, p, p + 1, p + 325606250916557431795983626356110631294008115727848805560023387167927233504, p + 39382357235489614581723060781553021112529911719440698176882885853963445705823, 2 * p - 1, 2 * p, 2 * p + 1 ] p = int2bytes(p, length=32, endian='little') q = int2bytes(q, length=32, endian='little') base = Point(base) base_y = Element(base_y) bad_public_keys = map(Point, bad_public_keys) __all__ = [ 'p', 'q', 'base', 'base_y', 'bad_public_keys', 'curve', 'Point', 'Element', 'SubElement' ]
# Public API of this module.
__all__ = [
    'encode', 'decode', 'randomart', 'encode_unordered', 'decode_unordered'
]

if __name__ == '__main__':
    # Self-test: round-trip random 1024-bit integers through the word
    # encoder/decoder, in both length-prefixed and compact modes.
    import random
    try:
        # Optional iteration count from the command line.
        iterations = int(sys.argv[1])
    except:
        # NOTE(review): bare except silently falls back to the default
        # iteration count on any error (missing or non-numeric argument).
        iterations = 1000
    print >> sys.stderr, "\nTesting encode/decode ordered, not compact, with checksum"
    for _ in xrange(iterations):
        n = random.getrandbits(1024)
        s = int2bytes(n, endian='little')
        r = encode(s, compact=False, checksum=True)
        s = decode(r, compact=False, checksum=True)
        m = bytes2int(s, endian='little')
        assert n == m, (n, r, m)
        # Progress dot per iteration (trailing comma suppresses newline).
        print >> sys.stderr, '.',
        sys.stderr.flush()
    print >> sys.stderr, "\nTesting encode/decode ordered, compact, with checksum"
    for _ in xrange(iterations):
        n = random.getrandbits(1024)
        s = int2bytes(n, endian='little')
        r = encode(s, compact=True, checksum=True)
        s = decode(r, compact=True, checksum=True)
        m = bytes2int(s, endian='little')
        assert n == m, (n, r, m)