def unpad_and_checksum(s, compact, checksum):
    """Validate and strip the length padding (and optional checksum) from *s*.

    s: the padded byte string
    compact: compact encoding was used instead of a length varint
    checksum: whether a Keccak checksum trailer is present

    Returns the payload with the padding/checksum removed.
    Raises ValueError if the length or the checksum does not verify.
    """
    assert isinstance(s, bytes)
    if not checksum:
        # No checksum: only the length trailer needs stripping/verifying.
        if compact:
            return s[:-1]
        length, consumed = decode_varint(s, endian='little')
        payload = s[:-consumed]
        if len(payload) != length:
            raise ValueError("Invalid length")
        return payload
    # Checksum present: determine its width, then verify length and digest.
    if compact:
        consumed = 1
        checksum_length = ord(s[-1])  # last byte records the checksum width
        length = len(s) - checksum_length - consumed
    else:
        length, consumed = decode_varint(s, endian='little')
        checksum_length = max(1, (length - 1).bit_length())
    remainder = s[:-consumed]
    payload, trailer = remainder[:-checksum_length], remainder[-checksum_length:]
    if len(payload) != length:
        raise ValueError("Invalid length")
    k = Keccak()
    k.absorb(payload)
    if k.squeeze(checksum_length) != trailer:
        raise ValueError("Invalid checksum")
    return payload
def unpad_and_checksum(s, compact, checksum):
    """Check length padding and checksum for a byte string.

    s: the padded byte string
    compact: compact encoding was used instead of a length varint
    checksum: whether a Keccak checksum trailer is present

    Returns the string without length padding or checksum.
    Raises ValueError if either is wrong.
    """
    assert isinstance(s, bytes)
    if checksum:
        if compact:
            width = ord(s[-1])  # final byte stores the checksum width
            consumed = 1
            body_len = len(s) - width - consumed
        else:
            body_len, consumed = decode_varint(s, endian='little')
            # checksum width scales with the bit length of the payload size
            width = max(1, (body_len - 1).bit_length())
        stripped = s[:-consumed]
        payload = stripped[:-width]
        expected = stripped[-width:]
        if len(payload) != body_len:
            raise ValueError("Invalid length")
        sponge = Keccak()
        sponge.absorb(payload)
        if sponge.squeeze(width) != expected:
            raise ValueError("Invalid checksum")
        return payload
    if compact:
        return s[:-1]
    body_len, consumed = decode_varint(s, endian='little')
    payload = s[:-consumed]
    if len(payload) != body_len:
        raise ValueError("Invalid length")
    return payload
def deserialize(cls, s):
    """Parse one tagged, length-prefixed payload from the head of *s*.

    s must begin with cls.tag_byte, followed by a big-endian length
    varint and then the payload bytes.

    Returns (instance, total_bytes_consumed).
    Raises ValueError if the tag byte does not match.
    """
    if s[0] != cls.tag_byte:
        raise ValueError('Attempting to deserialize a stream with the wrong tag byte')
    body = s[1:]
    length, consumed = decode_varint(body, endian='big')
    payload = body[consumed:consumed + length]
    return (cls(payload), 1 + consumed + length)
def decode(w, compact=False, permissive=False):
    """From a list of words, or a whitespace-separated string of words,
    produce the original string that was encoded.

    w: the list of words, or whitespace delimited words to be decoded
    compact: compact encoding was used instead of length encoding
    permissive: if there are spelling errors, correct them instead of
        throwing an error (will still throw ValueError if spelling
        can't be corrected)

    Raises ValueError if the encoding is invalid.
    """
    if isinstance(w, bytes):
        w = w.split()
    indexes = [None] * len(w)
    for i, word in enumerate(w):
        if word in rwords:
            indexes[i] = rwords[word]
        elif permissive:
            # try nearby spellings (edit distance 1) before giving up
            for nearby in dldist(word, 1):
                if nearby in rwords:
                    indexes[i] = rwords[nearby]
                    break
        if indexes[i] is None:
            raise ValueError('Unrecognized word %s' % repr(word))
    # Because we don't directly encode the mantissas, we have to extract
    # them: each stored index is the running offset from the previous one,
    # modulo the word-list size.
    # NOTE: rewritten from reduce() with a tuple-parameter lambda, which is
    # Python-2-only syntax removed by PEP 3113; the loop is equivalent and
    # portable.
    values = []
    last_index = 0
    for index in indexes:
        values.append((index - last_index) % len(words))
        last_index = index
    i = sum(mantissa * len(words)**radix for radix, mantissa in enumerate(values))
    # we don't need to worry about truncating null bytes because of the
    # encoded length on the end
    s = int2bytes(i)
    if compact:
        checksum_length = ord(s[-1])
        consumed = 1
        length = len(s) - checksum_length - consumed
    else:
        (length, consumed) = decode_varint(s, endian='little')
        checksum_length = max(1, (length - 1).bit_length())
    s = s[:-consumed]
    s, checksum = s[:-checksum_length], s[-checksum_length:]
    if len(s) != length:
        raise ValueError("Invalid length")
    k = Keccak()
    k.absorb(s)
    if k.squeeze(checksum_length) != checksum:
        raise ValueError("Invalid checksum")
    return s
def decode(w, compact=False, permissive=False):
    """From a list of words, or a whitespace-separated string of words,
    produce the original string that was encoded.

    w: the list of words, or whitespace delimited words to be decoded
    compact: compact encoding was used instead of length encoding
    permissive: if there are spelling errors, correct them instead of
        throwing an error (will still throw ValueError if spelling
        can't be corrected)

    Raises ValueError if the encoding is invalid.
    """
    if isinstance(w, bytes):
        w = w.split()
    indexes = [None] * len(w)
    for position, word in enumerate(w):
        if word in rwords:
            indexes[position] = rwords[word]
        elif permissive:
            # fall back to candidate spellings at edit distance 1
            for candidate in dldist(word, 1):
                if candidate in rwords:
                    indexes[position] = rwords[candidate]
                    break
        if indexes[position] is None:
            raise ValueError('Unrecognized word %s' % repr(word))
    # Recover the per-digit mantissas: each stored index is offset from
    # the previous index, modulo the alphabet size.
    mantissas = []
    previous = 0
    for index in indexes:
        mantissas.append((index - previous) % len(words))
        previous = index
    number = 0
    for radix, mantissa in enumerate(mantissas):
        number += mantissa * len(words)**radix
    # the length encoding at the end preserves any leading null bytes,
    # so int2bytes cannot silently truncate them
    s = int2bytes(number)
    if compact:
        checksum_length = ord(s[-1])
        consumed = 1
        length = len(s) - checksum_length - consumed
    else:
        length, consumed = decode_varint(s, endian='little')
        checksum_length = max(1, (length - 1).bit_length())
    s = s[:-consumed]
    s, expected = s[:-checksum_length], s[-checksum_length:]
    if len(s) != length:
        raise ValueError("Invalid length")
    sponge = Keccak()
    sponge.absorb(s)
    if sponge.squeeze(checksum_length) != expected:
        raise ValueError("Invalid checksum")
    return s
def deserialize(cls, s):
    """Parse a tagged sequence of serialized items from the head of *s*.

    s must begin with cls.tag_byte, then a big-endian varint giving the
    item count, then that many serialized items back-to-back; each item
    is parsed by the module-level deserialize().

    Returns (instance, total_bytes_consumed).
    Raises ValueError if the tag byte does not match.
    """
    if s[0] != cls.tag_byte:
        raise ValueError('Attempting to deserialize a stream with the wrong tag byte')
    s = s[1:]
    count, consumed = decode_varint(s, endian='big')
    s = s[consumed:]
    total_consumed = 1 + consumed
    items = []
    while len(items) < count:
        item, used = deserialize(s)
        items.append(item)
        total_consumed += used
        s = s[used:]
    return (cls(items), total_consumed)
def element_to_string(element):
    """Strip the little-endian length varint from *element* and return
    the payload bytes.

    Raises TypeError if element is not an Element.

    NOTE(review): the varint's consumed-byte count is never checked
    against len(element) - length, so any trailing bytes beyond the
    declared payload are silently dropped — confirm that is intended.
    """
    if not isinstance(element, Element):
        raise TypeError("Attempted to convert a non-element to a message")
    payload_length, _ = decode_varint(element, endian='little')
    return element[:payload_length]