def __init__(self, raw_contents, validate = False, skip_crypto_validation = False):
  """
  Hidden service descriptor constructor.

  :param str raw_contents: descriptor content provided by the hidden service
  :param bool validate: checks the validity of the descriptor's content if
    **True**, skips these checks otherwise
  :param bool skip_crypto_validation: skips the signature check when **True**

  :raises: **ValueError** if the contents is malformed and validate is True
  """

  super(HiddenServiceDescriptor, self).__init__(raw_contents, lazy_load = not validate)

  # BUG FIX: ('introduction-points') is a bare string, not a tuple, so any
  # downstream 'in' membership test degrades to substring matching. A
  # one-element tuple restores exact keyword comparison.

  entries = _descriptor_components(raw_contents, validate, non_ascii_fields = ('introduction-points',))

  if validate:
    for keyword in REQUIRED_FIELDS:
      if keyword not in entries:
        raise ValueError("Hidden service descriptor must have a '%s' entry" % keyword)
      elif len(entries[keyword]) > 1:
        raise ValueError("The '%s' entry can only appear once in a hidden service descriptor" % keyword)

    if 'rendezvous-service-descriptor' != list(entries.keys())[0]:
      raise ValueError("Hidden service descriptor must start with a 'rendezvous-service-descriptor' entry")
    elif 'signature' != list(entries.keys())[-1]:
      raise ValueError("Hidden service descriptor must end with a 'signature' entry")

    self._parse(entries, validate)

    if not skip_crypto_validation and stem.prereq.is_crypto_available():
      # the descriptor body between these markers is what the signature covers

      signed_digest = self._digest_for_signature(self.permanent_key, self.signature)
      digest_content = self._content_range('rendezvous-service-descriptor ', '\nsignature\n')
      content_digest = hashlib.sha1(digest_content).hexdigest().upper()

      if signed_digest != content_digest:
        raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, content_digest))
  else:
    self._entries = entries
def __init__(self, content, validate = False, document = None):
  """
  Parse a router descriptor in a network status document.

  :param str content: router descriptor content to be parsed
  :param NetworkStatusDocument document: document this descriptor came from
  :param bool validate: checks the validity of the content if **True**, skips
    these checks otherwise

  :raises: **ValueError** if the descriptor data is invalid
  """

  super(RouterStatusEntry, self).__init__(content, lazy_load = not validate)
  self.document = document

  entries = _descriptor_components(content, validate)

  if not validate:
    self._entries = entries
    return

  for keyword in self._required_fields():
    if keyword not in entries:
      raise ValueError("%s must have a '%s' line:\n%s" % (self._name(True), keyword, str(self)))

  for keyword in self._single_fields():
    if keyword in entries and len(entries[keyword]) > 1:
      raise ValueError("%s can only have a single '%s' line, got %i:\n%s" % (self._name(True), keyword, len(entries[keyword]), str(self)))

  if list(entries.keys())[0] != 'r':
    raise ValueError("%s are expected to start with a 'r' line:\n%s" % (self._name(True), str(self)))

  self._parse(entries, validate)
def __init__(self, raw_contents, validate = False, skip_crypto_validation = False):
  """
  Hidden service v3 descriptor constructor.

  :param str raw_contents: descriptor content provided by the hidden service
  :param bool validate: checks the validity of the descriptor's content if
    **True**, skips these checks otherwise
  :param bool skip_crypto_validation: accepted for API parity (presently unused)

  :raises: **ValueError** if the contents is malformed and validate is True
  """

  super(HiddenServiceDescriptorV3, self).__init__(raw_contents, lazy_load = not validate)
  entries = _descriptor_components(raw_contents, validate)

  if not validate:
    self._entries = entries
    return

  for keyword in REQUIRED_V3_FIELDS:
    if keyword not in entries:
      raise ValueError("Hidden service descriptor must have a '%s' entry" % keyword)
    elif len(entries[keyword]) > 1:
      raise ValueError("The '%s' entry can only appear once in a hidden service descriptor" % keyword)

  keywords = list(entries.keys())

  if keywords[0] != 'hs-descriptor':
    raise ValueError("Hidden service descriptor must start with a 'hs-descriptor' entry")
  elif keywords[-1] != 'signature':
    raise ValueError("Hidden service descriptor must end with a 'signature' entry")

  self._parse(entries, validate)
def __init__(self, raw_contents, validate=False, annotations=None):
  """
  Server descriptor constructor, created from an individual relay's
  descriptor content (as provided by 'GETINFO desc/*', cached descriptors,
  and metrics).

  By default this validates the descriptor's content as it's parsed. This
  validation can be disabled to either improve performance or be accepting
  of malformed data.

  :param str raw_contents: descriptor content provided by the relay
  :param bool validate: checks the validity of the descriptor's content if
    **True**, skips these checks otherwise
  :param list annotations: lines that appeared prior to the descriptor

  :raises: **ValueError** if the contents is malformed and validate is True
  """

  super(ServerDescriptor, self).__init__(raw_contents, lazy_load=not validate)
  self._annotation_lines = annotations if annotations else []

  # A descriptor contains a series of 'keyword lines' which are simply a
  # keyword followed by an optional value. Lines can also be followed by a
  # signature block.
  #
  # We care about the ordering of 'accept' and 'reject' entries because this
  # influences the resulting exit policy, but for everything else the order
  # does not matter so breaking it into key / value pairs.

  entries, self._unparsed_exit_policy = _descriptor_components(
      stem.util.str_tools._to_unicode(raw_contents),
      validate,
      extra_keywords=('accept', 'reject'),
      non_ascii_fields=('contact', 'platform'))

  # TODO: Remove the following field in Stem 2.0. It has never been populated...
  #
  #   https://gitweb.torproject.org/torspec.git/commit/?id=43c2f78

  self.hidden_service_dir = ['2']

  if validate:
    self._parse(entries, validate)
    _parse_exit_policy(self, entries)

    # if we have a negative uptime and a tor version that shouldn't exhibit
    # this bug then fail validation (dropped the original's redundant
    # 'validate and' recheck -- we're already inside 'if validate')

    if self.uptime and self.tor_version:
      if self.uptime < 0 and self.tor_version >= stem.version.Version('0.1.2.7'):
        raise ValueError(
            "Descriptor for version '%s' had a negative uptime value: %i"
            % (self.tor_version, self.uptime))

    self._check_constraints(entries)
  else:
    self._entries = entries
def __init__(self, raw_contents: bytes, validate: bool = False, annotations: Optional[Sequence[bytes]] = None) -> None:
  """
  Microdescriptor constructor.

  :param raw_contents: microdescriptor content to be parsed
  :param validate: checks the validity of the content if **True**, skips
    these checks otherwise
  :param annotations: lines that appeared prior to the descriptor
  """

  super(Microdescriptor, self).__init__(raw_contents, lazy_load = not validate)
  self._annotation_lines = annotations if annotations else []

  entries = _descriptor_components(raw_contents, validate)

  if not validate:
    self._entries = entries
    return

  self._parse(entries, validate)
  self._check_constraints(entries)
def __init__(self, raw_contents: bytes, validate: bool) -> None:
  """
  TorDNSEL exit list entry constructor.

  :param raw_contents: exit list entry content to be parsed
  :param validate: checks the validity of the content if **True**, skips
    these checks otherwise
  """

  super(TorDNSEL, self).__init__(raw_contents)

  # attributes below are filled in by _parse()

  self.fingerprint = None  # type: Optional[str]
  self.published = None  # type: Optional[datetime.datetime]
  self.last_status = None  # type: Optional[datetime.datetime]
  self.exit_addresses = []  # type: List[Tuple[str, datetime.datetime]]

  self._parse(_descriptor_components(raw_contents, validate), validate)
def __init__(self, raw_contents, validate=False, annotations=None):
  """
  Microdescriptor constructor.

  :param str raw_contents: microdescriptor content to be parsed
  :param bool validate: checks the validity of the content if **True**,
    skips these checks otherwise
  :param list annotations: lines that appeared prior to the descriptor
  """

  super(Microdescriptor, self).__init__(raw_contents, lazy_load=not validate)
  self._annotation_lines = annotations if annotations else []

  entries = _descriptor_components(raw_contents, validate)

  if not validate:
    self._entries = entries
    return

  self._parse(entries, validate)
  self._check_constraints(entries)
def __init__(self, raw_contents, validate = False, annotations = None):
  """
  Microdescriptor constructor.

  :param str raw_contents: microdescriptor content to be parsed
  :param bool validate: checks the validity of the content if **True**,
    skips these checks otherwise
  :param list annotations: lines that appeared prior to the descriptor
  """

  super(Microdescriptor, self).__init__(raw_contents, lazy_load = not validate)
  self._annotation_lines = annotations if annotations else []

  entries = _descriptor_components(raw_contents, validate)

  if not validate:
    self._entries = entries
    return

  # upper-case hex sha256 over the full descriptor content

  self.digest = hashlib.sha256(self.get_bytes()).hexdigest().upper()

  self._parse(entries, validate)
  self._check_constraints(entries)
def __init__(self, raw_contents, validate):
  """
  TorDNSEL exit list entry constructor.

  :param raw_contents: exit list entry content to be parsed
  :param bool validate: checks the validity of the content if **True**,
    skips these checks otherwise
  """

  super(TorDNSEL, self).__init__(raw_contents)

  # attributes below are filled in by _parse()

  self.fingerprint = None
  self.published = None
  self.last_status = None
  self.exit_addresses = []

  decoded_contents = stem.util.str_tools._to_unicode(raw_contents)
  self._parse(_descriptor_components(decoded_contents, validate), validate)
def __init__(self, raw_contents, validate=False, annotations=None):
  """
  Microdescriptor constructor.

  :param str raw_contents: microdescriptor content to be parsed
  :param bool validate: checks the validity of the content if **True**,
    skips these checks otherwise
  :param list annotations: lines that appeared prior to the descriptor
  """

  super(Microdescriptor, self).__init__(raw_contents, lazy_load=not validate)
  self._annotation_lines = annotations if annotations else []

  entries = _descriptor_components(raw_contents, validate)

  if validate:
    # upper-case hex sha256 over the full descriptor content, computed
    # before parsing so it reflects the original bytes

    self.digest = hashlib.sha256(self.get_bytes()).hexdigest().upper()

    self._parse(entries, validate)
    self._check_constraints(entries)
  else:
    self._entries = entries
def _parse_introduction_points(content):
  """
  Provides the parsed list of IntroductionPoints for the unencrypted content.

  :param bytes content: decrypted introduction-point blob

  :returns: **list** of :class:`IntroductionPoints` parsed from the content

  :raises: **ValueError** if the content is malformed
  """

  introduction_points = []
  content_io = io.BytesIO(content)

  while True:
    content = b''.join(_read_until_keywords('introduction-point', content_io, ignore_first = True))

    if not content:
      break  # reached the end

    attr = dict(INTRODUCTION_POINTS_ATTR)
    entries = _descriptor_components(content, False)

    for keyword, values in list(entries.items()):
      value, block_type, block_contents = values[0]

      if keyword in SINGLE_INTRODUCTION_POINT_FIELDS and len(values) > 1:
        raise ValueError("'%s' can only appear once in an introduction-point block, but appeared %i times" % (keyword, len(values)))

      if keyword == 'introduction-point':
        attr['identifier'] = value
      elif keyword == 'ip-address':
        if not stem.util.connection.is_valid_ipv4_address(value):
          raise ValueError("'%s' is an invalid IPv4 address" % value)

        attr['address'] = value
      elif keyword == 'onion-port':
        if not stem.util.connection.is_valid_port(value):
          raise ValueError("'%s' is an invalid port" % value)

        attr['port'] = int(value)
      elif keyword == 'onion-key':
        attr['onion_key'] = block_contents
      elif keyword == 'service-key':
        attr['service_key'] = block_contents
      elif keyword == 'intro-authentication':
        auth_entries = []

        for auth_value, _, _ in values:
          if ' ' not in auth_value:
            raise ValueError("We expected 'intro-authentication [auth_type] [auth_data]', but had '%s'" % auth_value)

          auth_type, auth_data = auth_value.split(' ')[:2]
          auth_entries.append((auth_type, auth_data))

        # BUG FIX: previously the parsed (auth_type, auth_data) pairs were
        # discarded, leaving the attribute at its default

        attr['intro_authentication'] = auth_entries

    introduction_points.append(IntroductionPoints(**attr))

  return introduction_points
def _parse_introduction_points(content):
  """
  Provides the parsed list of IntroductionPoints for the unencrypted content.

  :param bytes content: decrypted introduction-point blob

  :returns: **list** of :class:`IntroductionPoints` parsed from the content

  :raises: **ValueError** if the content is malformed
  """

  introduction_points = []
  content_io = io.BytesIO(content)

  while True:
    # gather everything up to the next 'introduction-point' keyword

    block = b''.join(_read_until_keywords('introduction-point', content_io, ignore_first = True))

    if not block:
      break  # no more introduction points

    attr = dict(INTRODUCTION_POINTS_ATTR)

    for keyword, values in list(_descriptor_components(block, False).items()):
      value, block_type, block_contents = values[0]

      if keyword in SINGLE_INTRODUCTION_POINT_FIELDS and len(values) > 1:
        raise ValueError("'%s' can only appear once in an introduction-point block, but appeared %i times" % (keyword, len(values)))

      if keyword == 'introduction-point':
        attr['link_specifier'] = value
      elif keyword == 'onion-key':
        attr['onion_key'] = value
      elif keyword == 'auth-key':
        # drop the armor header/footer lines before parsing the certificate

        attr['auth_key'] = stem.descriptor.certificate.Ed25519Certificate.parse(''.join(block_contents.splitlines()[1:-1]))
      elif keyword == 'enc-key':
        attr['enc_key'] = value
      elif keyword == 'enc-key-cert':
        attr['enc_key_cert'] = stem.descriptor.certificate.Ed25519Certificate.parse(''.join(block_contents.splitlines()[1:-1]))
      elif keyword == 'legacy-key':
        attr['legacy_key'] = block_contents
      elif keyword == 'legacy-key-cert':
        attr['legacy_key_cert'] = block_contents

    introduction_points.append(IntroductionPoints(**attr))

  return introduction_points
def __init__(self, raw_contents, validate = False, annotations = None):
  """
  Server descriptor constructor, created from an individual relay's
  descriptor content (as provided by 'GETINFO desc/*', cached descriptors,
  and metrics).

  By default this validates the descriptor's content as it's parsed. This
  validation can be disabled to either improve performance or be accepting
  of malformed data.

  :param str raw_contents: descriptor content provided by the relay
  :param bool validate: checks the validity of the descriptor's content if
    **True**, skips these checks otherwise
  :param list annotations: lines that appeared prior to the descriptor

  :raises: **ValueError** if the contents is malformed and validate is True
  """

  super(ServerDescriptor, self).__init__(raw_contents, lazy_load = not validate)
  self._annotation_lines = annotations if annotations else []

  # A descriptor contains a series of 'keyword lines' which are simply a
  # keyword followed by an optional value. Lines can also be followed by a
  # signature block.
  #
  # We care about the ordering of 'accept' and 'reject' entries because this
  # influences the resulting exit policy, but for everything else the order
  # does not matter so breaking it into key / value pairs.

  entries, self._unparsed_exit_policy = _descriptor_components(stem.util.str_tools._to_unicode(raw_contents), validate, extra_keywords = ('accept', 'reject'), non_ascii_fields = ('contact', 'platform'))

  if validate:
    self._parse(entries, validate)
    _parse_exit_policy(self, entries)

    # if we have a negative uptime and a tor version that shouldn't exhibit
    # this bug then fail validation (dropped the original's redundant
    # 'validate and' recheck -- we're already inside 'if validate')

    if self.uptime and self.tor_version:
      if self.uptime < 0 and self.tor_version >= stem.version.Version('0.1.2.7'):
        raise ValueError("Descriptor for version '%s' had a negative uptime value: %i" % (self.tor_version, self.uptime))

    self._check_constraints(entries)
  else:
    self._entries = entries
def __init__(self, raw_contents, validate=False):
  """
  Extra-info descriptor constructor. By default this validates the
  descriptor's content as it's parsed. This validation can be disabled to
  either improve performance or be accepting of malformed data.

  :param str raw_contents: extra-info content provided by the relay
  :param bool validate: checks the validity of the extra-info descriptor if
    **True**, skips these checks otherwise

  :raises: **ValueError** if the contents is malformed and validate is True
  """

  super(ExtraInfoDescriptor, self).__init__(raw_contents, lazy_load=not validate)
  entries = _descriptor_components(raw_contents, validate)

  if not validate:
    self._entries = entries
    return

  for keyword in self._required_fields():
    if keyword not in entries:
      raise ValueError("Extra-info descriptor must have a '%s' entry" % keyword)

  for keyword in self._required_fields() + SINGLE_FIELDS:
    if keyword in entries and len(entries[keyword]) > 1:
      raise ValueError("The '%s' entry can only appear once in an extra-info descriptor" % keyword)

  keywords = list(entries.keys())
  expected_first_keyword = self._first_keyword()

  if expected_first_keyword and expected_first_keyword != keywords[0]:
    raise ValueError("Extra-info descriptor must start with a '%s' entry" % expected_first_keyword)

  expected_last_keyword = self._last_keyword()

  if expected_last_keyword and expected_last_keyword != keywords[-1]:
    raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword)

  self._parse(entries, validate)
def __init__(self, raw_contents, validate = False):
  """
  Extra-info descriptor constructor. By default this validates the
  descriptor's content as it's parsed. This validation can be disabled to
  either improve performance or be accepting of malformed data.

  :param str raw_contents: extra-info content provided by the relay
  :param bool validate: checks the validity of the extra-info descriptor if
    **True**, skips these checks otherwise

  :raises: **ValueError** if the contents is malformed and validate is True
  """

  super(ExtraInfoDescriptor, self).__init__(raw_contents, lazy_load = not validate)
  entries = _descriptor_components(raw_contents, validate)

  if validate:
    missing = [keyword for keyword in self._required_fields() if keyword not in entries]

    if missing:
      raise ValueError("Extra-info descriptor must have a '%s' entry" % missing[0])

    for keyword in self._required_fields() + SINGLE_FIELDS:
      if len(entries.get(keyword, ())) > 1:
        raise ValueError("The '%s' entry can only appear once in an extra-info descriptor" % keyword)

    present = list(entries.keys())
    first_keyword = self._first_keyword()
    last_keyword = self._last_keyword()

    if first_keyword and present[0] != first_keyword:
      raise ValueError("Extra-info descriptor must start with a '%s' entry" % first_keyword)

    if last_keyword and present[-1] != last_keyword:
      raise ValueError("Descriptor must end with a '%s' entry" % last_keyword)

    self._parse(entries, validate)
  else:
    self._entries = entries
def __init__(self, raw_contents, validate=False, skip_crypto_validation=False, onion_address=None):
  """
  Hidden service v3 descriptor constructor.

  :param str raw_contents: descriptor content provided by the hidden service
  :param bool validate: checks the validity of the descriptor's content if
    **True**, skips these checks otherwise
  :param bool skip_crypto_validation: skips signature validation and layer
    decryption when **True**
  :param str onion_address: base32 onion address this descriptor belongs to,
    used to derive the decryption keys for its encrypted layers

  :raises: **ValueError** if the contents is malformed and validate is True
  """

  self.onion_address = onion_address
  super(Hsv3Descriptor, self).__init__(raw_contents, lazy_load=not validate)

  # one-element tuple (the original passed a bare string, which turns the
  # downstream membership test into a substring check)

  entries = _descriptor_components(raw_contents, validate, non_ascii_fields=('introduction-points',))

  if not validate:
    self._entries = entries
    return

  for keyword in REQUIRED_FIELDS:
    if keyword not in entries:
      raise ValueError("Hidden service descriptor must have a '%s' entry" % keyword)
    elif len(entries[keyword]) > 1:
      raise ValueError("The '%s' entry can only appear once in a hidden service descriptor" % keyword)

  if 'hs-descriptor' != list(entries.keys())[0]:
    raise ValueError("Hidden service descriptor must start with a 'hs-descriptor' entry")
  elif 'signature' != list(entries.keys())[-1]:
    raise ValueError("Hidden service descriptor must end with a 'signature' entry")

  self._parse(entries, validate)

  if not skip_crypto_validation and stem.prereq.is_crypto_available():
    # check the descriptor signature against its signing certificate
    #
    # TODO: confirm certificate.validate() covers everything the spec's
    # digest comparison did

    self.certificate.validate(self)

    # The onion address encodes base32(PUBKEY || CHECKSUM || VERSION),
    # giving us the service's long term public identity key.
    #
    # TODO: the two byte checksum is not verified yet

    pubkey, checksum, version = struct.unpack('!32s2sb', base64.b32decode(self.onion_address.upper()))
    self.public_identity_key = pubkey

    # NOTE(review): the blinded public key is taken from the signing
    # certificate's first extension; the original also experimented with
    # self.certificate.key as a candidate -- confirm this is the right one.

    blinded_key = self.certificate.extensions[0].data

    # Key derivation for the outer ('superencrypted') layer per rend-spec-v3:
    #
    #   credential = H('credential' | public-identity-key)
    #   subcredential = H('subcredential' | credential | blinded-public-key)
    #   secret_input = blinded-public-key | subcredential | INT_8(revision_counter)
    #   keys = KDF(secret_input | salt | 'hsdir-superencrypted-data', S_KEY_LEN + S_IV_LEN + MAC_KEY_LEN)

    credential = hashlib.sha3_256(b'credential' + self.public_identity_key).digest()
    subcredential = hashlib.sha3_256(b'subcredential' + credential + blinded_key).digest()
    secret_input = blinded_key + subcredential + struct.pack('>Q', self.revision_counter)

    kdf = hashlib.shake_256(secret_input + self.introduction_points_salt + b'hsdir-superencrypted-data')
    keys = kdf.digest(32 + 16 + 32)
    secret_key, secret_iv, mac_key = keys[:32], keys[32:48], keys[48:]

    # TODO: mac_key is unused -- the outer layer's MAC should be verified

    decryptor = Cipher(algorithms.AES(secret_key), modes.CTR(secret_iv), default_backend()).decryptor()
    decrypted = decryptor.update(self.introduction_points_encrypted) + decryptor.finalize()

    # the outer plaintext ends with an armored '-----END MESSAGE-----' block

    end_index = decrypted.find(b'\n-----END MESSAGE-----', 0)
    self.first_layer_plaintext = decrypted[:end_index + len('\n-----END MESSAGE-----')]

    begin = self.first_layer_plaintext.find(b'\n-----BEGIN MESSAGE-----\n') + len(b'\n-----BEGIN MESSAGE-----\n')
    end = self.first_layer_plaintext.find(b'\n-----END MESSAGE-----', 0)
    self.second_layer_ciphertext_b64 = self.first_layer_plaintext[begin:end]
    self.second_layer_ciphertext = base64.b64decode(self.second_layer_ciphertext_b64)

    # The inner ('encrypted') layer is SALT || ENCRYPTED || MAC, keyed with
    # the same secret_input but the 'hsdir-encrypted-data' constant.
    #
    # TODO: only descriptors without client authorization are supported

    inner_salt = self.second_layer_ciphertext[:16]
    inner_encrypted = self.second_layer_ciphertext[16:-32]
    inner_mac = self.second_layer_ciphertext[-32:]  # TODO: unverified

    kdf_inner = hashlib.shake_256(secret_input + inner_salt + b'hsdir-encrypted-data')
    keys_inner = kdf_inner.digest(32 + 16 + 32)
    sec_key_inner, sec_iv_inner, mac_key_inner = keys_inner[:32], keys_inner[32:48], keys_inner[48:]

    decryptor_inner = Cipher(algorithms.AES(sec_key_inner), modes.CTR(sec_iv_inner), default_backend()).decryptor()
    self.decrypted_inner = decryptor_inner.update(inner_encrypted) + decryptor_inner.finalize()