def _parse_object(self, rpsl_text: str) -> Optional[str]:
    """
    Parse a single object. If direct_error_return is set, returns an error
    string on encountering an error. Otherwise, returns None.
    """
    try:
        self.obj_parsed += 1
        # If an object turns out to be a key-cert, and strict_import_keycert_objects
        # is set, parse it again with strict validation to load it in the GPG keychain.
        obj = rpsl_object_from_text(rpsl_text.strip(), strict_validation=False)
        if self.strict_validation_key_cert and obj.__class__ == RPSLKeyCert:
            obj = rpsl_object_from_text(rpsl_text.strip(), strict_validation=True)

        if obj.messages.errors():
            log_msg = f'Parsing errors: {obj.messages.errors()}, original object text follows:\n{rpsl_text}'
            if self.direct_error_return:
                return log_msg
            self.database_handler.record_mirror_error(self.source, log_msg)
            logger.critical(
                f'Parsing errors occurred while importing from file for {self.source}. '
                f'This object is ignored, causing potential data inconsistencies. A new operation for '
                f'this update, without errors, will still be processed and cause the inconsistency to '
                f'be resolved. Parser error messages: {obj.messages.errors()}; '
                f'original object text follows:\n{rpsl_text}')
            self.obj_errors += 1
            return None

        if obj.source() != self.source:
            msg = f'Invalid source {obj.source()} for object {obj.pk()}, expected {self.source}'
            if self.direct_error_return:
                return msg
            logger.critical(msg + '. This object is ignored, causing potential data inconsistencies.')
            self.database_handler.record_mirror_error(self.source, msg)
            self.obj_errors += 1
            return None

        if self.object_class_filter and obj.rpsl_object_class.lower() not in self.object_class_filter:
            self.obj_ignored_class += 1
            return None

        self.database_handler.upsert_rpsl_object(obj, forced_serial=self.serial)
    except UnknownRPSLObjectClassException as e:
        # Ignore legacy IRRd artifacts
        # https://github.com/irrdnet/irrd4/issues/232
        if e.rpsl_object_class.startswith('*xx'):
            self.obj_parsed -= 1  # This object does not exist to us
            return None
        if self.direct_error_return:
            return f'Unknown object class: {e.rpsl_object_class}'
        self.obj_unknown += 1
        self.unknown_object_classes.add(e.rpsl_object_class)
    return None
def preload_gpg_key():
    """
    Fixture to load a known PGP key into the configured keychain.
    """
    # Simply parsing the key-cert will load it into the GPG keychain
    rpsl_text = SAMPLE_KEY_CERT
    rpsl_object_from_text(rpsl_text)
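# A hypothetical usage sketch (not taken verbatim from the test suite): a test can
# request the fixture above by name, together with the tmp_gpg_dir fixture used in
# the NRTM tests below, so the sample key is already in the keychain before a
# signature is verified. SAMPLE_KEY_CERT and KEY_CERT_SIGNED_MESSAGE_VALID are the
# existing sample constants; the test name itself is made up.
def test_keychain_preloaded_signature_verifies(preload_gpg_key, tmp_gpg_dir):
    key_cert_obj = rpsl_object_from_text(SAMPLE_KEY_CERT, strict_validation=False)
    assert key_cert_obj.verify(KEY_CERT_SIGNED_MESSAGE_VALID)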
def save(self, database_handler: DatabaseHandler) -> bool:
    default_source = self.source if self.operation == DatabaseOperation.delete else None
    try:
        object_text = self.object_text.strip()
        # If an object turns out to be a key-cert, and strict_import_keycert_objects
        # is set, parse it again with strict validation to load it in the GPG keychain.
        obj = rpsl_object_from_text(object_text, strict_validation=False, default_source=default_source)
        if self.strict_validation_key_cert and obj.__class__ == RPSLKeyCert:
            obj = rpsl_object_from_text(object_text, strict_validation=True, default_source=default_source)
    except UnknownRPSLObjectClassException as exc:
        # Unknown object classes are only logged if they have not been filtered out.
        if not self.object_class_filter or exc.rpsl_object_class.lower() in self.object_class_filter:
            logger.info(f'Ignoring NRTM operation {str(self)}: {exc}')
        return False

    if self.object_class_filter and obj.rpsl_object_class.lower() not in self.object_class_filter:
        return False

    if obj.messages.errors():
        errors = '; '.join(obj.messages.errors())
        logger.critical(
            f'Parsing errors occurred while processing NRTM operation {str(self)}. '
            f'This operation is ignored, causing potential data inconsistencies. '
            f'A new operation for this update, without errors, '
            f'will still be processed and cause the inconsistency to be resolved. '
            f'Parser error messages: {errors}; original object text follows:\n{self.object_text}'
        )
        database_handler.record_mirror_error(
            self.source,
            f'Parsing errors: {obj.messages.errors()}, '
            f'original object text follows:\n{self.object_text}')
        return False

    if 'source' in obj.parsed_data and obj.parsed_data['source'].upper() != self.source:
        msg = (
            f'Incorrect source in NRTM object: stream has source {self.source}, found object with '
            f'source {obj.source()} in operation {self.serial}/{self.operation.value}/{obj.pk()}. '
            f'This operation is ignored, causing potential data inconsistencies.'
        )
        database_handler.record_mirror_error(self.source, msg)
        logger.critical(msg)
        return False

    if self.operation == DatabaseOperation.add_or_update:
        database_handler.upsert_rpsl_object(obj, self.serial)
    elif self.operation == DatabaseOperation.delete:
        database_handler.delete_rpsl_object(obj, self.serial)
    logger.info(f'Completed NRTM operation {str(self)}/{obj.pk()}')
    return True
def test_modify_mntner(self, prepare_mocks, config_override):
    validator, mock_dq, mock_dh = prepare_mocks
    mntner = rpsl_object_from_text(SAMPLE_MNTNER)
    mock_dh.execute_query = lambda q: [
        {'object_class': 'mntner', 'object_text': SAMPLE_MNTNER},
    ]

    # This counts as submitting all new hashes.
    validator.passwords = [SAMPLE_MNTNER_MD5]
    result = validator.process_auth(mntner, mntner)
    assert result.is_valid()
    assert not result.info_messages

    # This counts as submitting all new hashes, but not matching any password
    new_mntner = rpsl_object_from_text(MNTNER_OBJ_CRYPT_PW.replace('CRYPT', ''))
    validator.passwords = [SAMPLE_MNTNER_MD5]
    result = validator.process_auth(new_mntner, mntner)
    assert not result.is_valid()
    assert result.error_messages == {
        'Authorisation failed for the auth methods on this mntner object.'
    }

    # This counts as submitting all dummy hashes.
    mntner_no_auth_hashes = remove_auth_hashes(SAMPLE_MNTNER)
    new_mntner = rpsl_object_from_text(mntner_no_auth_hashes)
    result = validator.process_auth(new_mntner, mntner)
    assert result.is_valid()
    assert not new_mntner.has_dummy_auth_value()
    assert result.info_messages == {
        'As you submitted dummy hash values, all password hashes on this '
        'object were replaced with a new BCRYPT-PW hash of the password you '
        'provided for authentication.'
    }

    # This is a multi password submission with dummy hashes, which is rejected
    validator.passwords = [SAMPLE_MNTNER_MD5, SAMPLE_MNTNER_CRYPT]
    new_mntner = rpsl_object_from_text(mntner_no_auth_hashes)
    result = validator.process_auth(new_mntner, mntner)
    assert not result.is_valid()
    assert not result.info_messages
    assert result.error_messages == {
        'Object submitted with dummy hash values, but multiple or no passwords '
        'submitted. Either submit only full hashes, or a single password.'
    }
def test_as_set_autnum_required_does_not_exist(self, prepare_mocks, config_override):
    config_override({
        'auth': {
            'set_creation': {
                AUTH_SET_CREATION_COMMON_KEY: {'autnum_authentication': 'required'}
            },
            'password_hashers': {'crypt-pw': 'enabled'},
        }
    })
    validator, mock_dq, mock_dh = prepare_mocks

    as_set = rpsl_object_from_text(SAMPLE_AS_SET)
    assert as_set.clean_for_create()  # fill pk_first_segment
    query_results = itertools.cycle([
        [{'object_text': MNTNER_OBJ_CRYPT_PW}],  # mntner for object
        [],  # attempt to look for matching aut-num
    ])
    mock_dh.execute_query = lambda q: next(query_results)
    validator.passwords = [SAMPLE_MNTNER_MD5, SAMPLE_MNTNER_CRYPT]

    result = validator.process_auth(as_set, None)
    assert not result.is_valid()
    assert result.error_messages == {
        'Creating this object requires an aut-num for AS65537 to exist.',
    }
def test_as_set_autnum_disabled(self, prepare_mocks, config_override):
    config_override({
        'auth': {
            'set_creation': {
                'as-set': {'autnum_authentication': 'disabled'}
            },
            'password_hashers': {'crypt-pw': 'enabled'},
        },
    })
    validator, mock_dq, mock_dh = prepare_mocks

    as_set = rpsl_object_from_text(SAMPLE_AS_SET)
    assert as_set.clean_for_create()  # fill pk_asn_segment
    mock_dh.execute_query = lambda q: [
        {'object_text': MNTNER_OBJ_CRYPT_PW},  # mntner for object
    ]
    validator.passwords = [SAMPLE_MNTNER_MD5, SAMPLE_MNTNER_CRYPT]

    result = validator.process_auth(as_set, None)
    assert result.is_valid()
    assert flatten_mock_calls(mock_dq, flatten_objects=True) == [
        ['sources', (['TEST'],), {}],
        ['object_classes', (['mntner'],), {}],
        ['rpsl_pks', ({'TEST-MNT'},), {}],
    ]
def test_related_route_no_match_v6(self, prepare_mocks):
    validator, mock_dq, mock_dh = prepare_mocks
    route = rpsl_object_from_text(SAMPLE_ROUTE6)
    query_results = itertools.cycle([
        [{'object_text': SAMPLE_MNTNER}],  # mntner for object
        [],  # attempt to look for exact inetnum
        [],  # attempt to look for one level less specific inetnum
        [],  # attempt to look for less specific route
    ])
    mock_dh.execute_query = lambda q: next(query_results)
    validator.passwords = [SAMPLE_MNTNER_MD5]

    result = validator.process_auth(route, None)
    assert result.is_valid()

    assert flatten_mock_calls(mock_dq, flatten_objects=True) == [
        ['sources', (['TEST'],), {}],
        ['object_classes', (['mntner'],), {}],
        ['rpsl_pks', ({'TEST-MNT'},), {}],
        ['sources', (['TEST'],), {}],
        ['object_classes', (['inet6num'],), {}],
        ['first_only', (), {}],
        ['ip_exact', ('2001:db8::/48',), {}],
        ['sources', (['TEST'],), {}],
        ['object_classes', (['inet6num'],), {}],
        ['first_only', (), {}],
        ['ip_less_specific_one_level', ('2001:db8::/48',), {}],
        ['sources', (['TEST'],), {}],
        ['object_classes', (['route6'],), {}],
        ['first_only', (), {}],
        ['ip_less_specific_one_level', ('2001:db8::/48',), {}],
    ]
def test_create_mntner_requires_override(self, prepare_mocks, config_override):
    validator, mock_dq, mock_dh = prepare_mocks
    mntner = rpsl_object_from_text(SAMPLE_MNTNER)
    mock_dh.execute_query = lambda q: [
        {'object_class': 'mntner', 'object_text': SAMPLE_MNTNER},
    ]
    validator.passwords = [SAMPLE_MNTNER_MD5]

    result = validator.process_auth(mntner, None)
    assert not result.is_valid()
    assert not result.used_override
    assert result.error_messages == {
        'New mntner objects must be added by an administrator.'
    }
    assert flatten_mock_calls(mock_dq, flatten_objects=True) == [
        ['sources', (['TEST'],), {}],
        ['object_classes', (['mntner'],), {}],
        ['rpsl_pks', ({'TEST-MNT', 'OTHER1-MNT', 'OTHER2-MNT'},), {}],
    ]

    validator.overrides = [VALID_PW]
    config_override({
        'auth': {'override_password': VALID_PW_HASH},
    })
    result = validator.process_auth(mntner, None)
    assert result.is_valid(), result.error_messages
    assert result.used_override
def test_valid_new_person_preapproved_mntner(self, prepare_mocks):
    validator, mock_dq, mock_dh = prepare_mocks
    person = rpsl_object_from_text(SAMPLE_PERSON)
    mock_dh.execute_query = lambda q: [
        {'object_class': 'mntner', 'object_text': SAMPLE_MNTNER},
    ]
    validator.pre_approve([rpsl_object_from_text(SAMPLE_MNTNER)])

    result = validator.process_auth(person, None)
    assert result.is_valid(), result.error_messages
    assert not result.used_override
    assert len(result.mntners_notify) == 1
    assert result.mntners_notify[0].pk() == 'TEST-MNT'
def _check_mntners(self, mntner_pk_list: List[str], source: str) -> Tuple[bool, List[RPSLMntner]]:
    """
    Check whether authentication passes for a list of maintainers.

    Returns a tuple: the first element is True if at least one of the mntners
    in mntner_pk_list passes authentication, given self.passwords and
    self.keycert_obj_pk; the second element is the list of mntner objects
    considered. Uses and updates self._mntner_db_cache to prevent retrieving
    the same maintainers twice.
    """
    mntner_pk_set = set(mntner_pk_list)
    mntner_objs: List[RPSLMntner] = [
        m for m in self._mntner_db_cache
        if m.pk() in mntner_pk_set and m.source() == source
    ]
    mntner_pks_to_resolve: Set[str] = mntner_pk_set - {m.pk() for m in mntner_objs}

    if mntner_pks_to_resolve:
        query = RPSLDatabaseQuery().sources([source])
        query = query.object_classes(['mntner']).rpsl_pks(mntner_pks_to_resolve)
        results = list(self.database_handler.execute_query(query))

        retrieved_mntner_objs: List[RPSLMntner] = [
            rpsl_object_from_text(r['object_text']) for r in results
        ]  # type: ignore
        self._mntner_db_cache.update(retrieved_mntner_objs)
        mntner_objs += retrieved_mntner_objs

    for mntner_name in mntner_pk_list:
        if mntner_name in self._pre_approved:
            return True, mntner_objs

    for mntner_obj in mntner_objs:
        if mntner_obj.verify_auth(self.passwords, self.keycert_obj_pk):
            return True, mntner_objs

    return False, mntner_objs
def test_filter_set_autnum_required_no_prefix(self, prepare_mocks, config_override):
    config_override({
        'auth': {
            'set_creation': {
                AUTH_SET_CREATION_COMMON_KEY: {
                    'autnum_authentication': 'required',
                    'prefix_required': False,
                }
            },
            'password_hashers': {'crypt-pw': 'enabled'},
        }
    })
    validator, mock_dq, mock_dh = prepare_mocks

    filter_set = rpsl_object_from_text(SAMPLE_FILTER_SET)
    assert filter_set.clean_for_create()
    mock_dh.execute_query = lambda q: [
        {'object_text': MNTNER_OBJ_CRYPT_PW},  # mntner for object
    ]
    validator.passwords = [SAMPLE_MNTNER_MD5, SAMPLE_MNTNER_CRYPT]

    result = validator.process_auth(filter_set, None)
    assert result.is_valid()
    assert flatten_mock_calls(mock_dq, flatten_objects=True) == [
        ['sources', (['TEST'],), {}],
        ['object_classes', (['mntner'],), {}],
        ['rpsl_pks', ({'TEST-MNT'},), {}],
    ]
def parse_object(self, rpsl_text, strict_validation):
    try:
        self.obj_parsed += 1
        obj = rpsl_object_from_text(rpsl_text.strip(), strict_validation=strict_validation)
        if (obj.messages.messages() and self.show_info) or obj.messages.errors():
            if obj.messages.errors():
                self.obj_errors += 1

            print(rpsl_text.strip())
            print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
            print(obj.messages)
            print('\n=======================================\n')

        if self.database_handler and obj and not obj.messages.errors():
            self.database_handler.upsert_rpsl_object(obj, JournalEntryOrigin.mirror)

    except UnknownRPSLObjectClassException as e:
        self.obj_unknown += 1
        self.unknown_object_classes.add(str(e).split(':')[1].strip())
    except Exception as e:  # pragma: no cover
        print('=======================================')
        print(rpsl_text)
        print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
        raise e
def test_nrtm_add_valid_with_strict_import_keycert(self, monkeypatch, tmp_gpg_dir):
    mock_dh = Mock()
    mock_scopefilter = Mock(spec=ScopeFilterValidator)
    monkeypatch.setattr(
        'irrd.mirroring.nrtm_operation.ScopeFilterValidator',
        lambda: mock_scopefilter)
    mock_scopefilter.validate_rpsl_object = lambda obj: (ScopeFilterStatus.in_scope, '')

    operation = NRTMOperation(
        source='TEST',
        operation=DatabaseOperation.add_or_update,
        serial=42424242,
        object_text=SAMPLE_KEY_CERT,
        strict_validation_key_cert=True,
        object_class_filter=['route', 'route6', 'mntner', 'key-cert'],
    )
    assert operation.save(database_handler=mock_dh)

    assert mock_dh.upsert_rpsl_object.call_count == 1
    assert mock_dh.mock_calls[0][1][0].pk() == 'PGPKEY-80F238C6'
    assert mock_dh.mock_calls[0][1][1] == JournalEntryOrigin.mirror

    # key-cert should be imported in the keychain, therefore
    # verification should succeed
    key_cert_obj = rpsl_object_from_text(SAMPLE_KEY_CERT, strict_validation=False)
    assert key_cert_obj.verify(KEY_CERT_SIGNED_MESSAGE_VALID)
def parse_object(self, rpsl_text, strict_validation):
    if not rpsl_text.strip():
        return
    try:
        self.obj_parsed += 1
        obj = rpsl_object_from_text(rpsl_text.strip(), strict_validation=strict_validation)
        if (obj.messages.messages() and self.show_info) or obj.messages.errors():
            if obj.messages.errors():
                self.obj_errors += 1

            print(rpsl_text.strip())
            print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            print(obj.messages)
            print("\n=======================================\n")

        if self.database_handler and obj and not obj.messages.errors():
            self.database_handler.upsert_rpsl_object(obj)

    except UnknownRPSLObjectClassException as e:
        self.obj_unknown += 1
        self.unknown_object_classes.add(str(e).split(":")[1].strip())
    except Exception as e:  # pragma: no cover
        print("=======================================")
        print(rpsl_text)
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        raise e
def set_last_modified():
    dh = DatabaseHandler()
    auth_sources = [
        k for k, v in get_setting('sources').items() if v.get('authoritative')
    ]
    q = RPSLDatabaseQuery(column_names=['pk', 'object_text', 'updated'], enable_ordering=False)
    q = q.sources(auth_sources)

    results = list(dh.execute_query(q))
    print(f'Updating {len(results)} objects in sources {auth_sources}')
    for result in results:
        rpsl_obj = rpsl_object_from_text(result['object_text'], strict_validation=False)
        if rpsl_obj.messages.errors():  # pragma: no cover
            print(f'Failed to process {rpsl_obj}: {rpsl_obj.messages.errors()}')
            continue
        new_text = rpsl_obj.render_rpsl_text(result['updated'])
        stmt = RPSLDatabaseObject.__table__.update().where(
            RPSLDatabaseObject.__table__.c.pk == result['pk']).values(
            object_text=new_text,
        )
        dh.execute_statement(stmt)
    dh.commit()
    dh.close()
def _retrieve_existing_version(self):
    """
    Retrieve the current version of this object, if any, and store it in
    rpsl_obj_current. Update self.request_type appropriately.
    """
    query = RPSLDatabaseQuery().sources([self.rpsl_obj_new.source()])
    query = query.object_classes([self.rpsl_obj_new.rpsl_object_class]).rpsl_pk(self.rpsl_obj_new.pk())
    results = list(self.database_handler.execute_query(query))

    if not results:
        self.request_type = UpdateRequestType.CREATE
        logger.debug(f'{id(self)}: Did not find existing version for object {self.rpsl_obj_new}, request is CREATE')
    elif len(results) == 1:
        self.request_type = UpdateRequestType.MODIFY
        self.rpsl_obj_current = rpsl_object_from_text(results[0]['object_text'], strict_validation=False)
        logger.debug(f'{id(self)}: Retrieved existing version for object '
                     f'{self.rpsl_obj_current}, request is MODIFY/DELETE')
    else:  # pragma: no cover
        # This should not be possible, as rpsl_pk/source are a composite unique value in the database scheme.
        # Therefore, a query should not be able to affect more than one row.
        affected_pks = ', '.join([r['pk'] for r in results])
        msg = f'{id(self)}: Attempted to retrieve current version of object {self.rpsl_obj_new.pk()}/'
        msg += f'{self.rpsl_obj_new.source()}, but multiple '
        msg += f'objects were found, internal pks found: {affected_pks}'
        logger.error(msg)
        raise ValueError(msg)
def clean(self, query: str, response: Optional[str]) -> Optional[str]:
    """Clean the query response, so that the text can be compared."""
    if not response:
        return response
    irr_query = query[:2].lower()
    response = response.strip().lower()

    cleaned_result_list = None
    if irr_query in SSP_QUERIES or (irr_query == '!r' and query.lower().strip().endswith(',o')):
        cleaned_result_list = response.split(' ')
    if irr_query in ['!6', '!g'] and cleaned_result_list:
        cleaned_result_list = [str(IP(ip)) for ip in cleaned_result_list]
    if cleaned_result_list:
        return ' '.join(sorted(list(set(cleaned_result_list))))
    else:
        new_responses = []
        for paragraph in split_paragraphs_rpsl(response):
            rpsl_obj = rpsl_object_from_text(paragraph.strip(), strict_validation=False)
            new_responses.append(rpsl_obj)
        new_responses.sort(key=lambda i: i.parsed_data.get('source', '') + i.rpsl_object_class + i.pk())
        texts = [r.render_rpsl_text() for r in new_responses]
        return '\n'.join(OrderedSet(texts))
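# Hypothetical illustration of the normalisation above, assuming `comparer` is an
# instance of the class this method belongs to, and assuming '!g' is one of the
# set-style queries covered by SSP_QUERIES (the later ['!6', '!g'] check suggests
# it is). Set-style responses are lower-cased, de-duplicated and sorted, so two
# answers that differ only in ordering compare equal:
assert (comparer.clean('!gAS65537', '192.0.2.0/24 198.51.100.0/24')
        == comparer.clean('!gAS65537', '198.51.100.0/24 192.0.2.0/24'))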
def _check_mntners(self, mntner_list: List[str], source: str) -> bool:
    """
    Check whether authentication passes for a list of maintainers.

    Returns True if at least one of the mntners in mntner_list passes
    authentication, given self.passwords and self.keycert_obj_pk.
    Updates and checks self._passed_cache to prevent double checking
    of maintainers.
    """
    for mntner_name in mntner_list:
        if mntner_name in self._passed_cache or mntner_name in self._pre_approved:
            return True

    query = RPSLDatabaseQuery().sources([source])
    query = query.object_classes(['mntner']).rpsl_pks(mntner_list)
    results = list(self.database_handler.execute_query(query))

    mntner_objs: List[RPSLMntner] = [
        rpsl_object_from_text(r['object_text']) for r in results
    ]  # type: ignore

    for mntner_obj in mntner_objs:
        if mntner_obj.verify_auth(self.passwords, self.keycert_obj_pk):
            self._passed_cache.add(mntner_obj.pk())
            return True

    return False
def test_parse(self, mock_scopefilter, caplog, tmp_gpg_dir, config_override):
    config_override({
        'sources': {
            'TEST': {
                'object_class_filter': ['route', 'key-cert'],
                'strict_import_keycert_objects': True,
            }
        }
    })
    mock_dh = Mock()
    mock_roa_validator = Mock(spec=BulkRouteROAValidator)
    mock_roa_validator.validate_route = lambda ip, length, asn, source: RPKIStatus.invalid

    test_data = [
        SAMPLE_UNKNOWN_ATTRIBUTE,  # valid, because mirror imports are non-strict
        SAMPLE_ROUTE6,  # valid, excluded by object class filter
        SAMPLE_KEY_CERT,
        SAMPLE_ROUTE.replace('TEST', 'BADSOURCE'),
        SAMPLE_UNKNOWN_CLASS,
        SAMPLE_MALFORMED_PK,
        SAMPLE_LEGACY_IRRD_ARTIFACT,
    ]
    test_input = '\n\n'.join(test_data)

    with tempfile.NamedTemporaryFile() as fp:
        fp.write(test_input.encode('utf-8'))
        fp.seek(0)
        parser = MirrorFileImportParser(
            source='TEST',
            filename=fp.name,
            serial=424242,
            database_handler=mock_dh,
            roa_validator=mock_roa_validator,
        )
        parser.run_import()

    assert len(mock_dh.mock_calls) == 5
    assert mock_dh.mock_calls[0][0] == 'upsert_rpsl_object'
    assert mock_dh.mock_calls[0][1][0].pk() == '192.0.2.0/24AS65537'
    assert mock_dh.mock_calls[0][1][0].rpki_status == RPKIStatus.invalid
    assert mock_dh.mock_calls[0][1][0].scopefilter_status == ScopeFilterStatus.in_scope
    assert mock_dh.mock_calls[1][0] == 'upsert_rpsl_object'
    assert mock_dh.mock_calls[1][1][0].pk() == 'PGPKEY-80F238C6'
    assert mock_dh.mock_calls[2][0] == 'record_mirror_error'
    assert mock_dh.mock_calls[3][0] == 'record_mirror_error'
    assert mock_dh.mock_calls[4][0] == 'record_serial_seen'
    assert mock_dh.mock_calls[4][1][0] == 'TEST'
    assert mock_dh.mock_calls[4][1][1] == 424242

    assert 'Invalid source BADSOURCE for object' in caplog.text
    assert 'Invalid address prefix' in caplog.text
    assert 'File import for TEST: 6 objects read, 2 objects inserted, ignored 2 due to errors' in caplog.text
    assert 'ignored 1 due to object_class_filter' in caplog.text
    assert 'Ignored 1 objects found in file import for TEST due to unknown object classes' in caplog.text

    key_cert_obj = rpsl_object_from_text(SAMPLE_KEY_CERT, strict_validation=False)
    assert key_cert_obj.verify(KEY_CERT_SIGNED_MESSAGE_VALID)
def __init__(self, rpsl_text_submitted: str, database_handler: DatabaseHandler,
             auth_validator: AuthValidator, reference_validator: ReferenceValidator,
             delete_reason: Optional[str] = None) -> None:
    """
    Initialise a new update request for a single RPSL object.

    :param rpsl_text_submitted: the object text
    :param database_handler: a DatabaseHandler instance
    :param auth_validator: an AuthValidator instance, to resolve authentication requirements
    :param reference_validator: a ReferenceValidator instance, to resolve references between objects
    :param delete_reason: a string with the deletion reason, if this was a deletion request

    The rpsl_text passed into this function should be cleaned from any meta attributes
    like delete/override/password. Those should be passed into this method as
    delete_reason, or provided to the AuthValidator.

    The auth_validator and reference_validator must be shared between different
    instances, to benefit from caching, and to resolve references between different
    objects that are part of the same update.

    NOTE: the auth_validator's cache of passed maintainers and keycert_obj_pk are
    trusted without further verification. User provided values must never be passed
    into them without prior validation.
    """
    self.database_handler = database_handler
    self.auth_validator = auth_validator
    self.reference_validator = reference_validator
    self.rpsl_text_submitted = rpsl_text_submitted

    try:
        self.rpsl_obj_new = rpsl_object_from_text(rpsl_text_submitted, strict_validation=True)
        if self.rpsl_obj_new.messages.errors():
            self.status = UpdateRequestStatus.ERROR_PARSING
        self.error_messages = self.rpsl_obj_new.messages.errors()
        self.info_messages = self.rpsl_obj_new.messages.infos()
    except UnknownRPSLObjectClassException as exc:
        self.rpsl_obj_new = None
        self.request_type = None
        self.status = UpdateRequestStatus.ERROR_UNKNOWN_CLASS
        self.info_messages = []
        self.error_messages = [str(exc)]

    if self.is_valid():
        self._retrieve_existing_version()

    if delete_reason:
        self.request_type = UpdateRequestType.DELETE
        if not self.rpsl_obj_current:
            self.status = UpdateRequestStatus.ERROR_PARSING
            self.error_messages.append(
                'Can not delete object: no object found for this key in this database.'
            )
def test_override_valid(self, prepare_mocks, config_override):
    config_override({
        'auth': {'override_password': VALID_PW_HASH},
    })
    validator, mock_dq, mock_dh = prepare_mocks
    person = rpsl_object_from_text(SAMPLE_PERSON)
    validator.overrides = [VALID_PW]
    result = validator.process_auth(person, None)
    assert result.is_valid(), result.error_messages
    assert result.used_override

    person = rpsl_object_from_text(SAMPLE_PERSON)
    result = validator.process_auth(person, rpsl_obj_current=person)
    assert result.is_valid(), result.error_messages
    assert result.used_override
def test_validate_rpsl_object(self, config_override):
    validator = ScopeFilterValidator()

    route_obj = rpsl_object_from_text(SAMPLE_ROUTE)
    assert validator.validate_rpsl_object(route_obj) == (ScopeFilterStatus.in_scope, '')

    autnum_obj = rpsl_object_from_text(SAMPLE_AUT_NUM)
    assert validator.validate_rpsl_object(autnum_obj) == (ScopeFilterStatus.in_scope, '')

    config_override({
        'scopefilter': {
            'asns': ['65537'],
        },
    })
    validator.load_filters()
    result = validator.validate_rpsl_object(route_obj)
    assert result == (ScopeFilterStatus.out_scope_as, 'ASN 65537 is out of scope')
    result = validator.validate_rpsl_object(autnum_obj)
    assert result == (ScopeFilterStatus.out_scope_as, 'ASN 65537 is out of scope')

    config_override({
        'scopefilter': {
            'prefixes': ['192.0.2.0/32'],
        },
    })
    validator.load_filters()
    result = validator.validate_rpsl_object(route_obj)
    assert result == (ScopeFilterStatus.out_scope_prefix, 'prefix 192.0.2.0/24 is out of scope')

    config_override({
        'scopefilter': {
            'prefixes': ['0/0'],
        },
    })
    validator.load_filters()

    # Ignored object class
    result = validator.validate_rpsl_object(rpsl_object_from_text(SAMPLE_INETNUM))
    assert result == (ScopeFilterStatus.in_scope, '')
def test_related_route_less_specific_route(self, prepare_mocks):
    validator, mock_dq, mock_dh = prepare_mocks
    route = rpsl_object_from_text(SAMPLE_ROUTE)
    query_results = itertools.cycle([
        [{'object_text': MNTNER_OBJ_CRYPT_PW}],  # mntner for object
        [],  # attempt to look for exact inetnum
        [],  # attempt to look for one level less specific inetnum
        [{  # attempt to look for less specific route
            'object_class': 'route',
            'rpsl_pk': '192.0.2.0/24AS65537',
            'parsed_data': {'mnt-by': ['RELATED-MNT']}
        }],
        [{'object_text': MNTNER_OBJ_MD5_PW}],  # related mntner retrieval
    ])
    mock_dh.execute_query = lambda q: next(query_results)
    validator.passwords = [SAMPLE_MNTNER_MD5, SAMPLE_MNTNER_CRYPT]

    result = validator.process_auth(route, None)
    assert result.is_valid()

    assert flatten_mock_calls(mock_dq, flatten_objects=True) == [
        ['sources', (['TEST'],), {}],
        ['object_classes', (['mntner'],), {}],
        ['rpsl_pks', ({'TEST-MNT'},), {}],
        ['sources', (['TEST'],), {}],
        ['object_classes', (['inetnum'],), {}],
        ['first_only', (), {}],
        ['ip_exact', ('192.0.2.0/24',), {}],
        ['sources', (['TEST'],), {}],
        ['object_classes', (['inetnum'],), {}],
        ['first_only', (), {}],
        ['ip_less_specific_one_level', ('192.0.2.0/24',), {}],
        ['sources', (['TEST'],), {}],
        ['object_classes', (['route'],), {}],
        ['first_only', (), {}],
        ['ip_less_specific_one_level', ('192.0.2.0/24',), {}],
        ['sources', (['TEST'],), {}],
        ['object_classes', (['mntner'],), {}],
        ['rpsl_pks', ({'RELATED-MNT'},), {}],
    ]

    validator = AuthValidator(mock_dh, None)
    validator.passwords = [SAMPLE_MNTNER_CRYPT]  # related only has MD5, so this is invalid
    result = validator.process_auth(route, None)
    assert not result.is_valid()
    assert result.error_messages == {
        'Authorisation for route 192.0.2.0/24AS65537 failed: must be authenticated by one of: '
        'RELATED-MNT - from parent route 192.0.2.0/24AS65537'
    }
def test_mntner_create(self, prepare_mocks):
    validator, mock_dsq, mock_dh = prepare_mocks
    person = rpsl_object_from_text(SAMPLE_PERSON)
    mntner = rpsl_object_from_text(SAMPLE_MNTNER)

    mock_dh.execute_query.return_value = []
    assert validator.validate(person, UpdateRequestType.CREATE).is_valid()
    assert validator.validate(mntner, UpdateRequestType.MODIFY).is_valid()
    assert validator.validate(mntner, UpdateRequestType.DELETE).is_valid()
    assert validator.validate(mntner, UpdateRequestType.CREATE).is_valid()
    validator._check_suspended_mntner_with_same_pk.cache_clear()

    mock_dh.execute_query.return_value = [{
        'rpsl_pk': 'conflicting entry which is only counted, not used'
    }]
    assert validator.validate(person, UpdateRequestType.CREATE).is_valid()
    assert validator.validate(mntner, UpdateRequestType.MODIFY).is_valid()
    assert validator.validate(mntner, UpdateRequestType.DELETE).is_valid()
    invalid = validator.validate(mntner, UpdateRequestType.CREATE)
    assert not invalid.is_valid()
    assert invalid.error_messages == {
        'A suspended mntner with primary key TEST-MNT already exists for TEST'
    }

    assert flatten_mock_calls(mock_dsq, flatten_objects=True) == [
        ['', (), {}],
        ['object_classes', (['mntner'],), {}],
        ['rpsl_pk', ('TEST-MNT',), {}],
        ['sources', (['TEST'],), {}],
        ['first_only', (), {}],
        ['', (), {}],
        ['object_classes', (['mntner'],), {}],
        ['rpsl_pk', ('TEST-MNT',), {}],
        ['sources', (['TEST'],), {}],
        ['first_only', (), {}],
    ]
def parse_object(self, rpsl_text: str) -> None:
    try:
        self.obj_parsed += 1
        # If an object turns out to be a key-cert, and strict_import_keycert_objects
        # is set, parse it again with strict validation to load it in the GPG keychain.
        obj = rpsl_object_from_text(rpsl_text.strip(), strict_validation=False)
        if self.strict_validation_key_cert and obj.__class__ == RPSLKeyCert:
            obj = rpsl_object_from_text(rpsl_text.strip(), strict_validation=True)

        if obj.messages.errors():
            logger.critical(
                f'Parsing errors occurred while importing from file for {self.source}. '
                f'This object is ignored, causing potential data inconsistencies. A new operation for '
                f'this update, without errors, will still be processed and cause the inconsistency to '
                f'be resolved. Parser error messages: {obj.messages.errors()}; '
                f'original object text follows:\n{rpsl_text}')
            self.database_handler.record_mirror_error(
                self.source,
                f'Parsing errors: {obj.messages.errors()}, '
                f'original object text follows:\n{rpsl_text}')
            self.obj_errors += 1
            return

        if obj.source() != self.source:
            msg = f'Invalid source {obj.source()} for object {obj.pk()}, expected {self.source}. '
            logger.critical(msg + 'This object is ignored, causing potential data inconsistencies.')
            self.database_handler.record_mirror_error(self.source, msg)
            self.obj_errors += 1
            return

        if self.object_class_filter and obj.rpsl_object_class.lower() not in self.object_class_filter:
            self.obj_ignored_class += 1
            return

        self.database_handler.upsert_rpsl_object(obj, forced_serial=self.serial)
    except UnknownRPSLObjectClassException as e:
        self.obj_unknown += 1
        self.unknown_object_classes.add(str(e).split(':')[1].strip())
def test_override_invalid_or_missing(self, prepare_mocks, config_override):
    # This test mostly ignores the regular process that happens
    # after override validation fails.
    validator, mock_dq, mock_dh = prepare_mocks
    mock_dh.execute_query = lambda q: []
    person = rpsl_object_from_text(SAMPLE_PERSON)

    validator.overrides = [VALID_PW]
    result = validator.process_auth(person, None)
    assert not result.is_valid()
    assert not result.used_override

    config_override({
        'auth': {'override_password': VALID_PW_HASH},
    })
    validator.overrides = []
    result = validator.process_auth(person, None)
    assert not result.is_valid()
    assert not result.used_override

    validator.overrides = [INVALID_PW]
    result = validator.process_auth(person, None)
    assert not result.is_valid()
    assert not result.used_override

    config_override({
        'auth': {'override_password': '******'},
    })
    person = rpsl_object_from_text(SAMPLE_PERSON)
    result = validator.process_auth(person, None)
    assert not result.is_valid()
    assert not result.used_override
def save(self, database_handler: DatabaseHandler) -> bool:
    try:
        obj = rpsl_object_from_text(self.object_text.strip(), strict_validation=False)
    except UnknownRPSLObjectClassException as exc:
        logger.warning(
            f'Ignoring NRTM from {self.source} operation {self.serial}/{self.operation.value}: {exc}'
        )
        return False

    if self.object_class_filter and obj.rpsl_object_class.lower() not in self.object_class_filter:
        return False

    if obj.messages.errors():
        errors = '; '.join(obj.messages.errors())
        logger.critical(
            f'Parsing errors occurred while processing NRTM from {self.source}, '
            f'operation {self.serial}/{self.operation.value}. This operation is ignored, '
            f'causing potential data inconsistencies. A new operation for this update, without errors, '
            f'will still be processed and cause the inconsistency to be resolved. '
            f'Parser error messages: {errors}; original object text follows:\n{self.object_text}'
        )
        database_handler.record_mirror_error(
            self.source,
            f'Parsing errors: {obj.messages.errors()}, '
            f'original object text follows:\n{self.object_text}')
        return False

    if 'source' in obj.parsed_data and obj.parsed_data['source'].upper() != self.source:
        msg = (
            f'Incorrect source in NRTM object: stream has source {self.source}, found object with '
            f'source {obj.source()} in operation {self.serial}/{self.operation.value}/{obj.pk()}. '
            f'This operation is ignored, causing potential data inconsistencies.'
        )
        database_handler.record_mirror_error(self.source, msg)
        logger.critical(msg)
        return False

    if self.operation == DatabaseOperation.add_or_update:
        database_handler.upsert_rpsl_object(obj, self.serial)
    elif self.operation == DatabaseOperation.delete:
        database_handler.delete_rpsl_object(obj, self.serial)
    logger.info(
        f'Completed NRTM operation in {self.source}: {self.serial}/{self.operation.value}/{obj.pk()}'
    )
    return True
def load_pgp_keys(source: str) -> None:
    dh = DatabaseHandler()
    query = RPSLDatabaseQuery(column_names=['rpsl_pk', 'object_text'])
    query = query.sources([source]).object_classes(['key-cert'])
    keycerts = dh.execute_query(query)

    for keycert in keycerts:
        rpsl_pk = keycert['rpsl_pk']
        print(f'Loading key-cert {rpsl_pk}')
        # Parsing the keycert in strict mode will load it into the GPG keychain
        result = rpsl_object_from_text(keycert['object_text'], strict_validation=True)
        if result.messages.errors():
            print(f'Errors in PGP key {rpsl_pk}: {result.messages.errors()}')

    print('All valid key-certs loaded into the GnuPG keychain.')
    dh.close()
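# Hypothetical invocation sketch: pass the name of any configured source whose
# key-cert objects should be loaded; 'TEST' is only an example source name and
# the __main__ guard is illustrative, not the script's actual entry point.
if __name__ == '__main__':
    load_pgp_keys('TEST')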
def test_valid_new_person(self, prepare_mocks):
    validator, mock_dq, mock_dh = prepare_mocks
    person = rpsl_object_from_text(SAMPLE_PERSON)
    mock_dh.execute_query = lambda q: [
        {'object_class': 'mntner', 'object_text': SAMPLE_MNTNER},
    ]
    validator.passwords = [SAMPLE_MNTNER_MD5]

    result = validator.process_auth(person, None)
    assert result.is_valid(), result.error_messages
    assert not result.used_override
    assert len(result.mntners_notify) == 1
    assert result.mntners_notify[0].pk() == 'TEST-MNT'

    assert flatten_mock_calls(mock_dq, flatten_objects=True) == [
        ['sources', (['TEST'],), {}],
        ['object_classes', (['mntner'],), {}],
        ['rpsl_pks', ({'TEST-MNT'},), {}],
    ]
def test_nrtm_add_valid_without_strict_import_keycert(self, tmp_gpg_dir):
    mock_dh = Mock()

    operation = NRTMOperation(
        source='TEST',
        operation=DatabaseOperation.add_or_update,
        serial=42424242,
        object_text=SAMPLE_KEY_CERT,
        strict_validation_key_cert=False,
        object_class_filter=['route', 'route6', 'mntner', 'key-cert'],
    )
    assert operation.save(database_handler=mock_dh)

    assert mock_dh.upsert_rpsl_object.call_count == 1
    assert mock_dh.mock_calls[0][1][0].pk() == 'PGPKEY-80F238C6'
    assert mock_dh.mock_calls[0][1][1] == JournalEntryOrigin.mirror

    # key-cert should not be imported in the keychain, therefore
    # verification should fail
    key_cert_obj = rpsl_object_from_text(SAMPLE_KEY_CERT, strict_validation=False)
    assert not key_cert_obj.verify(KEY_CERT_SIGNED_MESSAGE_VALID)