def test_route_search_less_specific_one_level(self, prepare_parser):
    """!r...,l must run a one-level less-specific search over route/route6."""
    query_mock, handler_mock, parser = prepare_parser

    response = parser.handle_query('!r192.0.2.0/25,l')

    assert response.response_type == WhoisQueryResponseType.SUCCESS
    assert response.mode == WhoisQueryResponseMode.IRRD
    assert response.result == MOCK_ROUTE_COMBINED
    expected_calls = [
        ['object_classes', (['route', 'route6'],), {}],
        ['ip_less_specific_one_level', (IP('192.0.2.0/25'),), {}],
    ]
    assert flatten_mock_calls(query_mock) == expected_calls
def test_route_search_more_specific(self, prepare_parser):
    """-M must run a more-specific route search over route/route6."""
    query_mock, handler_mock, preloader_mock, parser = prepare_parser

    response = parser.handle_query('-M 192.0.2.0/25')

    assert response.response_type == WhoisQueryResponseType.SUCCESS
    assert response.mode == WhoisQueryResponseMode.RIPE
    assert response.result == MOCK_ROUTE_COMBINED
    expected_calls = [
        ['object_classes', (['route', 'route6'],), {}],
        ['ip_more_specific', (IP('192.0.2.0/25'),), {}],
    ]
    assert flatten_mock_calls(query_mock) == expected_calls
def test_text_search(self, prepare_parser):
    """A bare term falls through to a free-text search in RIPE mode."""
    query_mock, handler_mock, parser = prepare_parser
    handler_mock.reset_mock()

    response = parser.handle_query('query')

    assert response.response_type == WhoisQueryResponseType.SUCCESS
    assert response.mode == WhoisQueryResponseMode.RIPE
    assert response.result == MOCK_ROUTE_COMBINED
    assert flatten_mock_calls(query_mock) == [
        ['text_search', ('query',), {}],
    ]
def test_run_import_http_file_success(self, monkeypatch, config_override, tmpdir, caplog):
    """Full ROA import over HTTP with a file:// SLURM source.

    Verifies the runner wipes existing ROA/pseudo-RPKI data with journaling
    disabled, revalidates all routes, writes the new RPKI statuses back,
    and logs a summary including the notification-email count.
    """
    slurm_path = str(tmpdir) + '/slurm.json'
    config_override({
        'rpki': {
            'roa_source': 'https://host/roa.json',
            'slurm_source': 'file://' + slurm_path,
        }
    })

    class MockRequestsSuccess:
        # Stand-in for requests.get(); asserts on the call and streams ROA data.
        status_code = 200

        def __init__(self, url, stream, timeout):
            assert url == 'https://host/roa.json'
            assert stream
            assert timeout

        def iter_content(self, size):
            return iter([b'roa_', b'data'])

    with open(slurm_path, 'wb') as fh:
        fh.write(b'slurm_data')

    mock_dh = Mock(spec=DatabaseHandler)
    monkeypatch.setattr('irrd.mirroring.mirror_runners_import.DatabaseHandler',
                        lambda: mock_dh)
    monkeypatch.setattr('irrd.mirroring.mirror_runners_import.ROADataImporter',
                        MockROADataImporter)
    mock_bulk_validator = Mock(spec=BulkRouteROAValidator)
    monkeypatch.setattr('irrd.mirroring.mirror_runners_import.BulkRouteROAValidator',
                        lambda dh, roas: mock_bulk_validator)
    monkeypatch.setattr('irrd.mirroring.mirror_runners_import.requests.get',
                        MockRequestsSuccess)
    # Report one email sent to owners of newly invalid authoritative objects.
    monkeypatch.setattr('irrd.mirroring.mirror_runners_import.notify_rpki_invalid_owners',
                        lambda dh, invalids: 1)
    # Validator returns (now valid, now invalid, now not_found) route sets.
    mock_bulk_validator.validate_all_routes = lambda: (
        [{'rpsl_pk': 'pk_now_valid1'}, {'rpsl_pk': 'pk_now_valid2'}],
        [{'rpsl_pk': 'pk_now_invalid1'}, {'rpsl_pk': 'pk_now_invalid2'}],
        [{'rpsl_pk': 'pk_now_unknown1'}, {'rpsl_pk': 'pk_now_unknown2'}],
    )

    ROAImportRunner().run()

    assert flatten_mock_calls(mock_dh) == [
        ['disable_journaling', (), {}],
        ['delete_all_roa_objects', (), {}],
        ['delete_all_rpsl_objects_with_journal', ('RPKI',), {'journal_guaranteed_empty': True}],
        ['commit', (), {}],
        ['enable_journaling', (), {}],
        ['update_rpki_status', (), {
            'rpsl_objs_now_valid': [{'rpsl_pk': 'pk_now_valid1'}, {'rpsl_pk': 'pk_now_valid2'}],
            'rpsl_objs_now_invalid': [{'rpsl_pk': 'pk_now_invalid1'}, {'rpsl_pk': 'pk_now_invalid2'}],
            'rpsl_objs_now_not_found': [{'rpsl_pk': 'pk_now_unknown1'}, {'rpsl_pk': 'pk_now_unknown2'}],
        }],
        ['commit', (), {}],
        ['close', (), {}],
    ]
    # Fixed: the original expected-log string was split across a raw newline
    # inside a single-quoted literal, which is a syntax error; rejoined via
    # implicit string concatenation.
    assert ('2 newly valid, 2 newly invalid, 2 newly not_found routes, '
            '1 emails sent to contacts of newly invalid authoritative objects') in caplog.text
def test_exception_handling(self, monkeypatch, config_override, tmpdir, caplog):
    """An exception from the validator must be logged; the DB handler is closed."""
    database_handler_mock = Mock(spec=DatabaseHandler)
    monkeypatch.setattr(
        'irrd.mirroring.mirror_runners_import.DatabaseHandler',
        lambda: database_handler_mock)
    failing_validator = Mock(side_effect=ValueError('expected-test-error'))
    monkeypatch.setattr(
        'irrd.mirroring.mirror_runners_import.ScopeFilterValidator',
        failing_validator)

    ScopeFilterUpdateRunner().run()

    assert flatten_mock_calls(database_handler_mock) == [['close', (), {}]]
    assert 'expected-test-error' in caplog.text
def test_limit_sources_key_lookup(self, prepare_resolver):
    """key_lookup must honor a previously set source restriction."""
    query_mock, handler_mock, preloader_mock, expected_rows, resolver = prepare_resolver
    resolver.set_query_sources(['TEST1'])

    result = resolver.key_lookup('route', '192.0.2.0/25')

    assert list(result) == expected_rows
    assert flatten_mock_calls(query_mock) == [
        ['sources', (['TEST1'],), {}],
        ['object_classes', (['route'],), {}],
        ['rpsl_pk', ('192.0.2.0/25',), {}],
        ['first_only', (), {}],
    ]
def test_missing_source_settings_ftp(self, config_override):
    """Without import_source configured, the runner must not touch the database."""
    config_override({
        'sources': {
            'TEST': {'import_serial_source': 'ftp://host/serial'},
        }
    })
    database_handler_mock = Mock()

    MirrorFullImportRunner('TEST').run(database_handler_mock)

    assert not flatten_mock_calls(database_handler_mock)
def test_load_database_success(capsys, monkeypatch):
    """A clean load deletes, disables journaling, commits and prints nothing."""
    database_handler_mock = Mock()
    monkeypatch.setattr('irrd.scripts.load_database.DatabaseHandler',
                        lambda: database_handler_mock)
    roa_validator_mock = Mock()
    monkeypatch.setattr('irrd.scripts.load_database.BulkRouteROAValidator',
                        lambda dh: roa_validator_mock)
    parser_mock = Mock()
    monkeypatch.setattr('irrd.scripts.load_database.MirrorFileImportParser',
                        lambda *args, **kwargs: parser_mock)
    parser_mock.run_import = lambda: None

    assert load('TEST', 'test.db', 42) == 0

    assert flatten_mock_calls(database_handler_mock) == [
        ['delete_all_rpsl_objects_with_journal', ('TEST',), {}],
        ['disable_journaling', (), {}],
        ['commit', (), {}],
        ['close', (), {}],
    ]
    # run_import was replaced with a plain lambda, so it leaves no mock calls.
    assert flatten_mock_calls(parser_mock) == []
    assert not capsys.readouterr().out
def test_restrict_object_class(self, prepare_resolver):
    """An object class filter must apply to the next query only."""
    query_mock, handler_mock, preloader_mock, expected_rows, resolver = prepare_resolver
    handler_mock.reset_mock()
    resolver.set_object_class_filter_next_query(['route'])

    result = resolver.rpsl_attribute_search('mnt-by', 'MNT-TEST')
    assert list(result) == expected_rows
    assert flatten_mock_calls(query_mock) == [
        ['sources', (['TEST1', 'TEST2'],), {}],
        ['object_classes', (['route'],), {}],
        ['lookup_attr', ('mnt-by', 'MNT-TEST'), {}],
    ]
    query_mock.reset_mock()

    # The class filter must not persist into a second query.
    result = resolver.rpsl_attribute_search('mnt-by', 'MNT-TEST')
    assert list(result) == expected_rows
    assert flatten_mock_calls(query_mock) == [
        ['sources', (['TEST1', 'TEST2'],), {}],
        ['lookup_attr', ('mnt-by', 'MNT-TEST'), {}],
    ]
def test_missing_argument(self, prepare_parser):
    """Flags that require an argument must each produce a specific error."""
    query_mock, handler_mock, preloader_mock, parser = prepare_parser
    handler_mock.reset_mock()

    queries_lacking_argument = ['-i ', '-i mnt-by ', '-s', '-T', '-t', '-V', '-x ']
    for query in queries_lacking_argument:
        response = parser.handle_query(query)
        assert response.response_type == WhoisQueryResponseType.ERROR
        assert response.mode == WhoisQueryResponseMode.RIPE
        assert response.result == 'Missing argument for flag/search: ' + query[1]
        assert flatten_mock_calls(handler_mock) == [['close', (), {}]]
        handler_mock.reset_mock()
def test_submit_email_success(capsys, monkeypatch):
    """A successful e-mail submission triggers a reload signal to the pidfile."""
    email_handler_mock = Mock()
    monkeypatch.setattr('irrd.scripts.submit_email.handle_email_submission',
                        lambda data: email_handler_mock)
    email_handler_mock.user_report = lambda: 'output'
    reload_signal_mock = Mock()
    monkeypatch.setattr('irrd.scripts.submit_email.send_reload_signal',
                        reload_signal_mock)

    run('test input', 'pidfile')

    # Direct call on the mock itself flattens to an empty method name.
    assert flatten_mock_calls(reload_signal_mock) == [
        ['', ('pidfile',), {}]
    ]
def test_update_database_success(capsys, monkeypatch):
    """A clean update commits and closes without printing output."""
    database_handler_mock = Mock()
    monkeypatch.setattr('irrd.scripts.update_database.DatabaseHandler',
                        lambda enable_preload_update=False: database_handler_mock)
    roa_validator_mock = Mock()
    monkeypatch.setattr('irrd.scripts.update_database.BulkRouteROAValidator',
                        lambda dh: roa_validator_mock)
    parser_mock = Mock()
    monkeypatch.setattr(
        'irrd.scripts.update_database.MirrorUpdateFileImportParser',
        lambda *args, **kwargs: parser_mock)
    parser_mock.run_import = lambda: None

    assert update('TEST', 'test.db') == 0

    assert flatten_mock_calls(database_handler_mock) == [
        ['commit', (), {}],
        ['close', (), {}],
    ]
    # run_import was replaced with a plain lambda, so it leaves no mock calls.
    assert flatten_mock_calls(parser_mock) == []
    assert not capsys.readouterr().out
def test_run(self, monkeypatch, config_override, tmpdir, caplog):
    """Scope filter run: the validator's three result sets are written back
    via update_scopefilter_status, committed, and summarized in the log."""
    mock_dh = Mock(spec=DatabaseHandler)
    monkeypatch.setattr(
        'irrd.mirroring.mirror_runners_import.DatabaseHandler', lambda: mock_dh)
    mock_scopefilter = Mock(spec=ScopeFilterValidator)
    monkeypatch.setattr(
        'irrd.mirroring.mirror_runners_import.ScopeFilterValidator',
        lambda: mock_scopefilter)
    # Validator returns (now in scope, now out of scope AS, now out of scope prefix).
    mock_scopefilter.validate_all_rpsl_objects = lambda database_handler: (
        [{'rpsl_pk': 'pk_now_in_scope1'}, {'rpsl_pk': 'pk_now_in_scope2'}],
        [{'rpsl_pk': 'pk_now_out_scope_as1'}, {'rpsl_pk': 'pk_now_out_scope_as2'}],
        [{'rpsl_pk': 'pk_now_out_scope_prefix1'}, {'rpsl_pk': 'pk_now_out_scope_prefix2'}],
    )
    ScopeFilterUpdateRunner().run()
    assert flatten_mock_calls(mock_dh) == [
        ['update_scopefilter_status', (), {
            'rpsl_objs_now_in_scope': [{'rpsl_pk': 'pk_now_in_scope1'}, {'rpsl_pk': 'pk_now_in_scope2'}],
            'rpsl_objs_now_out_scope_as': [{'rpsl_pk': 'pk_now_out_scope_as1'}, {'rpsl_pk': 'pk_now_out_scope_as2'}],
            'rpsl_objs_now_out_scope_prefix': [{'rpsl_pk': 'pk_now_out_scope_prefix1'}, {'rpsl_pk': 'pk_now_out_scope_prefix2'}],
        }],
        ['commit', (), {}],
        ['close', (), {}]]
    assert '2 newly in scope, 2 newly out of scope AS, 2 newly out of scope prefix' in caplog.text
def test_load_database_import_error(capsys, monkeypatch, caplog):
    """A parser error must roll back, return 1, and report on stdout only."""
    database_handler_mock = Mock()
    monkeypatch.setattr('irrd.scripts.load_database.DatabaseHandler',
                        lambda: database_handler_mock)
    parser_mock = Mock()
    monkeypatch.setattr('irrd.scripts.load_database.MirrorFileImportParser',
                        lambda *args, **kwargs: parser_mock)
    parser_mock.run_import = lambda: 'object-parsing-error'

    assert load('TEST', 'test.db', 42) == 1

    assert flatten_mock_calls(database_handler_mock) == [
        ['delete_all_rpsl_objects_with_journal', ('TEST',), {}],
        ['disable_journaling', (), {}],
        ['rollback', (), {}],
        ['close', (), {}],
    ]
    # run_import was replaced with a plain lambda, so it leaves no mock calls.
    assert flatten_mock_calls(parser_mock) == []
    # The error goes to the user on stdout, not to the log.
    assert 'object-parsing-error' not in caplog.text
    stdout = capsys.readouterr().out
    assert 'Error occurred while processing object:\nobject-parsing-error' in stdout
def test_exception_handling(self, prepare_parser, caplog):
    """Unexpected query errors yield a generic message and a logged traceback."""
    query_mock, handler_mock, preloader_mock, parser = prepare_parser
    handler_mock.reset_mock()
    handler_mock.execute_query = Mock(side_effect=Exception('test-error'))

    response = parser.handle_query('foo')

    assert response.response_type == WhoisQueryResponseType.ERROR
    assert response.mode == WhoisQueryResponseMode.RIPE
    assert response.result == 'An internal error occurred while processing this query.'
    assert flatten_mock_calls(handler_mock)[1] == ['close', (), {}]
    assert 'An exception occurred while processing whois query' in caplog.text
    assert 'test-error' in caplog.text
def test_database_serial_range(self, monkeypatch, prepare_parser):
    """!j queries: serial ranges are reported per source; a source not in
    the configuration is reported with an X line."""
    mock_dq, mock_dh, mock_preloader, parser = prepare_parser
    mock_dsq = Mock()
    monkeypatch.setattr(
        'irrd.server.whois.query_parser.DatabaseStatusQuery', lambda: mock_dsq)
    # Canned status rows: TEST1 has serials, TEST2 has none recorded.
    mock_query_result = [
        {'source': 'TEST1', 'serial_oldest_seen': 10, 'serial_newest_seen': 20, 'serial_last_export': 10},
        {'source': 'TEST2', 'serial_oldest_seen': None, 'serial_newest_seen': None, 'serial_last_export': None},
    ]
    mock_dh.execute_query = lambda query: mock_query_result
    # !j-* queries all sources.
    response = parser.handle_query('!j-*')
    assert response.response_type == WhoisQueryResponseType.SUCCESS
    assert response.mode == WhoisQueryResponseMode.IRRD
    assert response.result == 'TEST1:N:10-20:10\nTEST2:N:-'
    assert flatten_mock_calls(mock_dsq) == [[
        'sources', (['TEST1', 'TEST2'], ), {}
    ]]
    mock_dsq.reset_mock()
    # Source names are upper-cased; an unknown source adds an X error line.
    response = parser.handle_query('!jtest1,test-invalid')
    assert response.response_type == WhoisQueryResponseType.SUCCESS
    assert response.mode == WhoisQueryResponseMode.IRRD
    assert response.result == 'TEST1:N:10-20:10\nTEST2:N:-\nTEST-INVALID:X:Database unknown'
    assert flatten_mock_calls(mock_dsq) == [[
        'sources', (['TEST1', 'TEST-INVALID'], ), {}
    ]]
def test_rpsl_attribute_search(self, prepare_resolver):
    """Attribute search hits all sources; unknown attributes raise."""
    query_mock, handler_mock, preloader_mock, expected_rows, resolver = prepare_resolver

    result = resolver.rpsl_attribute_search('mnt-by', 'MNT-TEST')
    assert list(result) == expected_rows
    assert flatten_mock_calls(query_mock) == [
        ['sources', (['TEST1', 'TEST2'],), {}],
        ['lookup_attr', ('mnt-by', 'MNT-TEST'), {}],
    ]

    handler_mock.execute_query = lambda query, refresh_on_error=False: []
    with pytest.raises(InvalidQueryException):
        resolver.rpsl_attribute_search('invalid-attr', 'MNT-TEST')
def test_route_search_exact_rpki_aware(self, prepare_resolver, config_override):
    """Exact route search in RPKI-aware mode.

    With a roa_source configured, the resolver adds the pseudo-source RPKI
    and restricts results to not_found/valid RPKI statuses; disabling the
    RPKI filter drops the rpki_status restriction from the query.
    """
    mock_dq, mock_dh, mock_preloader, mock_query_result, resolver = prepare_resolver
    config_override({
        'sources': {'TEST1': {}, 'TEST2': {}},
        'sources_default': [],
        'rpki': {'roa_source': 'https://example.com/roa.json'},
    })
    # Rebuild the resolver so it picks up the RPKI-aware configuration.
    resolver = QueryResolver(mock_preloader, mock_dh)
    resolver.out_scope_filter_enabled = False
    result = resolver.route_search(IP('192.0.2.0/25'), RouteLookupType.EXACT)
    assert list(result) == mock_query_result
    assert flatten_mock_calls(mock_dq) == [
        ['sources', (['TEST1', 'TEST2', 'RPKI'], ), {}],
        ['rpki_status', ([RPKIStatus.not_found, RPKIStatus.valid], ), {}],
        ['object_classes', (['route', 'route6'], ), {}],
        ['ip_exact', (IP('192.0.2.0/25'), ), {}]
    ]
    mock_dq.reset_mock()
    # Disabling the RPKI filter removes the rpki_status restriction.
    resolver.disable_rpki_filter()
    result = resolver.route_search(IP('192.0.2.0/25'), RouteLookupType.EXACT)
    assert list(result) == mock_query_result
    assert flatten_mock_calls(mock_dq) == [[
        'sources', (['TEST1', 'TEST2', 'RPKI'], ), {}
    ], ['object_classes', (['route', 'route6'], ), {}],
        ['ip_exact', (IP('192.0.2.0/25'), ), {}]]
    mock_dq.reset_mock()
    # The RPKI pseudo-source can itself be selected as a query source.
    resolver.set_query_sources(['RPKI'])
    assert resolver.sources == ['RPKI']
def test_restrict_object_class(self, prepare_parser):
    """-T restricts the object class for a single query only."""
    query_mock, handler_mock, parser = prepare_parser
    handler_mock.reset_mock()

    response = parser.handle_query('-T route -i mnt-by MNT-TEST')
    assert response.response_type == WhoisQueryResponseType.SUCCESS
    assert response.mode == WhoisQueryResponseMode.RIPE
    assert response.result == MOCK_ROUTE_COMBINED
    assert flatten_mock_calls(query_mock) == [
        ['object_classes', (['route'],), {}],
        ['lookup_attr', ('mnt-by', 'MNT-TEST'), {}],
    ]
    query_mock.reset_mock()

    # The -T restriction must not carry over to the next query.
    response = parser.handle_query('-i mnt-by MNT-TEST')
    assert response.response_type == WhoisQueryResponseType.SUCCESS
    assert response.mode == WhoisQueryResponseMode.RIPE
    assert response.result == MOCK_ROUTE_COMBINED
    assert flatten_mock_calls(query_mock) == [
        ['lookup_attr', ('mnt-by', 'MNT-TEST'), {}],
    ]
def test_handle_irrd_routes_for_as_set(self, prepare_parser, monkeypatch):
    """!a queries: resolve a set to its origin ASes and collect prefixes.

    !a returns both address families, !a4 only route objects, !a6 only
    route6 objects; an empty result produces KEY_NOT_FOUND.
    """
    mock_dq, mock_dh, parser = prepare_parser
    # Short-circuit set resolution to a fixed pair of origin ASes.
    monkeypatch.setattr(
        'irrd.server.whois.query_parser.WhoisQueryParser._recursive_set_resolve',
        lambda self, set_name: {'AS65547', 'AS65548'}
    )
    response = parser.handle_query('!aAS-FOO')
    assert parser._current_set_root_object_class == 'as-set'
    assert response.response_type == WhoisQueryResponseType.SUCCESS
    assert response.mode == WhoisQueryResponseMode.IRRD
    assert response.result == '192.0.2.0/25 192.0.2.128/25'
    assert flatten_mock_calls(mock_dq) == [
        ['object_classes', (['route', 'route6'],), {}],
        ['asns_first', ({65547, 65548},), {}],
    ]
    mock_dq.reset_mock()
    # !a4 restricts the search to IPv4 route objects.
    response = parser.handle_query('!a4AS-FOO')
    assert response.response_type == WhoisQueryResponseType.SUCCESS
    assert response.mode == WhoisQueryResponseMode.IRRD
    assert response.result == '192.0.2.0/25 192.0.2.128/25'
    assert flatten_mock_calls(mock_dq) == [
        ['object_classes', (['route'],), {}],
        ['asns_first', ({65547, 65548},), {}],
    ]
    mock_dq.reset_mock()
    # !a6 finds nothing here, producing KEY_NOT_FOUND with an empty result.
    response = parser.handle_query('!a6AS-FOO')
    assert response.response_type == WhoisQueryResponseType.KEY_NOT_FOUND
    assert response.mode == WhoisQueryResponseMode.IRRD
    assert response.result == ''
    assert flatten_mock_calls(mock_dq) == [
        ['object_classes', (['route6'],), {}],
        ['asns_first', ({65547, 65548},), {}],
    ]
    mock_dq.reset_mock()
def test_run_import_ftp(self, monkeypatch, config_override):
    """Full RPSL import over FTP from two source files plus a serial file.

    Both files (one gzipped) must be fed to the import parser, and the
    serial from the serial source recorded as newest mirrored serial.
    """
    config_override({
        'rpki': {'roa_source': 'https://example.com/roa.json'},
        'sources': {
            'TEST': {
                'import_source': ['ftp://host/source1.gz', 'ftp://host/source2'],
                'import_serial_source': 'ftp://host/serial',
            }
        }
    })
    mock_dh = Mock()
    mock_ftp = Mock()
    # Class attribute collects the data each parser instance receives.
    MockMirrorFileImportParser.rpsl_data_calls = []
    monkeypatch.setattr(
        'irrd.mirroring.mirror_runners_import.MirrorFileImportParser',
        MockMirrorFileImportParser)
    monkeypatch.setattr('irrd.mirroring.mirror_runners_import.FTP',
                        lambda url, timeout: mock_ftp)
    MockMirrorFileImportParser.expected_serial = 424242
    mock_bulk_validator_init = Mock()
    monkeypatch.setattr(
        'irrd.mirroring.mirror_runners_import.BulkRouteROAValidator',
        mock_bulk_validator_init)
    responses = {
        # gzipped data, contains 'source1'
        'RETR /source1.gz': b64decode('H4sIAE4CfFsAAyvOLy1KTjUEAE5Fj0oHAAAA'),
        'RETR /source2': b'source2',
        'RETR /serial': b'424242',
    }
    mock_ftp.retrbinary = lambda path, callback: callback(responses[path])
    RPSLMirrorFullImportRunner('TEST').run(mock_dh, serial_newest_mirror=424241)
    assert MockMirrorFileImportParser.rpsl_data_calls == [
        'source1', 'source2'
    ]
    assert flatten_mock_calls(mock_dh) == [
        ['delete_all_rpsl_objects_with_journal', ('TEST', ), {}],
        ['disable_journaling', (), {}],
        ['record_serial_newest_mirror', ('TEST', 424242), {}],
    ]
    # The bulk validator must be constructed with the same database handler.
    assert mock_bulk_validator_init.mock_calls[0][1][0] == mock_dh
def test_submit_email_fail(capsys, monkeypatch, caplog):
    """A failing submission logs the error, hides it from stdout, skips reload."""
    email_handler_mock = Mock(side_effect=Exception('expected-test-error'))
    monkeypatch.setattr('irrd.scripts.submit_email.handle_email_submission',
                        email_handler_mock)
    reload_signal_mock = Mock()
    monkeypatch.setattr('irrd.scripts.submit_email.send_reload_signal',
                        reload_signal_mock)

    run('test input', 'pidfile')

    assert 'expected-test-error' in caplog.text
    stdout = capsys.readouterr().out
    assert 'An internal error occurred' in stdout
    assert 'expected-test-error' not in stdout
    assert flatten_mock_calls(reload_signal_mock) == []
def test_route_search_invalid(self, prepare_parser):
    """Malformed route searches produce specific errors and close the handler."""
    query_mock, handler_mock, parser = prepare_parser

    cases = [
        ('!rz', "Invalid input for route search: z"),
        ('!rz,o', "Invalid input for route search: z,o"),
        ('!r192.0.2.0/25,z', "Invalid route search option: z"),
    ]
    for query, expected_error in cases:
        response = parser.handle_query(query)
        assert response.response_type == WhoisQueryResponseType.ERROR
        assert response.mode == WhoisQueryResponseMode.IRRD
        assert response.result == expected_error
        assert flatten_mock_calls(handler_mock) == [['close', (), {}]]
        handler_mock.reset_mock()
def test_database_status_get_permitted_client_in_access_list(
        self, prepare_resource_mocks):
    """A permitted client receives the UTF-8 encoded status page."""
    status_request_mock, http_request_mock = prepare_resource_mocks
    resource = DatabaseStatusResource()
    assert resource.isLeaf

    status_request_mock.generate_status = lambda: 'test 🦄'
    response = resource.render_GET(http_request_mock)

    assert response == b'test \xf0\x9f\xa6\x84'
    assert flatten_mock_calls(http_request_mock) == [
        ['setHeader', (b'Content-Type', b'text/plain; charset=utf-8'), {}]
    ]
def test_database_status_get_denied_unknown_client_address(
        self, prepare_resource_mocks):
    """Requests from a non-IP client address are rejected with a 403."""
    status_request_mock, http_request_mock = prepare_resource_mocks
    http_request_mock.getClientAddress = lambda: UNIXAddress('not-supported')
    resource = DatabaseStatusResource()
    status_request_mock.generate_status = lambda: 'test 🦄'

    response = resource.render_GET(http_request_mock)

    assert response == b'Access denied'
    assert flatten_mock_calls(http_request_mock) == [
        ['setResponseCode', (403,), {}]
    ]
def test_route_set_members(self, prepare_parser):
    """!i with depth: route-set members resolve through nested sets,
    collecting literal prefixes and prefixes from referenced as-set origins."""
    mock_dq, mock_dh, mock_preloader, parser = prepare_parser
    # First level: a route-set referencing a second-level set and an
    # unknown member name.
    mock_query_result1 = [
        {
            'pk': uuid.uuid4(),
            'rpsl_pk': 'RS-FIRSTLEVEL',
            'parsed_data': {'as-set': 'RS-FIRSTLEVEL', 'members': ['RS-SECONDLEVEL', 'RS-2nd-UNKNOWN']},
            'object_text': 'text',
            'object_class': 'route-set',
            'source': 'TEST1',
        },
    ]
    # Second level: references an as-set plus a literal prefix.
    mock_query_result2 = [
        {
            'pk': uuid.uuid4(),
            'rpsl_pk': 'RS-SECONDLEVEL',
            'parsed_data': {'as-set': 'RS-SECONDLEVEL', 'members': ['AS-REFERRED', '192.0.2.0/25']},
            'object_text': 'text',
            'object_class': 'route-set',
            'source': 'TEST1',
        },
    ]
    # Referenced as-set resolving to a single origin AS.
    mock_query_result3 = [
        {
            'pk': uuid.uuid4(),
            'rpsl_pk': 'AS-REFERRED',
            'parsed_data': {'as-set': 'AS-REFERRED', 'members': ['AS65545']},
            'object_text': 'text',
            'object_class': 'as-set',
            'source': 'TEST2',
        },
    ]
    # Each database query consumes the next canned result; the trailing
    # empty list terminates the recursion.
    mock_query_iterator = iter([mock_query_result1, mock_query_result2, mock_query_result3, []])
    mock_dh.execute_query = lambda query: iter(next(mock_query_iterator))
    mock_preloader.routes_for_origins = Mock(return_value=['192.0.2.128/25'])
    response = parser.handle_query('!iRS-FIRSTLEVEL,1')
    assert response.response_type == WhoisQueryResponseType.SUCCESS
    assert response.mode == WhoisQueryResponseMode.IRRD
    assert response.result == '192.0.2.0/25 192.0.2.128/25'
    assert flatten_mock_calls(mock_dq) == [
        ['object_classes', (['as-set', 'route-set'],), {}],
        ['rpsl_pks', ({'RS-FIRSTLEVEL'},), {}],
        ['object_classes', (['as-set', 'route-set'],), {}],
        ['rpsl_pks', ({'RS-SECONDLEVEL', 'RS-2nd-UNKNOWN'},), {}],
        ['object_classes', (['as-set', 'route-set'],), {}],
        ['rpsl_pks', ({'AS-REFERRED'},), {}],
    ]
def test_parse_invalid_object_syntax(self, prepare_mocks):
    """A person object missing mandatory attributes must FAIL, with a
    per-attribute error report and no object queries issued."""
    # NOTE(review): the triple-quoted literals below appear to have lost
    # their internal newlines in this copy of the file; dedent()ed RPSL
    # and report text would normally span multiple lines — confirm in VCS.
    mock_dq, mock_dh, mock_email = prepare_mocks
    mock_dh.execute_query = lambda query: []
    rpsl_text = textwrap.dedent(""" person: Placeholder Person Object nic-hdl: PERSON-TEST changed: [email protected] 20190701 # comment source: TEST """)
    handler = ChangeSubmissionHandler(rpsl_text)
    assert handler.status() == 'FAILED'
    # Validation fails before any lookups are needed.
    assert flatten_mock_calls(mock_dq) == []
    assert mock_dh.mock_calls[0][0] == 'commit'
    assert mock_dh.mock_calls[1][0] == 'close'
    assert handler.submitter_report() == textwrap.dedent(""" SUMMARY OF UPDATE: Number of objects found: 1 Number of objects processed successfully: 0 Create: 0 Modify: 0 Delete: 0 Number of objects processed with errors: 1 Create: 0 Modify: 0 Delete: 0 DETAILED EXPLANATION: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --- Request FAILED: [person] PERSON-TEST person: Placeholder Person Object nic-hdl: PERSON-TEST changed: [email protected] 20190701 # comment source: TEST ERROR: Mandatory attribute "address" on object person is missing ERROR: Mandatory attribute "phone" on object person is missing ERROR: Mandatory attribute "e-mail" on object person is missing ERROR: Mandatory attribute "mnt-by" on object person is missing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """)
def test_parse_invalid_object_syntax(self, prepare_mocks):
    """A person object missing mandatory attributes must FAIL, with a
    per-attribute error report and no object queries issued."""
    # NOTE(review): the triple-quoted literals below appear to have lost
    # their internal newlines in this copy of the file; dedent()ed RPSL
    # and report text would normally span multiple lines — confirm in VCS.
    mock_dq, mock_dh = prepare_mocks
    mock_dh.execute_query = lambda query: []
    rpsl_text = textwrap.dedent(""" person: Placeholder Person Object nic-hdl: PERSON-TEST changed: 2009-07-24T17:00:00Z source: TEST """)
    handler = UpdateRequestHandler(rpsl_text)
    assert handler.status() == 'FAILED'
    # Validation fails before any lookups are needed.
    assert flatten_mock_calls(mock_dq) == []
    assert mock_dh.mock_calls[0][0] == 'commit'
    assert mock_dh.mock_calls[1][0] == 'close'
    assert handler.user_report() == textwrap.dedent(""" SUMMARY OF UPDATE: Number of objects found: 1 Number of objects processed successfully: 0 Create: 0 Modify: 0 Delete: 0 Number of objects processed with errors: 1 Create: 0 Modify: 0 Delete: 0 DETAILED EXPLANATION: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --- Request FAILED: [person] PERSON-TEST person: Placeholder Person Object nic-hdl: PERSON-TEST changed: 2009-07-24T17:00:00Z source: TEST ERROR: Mandatory attribute 'address' on object person is missing ERROR: Mandatory attribute 'phone' on object person is missing ERROR: Mandatory attribute 'e-mail' on object person is missing ERROR: Mandatory attribute 'mnt-by' on object person is missing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """)
def test_full_import_call(self, monkeypatch):
    """With no recorded status, the update runner delegates to a full import."""
    database_handler_mock = Mock()
    status_query_mock = Mock()
    full_import_runner_mock = Mock()
    monkeypatch.setattr('irrd.mirroring.nrtm_runner.DatabaseHandler',
                        lambda: database_handler_mock)
    monkeypatch.setattr('irrd.mirroring.nrtm_runner.RPSLDatabaseStatusQuery',
                        lambda: status_query_mock)
    monkeypatch.setattr('irrd.mirroring.nrtm_runner.MirrorFullImportRunner',
                        lambda source: full_import_runner_mock)
    # An empty status result means this source was never mirrored before.
    database_handler_mock.execute_query = lambda q: iter([])

    runner = MirrorUpdateRunner(source='TEST')
    runner.run()

    assert flatten_mock_calls(status_query_mock) == [['source', ('TEST',), {}]]
    assert flatten_mock_calls(database_handler_mock) == [
        ['commit', (), {}],
        ['close', (), {}],
    ]
    assert len(full_import_runner_mock.mock_calls) == 1
    assert full_import_runner_mock.mock_calls[0][0] == 'run'
def test_test_parse_nrtm_v1_valid(self, config_override):
    """A valid NRTM v1 stream parses; the last serial is force-recorded."""
    config_override({
        'sources': {
            'TEST': {
                'object_class_filter': 'person',
                'strict_import_keycert_objects': True,
            }
        }
    })
    database_handler_mock = Mock()

    parser = NRTMStreamParser('TEST', SAMPLE_NRTM_V1, database_handler_mock)
    self._assert_valid(parser)

    assert flatten_mock_calls(database_handler_mock) == [
        ['force_record_serial_seen', ('TEST', 11012701), {}]
    ]