class TestDoubleFilter(BaseFilter):
    """Runs the shared BaseFilter operator tests against DoubleFilter."""

    FILTER = DoubleFilter

    # Single-operand values for is_null / is_not_null.
    # NOTE(review): `not None` evaluates to True at class-body execution time,
    # so the parameters are (None, True, 1, 10.0) — confirm True is intended.
    unary_comparison_scenario = fixture(
        None,
        not None,
        1,
        10.0,
        autoparam=True,
    )
    # (left, right) operand pairs: equal, zero, negative, unequal and
    # large-magnitude values, mixing int and float operands.
    binary_comparison_scenario = fixture(
        (1.0, 1),
        (2.0, 2),
        (0.0, 0),
        (-3.0, 10),
        (1.0, 2),
        (3.0, 5),
        (1000000000, 100000000000),
        autoparam=True,
    )
    # (value, min, max) triples for between / not_between, including
    # degenerate (min == max) and inverted (min > max) ranges.
    ternary_comparison_scenario = fixture(
        (1, 1.0, 1),
        (3, 1.0, 5),
        (1, 2.0, 3),
        (1, 2.0, 2),
        (2, 2.0, 3),
        (-1, 0, 1),
        autoparam=True,
    )
class TestNumericFilter(BaseFilter):
    """Runs the shared BaseFilter operator tests against NumericFilter."""

    FILTER = NumericFilter

    # Single-operand values for is_null / is_not_null.
    # NOTE(review): `not None` evaluates to True at class-body execution time,
    # so the parameters are (None, True, 10) — confirm True is intended.
    unary_comparison_scenario = fixture(
        None,
        not None,
        10,
        autoparam=True,
    )
    # (left, right) integer operand pairs: equal, zero, negative and unequal.
    binary_comparison_scenario = fixture(
        (1, 1),
        (2, 2),
        (0, 0),
        (-3, 10),
        (1, 2),
        (3, 5),
        autoparam=True,
    )
    # (value, min, max) triples for between / not_between, including
    # degenerate (min == max) and inverted (min > max) ranges.
    ternary_comparison_scenario = fixture(
        (1, 1, 1),
        (3, 1, 5),
        (1, 2, 3),
        (1, 2, 2),
        (2, 2, 3),
        (-1, 0, 1),
        autoparam=True,
    )
class TestBooleanFilter(BaseFilter):
    """Runs the shared BaseFilter operator tests against BooleanFilter.

    No ternary/collection/string scenarios are defined, so those operator
    tests fall back to the dummy fixtures on BaseFilter.
    """

    FILTER = BooleanFilter

    # Single-operand values for is_null / is_not_null.
    unary_comparison_scenario = fixture(True, False, autoparam=True)
    # All four (left, right) truth-value combinations.
    binary_comparison_scenario = fixture((True, False), (False, True), (True, True), (False, False), autoparam=True)
def test_command_name(kwargs, name, collections, db, caplog):
    """Adding a named schema creates it, records the selected collections and
    the default note, and logs each routine that ran.
    """
    schema = f'view_data_{name}'
    identifier = sql.Identifier(schema)
    with fixture(db, **kwargs) as result:
        assert db.schema_exists(schema)
        # Every requested collection is recorded in selected_collections.
        assert db.all(
            sql.SQL('SELECT * FROM {schema}.selected_collections').format(
                schema=identifier)) == [(collection, ) for collection in collections]
        # Exactly one note row exists, with the default text.
        assert db.all(
            sql.SQL('SELECT id, note FROM {schema}.note').format(
                schema=identifier)) == [
            (1, 'Default'),
        ]
        assert result.exit_code == 0
        assert result.output == ''
        # The log records the parsed arguments and each routine in order.
        assert_log_records(caplog, command, [
            f'Arguments: collections={collections!r} note=Default name={kwargs.get("name")} tables_only=False',
            f'Added {name}',
            'Running summary-tables routine',
            'Running field-counts routine',
            'Running field-lists routine',
        ])
def test_sample_resource_not_found(self, mock_read_definition, mock_flatten_items, *args):
    """sample() raises VirgaException when no resource matches the identifier."""
    mock_read_definition.return_value = fixture('valid-definition.yaml', get_yaml=True)
    # An empty flattened result simulates a lookup that matched nothing.
    mock_flatten_items.return_value = []
    with self.assertRaisesRegex(VirgaException, 'Resource not found'):
        self.provider.sample('subnets', 'id-123456')
def test_invalid_testfile_raise_virga_exc(self):
    """read_testfile() raises VirgaException when the test file is invalid YAML.

    open() is patched so that any filename yields the invalid fixture content;
    the filename passed below therefore does not need to exist.
    """
    # The previous `as _` bound the patcher to an unused name; dropped.
    with patch('builtins.open', mock_open(read_data=fixture('invalid.yaml'))):
        with self.assertRaisesRegex(VirgaException, 'Invalid test file'):
            read_testfile([
                'test-file-not-here.yaml',
            ])
def test_sample_definition_is_virga_client(self, mock_read_definition, *args):
    """sample() raises when the matched definition delegates to the Virga client.

    NOTE(review): the name suggests the 'certificates' definition is of the
    virga-client type, which sample() does not support — confirm against the
    provider implementation.
    """
    mock_read_definition.return_value = fixture('valid-definition.yaml', get_yaml=True)
    with self.assertRaisesRegex(
            VirgaException, 'Resource sample for certificates not supported'):
        self.provider.sample('certificates', 'id-123456')
class TestStringFilter(BaseFilter):
    """Runs the shared BaseFilter operator tests against StringFilter."""

    FILTER = StringFilter

    # Single-operand values for is_null / is_not_null, including the empty string.
    # NOTE(review): `not None` evaluates to True at class-body execution time.
    unary_comparison_scenario = fixture(None, not None, 'hi', '', autoparam=True)
    # (left, right) pairs: ordered, empty, reverse-ordered and equal strings.
    binary_comparison_scenario = fixture(('a', 'b'), ('', ''), ('b', 'a'), ('a', 'a'), autoparam=True)
    # (value, min, max) triples for lexicographic between / not_between.
    ternary_comparison_scenario = fixture(('a', 'h', 'z'), ('a', 'hiho', 'z'), ('a', 'b', 'c'), ('a', 'c', 'b'), ('b', 'a', 'c'), autoparam=True)
    # (member, container) pairs for in / not_in / contains, in both directions.
    collection_scenario = fixture(('a', 'abcd'), ('abcd', 'a'), autoparam=True)
    # (string, affix) pairs for begins_with / ends_with variants.
    strings_scenario = fixture(('hello', 'hell'), ('hello', 'stuff'), autoparam=True)
def test_find_certificate_expected_bahaviour(self, mock_call):
    """find_certificate() returns the details of the certificate matching the
    requested domain name.

    NOTE(review): "bahaviour" in the method name is a typo for "behaviour";
    left unchanged because the name is the test's discovered identifier.
    """
    # First AWS call returns the certificate list, the second returns the
    # details of the matched certificate.
    mock_call.side_effect = [
        responses.acm_certificate_list,
        fixture('certificate.json', get_json=True)
    ]
    expected = responses.acm_result_find_certificate
    self.assertDictEqual(
        expected,
        VirgaClient.find_certificate({'domain_name': 'my.any-domain.com'}))
def test_command(db, caplog):
    """The command exits cleanly and silently for an existing collection schema."""
    with fixture(db):
        invocation = CliRunner().invoke(cli, [command, 'collection_1'])
        assert invocation.exit_code == 0
        assert invocation.output == ''
        assert_log_records(caplog, command, [])
def test_command(db, caplog):
    """The list command prints a table of schemas with collections and notes.

    Only the start of the output is compared, because the note's timestamp is
    generated at fixture time (truncated after "(202").

    NOTE(review): the template literal had an `f` prefix but contains no
    placeholders — removed. Table column spacing was reconstructed from the
    separator-row widths (the source's whitespace was mangled).
    """
    with fixture(db):
        runner = CliRunner()
        result = runner.invoke(cli, [command])
        text = dedent("""\
            | Name         | Collections   | Note                          |
            |--------------|---------------|-------------------------------|
            | collection_1 | 1             | Default (202""")
        assert result.exit_code == 0
        assert result.output.startswith(text)
        assert_log_records(caplog, command, [])
class TestDateTimeFilter(BaseFilter):
    """Runs the shared BaseFilter operator tests against DateTimeFilter."""

    FILTER = DateTimeFilter

    # Four consecutive days used as ordered comparison operands.
    t0 = datetime(2017, 11, 1)
    t1 = datetime(2017, 11, 2)
    t2 = datetime(2017, 11, 3)
    t3 = datetime(2017, 11, 4)
    # NOTE(review): tnow is bound to datetime.max, identical to tmax below —
    # the name suggests datetime.now() was intended; confirm.
    tnow = datetime.max
    tmax = datetime.max
    tmin = datetime.min

    # Single-operand values for is_null / is_not_null.
    # NOTE(review): `not None` evaluates to True at class-body execution time.
    unary_comparison_scenario = fixture(None, not None, t1, t2, t3, autoparam=True)
    # (left, right) pairs: ordered, reversed, equal, and extreme bounds.
    binary_comparison_scenario = fixture((t0, t1), (t1, t0), (t0, t0), (tmax, tmin), autoparam=True)
    # (value, min, max) triples for between / not_between.
    # NOTE(review): (t0, t2, tnow) appears twice — likely one tuple was meant
    # to differ; confirm.
    ternary_comparison_scenario = fixture((t0, t2, tnow), (t0, t2, tnow), (t0, t1, t3), (t0, t3, t1), (t1, t0, t3), (t0, tmin, tmax), autoparam=True)
class TestTimeFilter(BaseFilter):
    """Runs the shared BaseFilter operator tests against TimeFilter."""

    FILTER = TimeFilter

    # Four consecutive hours used as ordered comparison operands.
    t0 = time(0)
    t1 = time(1)
    t2 = time(2)
    t3 = time(3)
    tmax = time.max
    tmin = time.min
    # NOTE(review): time() with no arguments is midnight, i.e. equal to t0 —
    # the name suggests "now" was intended; confirm.
    tnow = time()

    # Single-operand values for is_null / is_not_null.
    # NOTE(review): `not None` evaluates to True at class-body execution time.
    unary_comparison_scenario = fixture(None, not None, t1, t2, t3, autoparam=True)
    # (left, right) pairs: ordered, reversed, equal, and extreme bounds.
    binary_comparison_scenario = fixture((t0, t1), (t1, t0), (t0, t0), (tmax, tmin), autoparam=True)
    # (value, min, max) triples for between / not_between.
    # NOTE(review): (t0, t2, tnow) appears twice — likely one tuple was meant
    # to differ; confirm.
    ternary_comparison_scenario = fixture((t0, t2, tnow), (t0, t2, tnow), (t0, t1, t3), (t0, t3, t1), (t1, t0, t3), (t0, tmin, tmax), autoparam=True)
def test_command(db, caplog):
    """The drop command removes the collection's schema, succeeds silently,
    and logs the DROP SCHEMA statement it ran.
    """
    with fixture(db):
        runner = CliRunner()
        # Plain literal: the previous f-string had no placeholders.
        schema = 'view_data_collection_1'
        assert db.schema_exists(schema)
        result = runner.invoke(cli, [command, 'collection_1'])
        assert not db.schema_exists(schema)
        assert result.exit_code == 0
        assert result.output == ''
        assert_log_records(caplog, command, [
            'Arguments: name=view_data_collection_1',
            f'DROP SCHEMA "{schema}" CASCADE',
        ])
def test_command_multiple(db, caplog):
    """The list command shows a multi-collection schema with all of its notes,
    most recent first.

    Only the start of the output is compared, because the default note's
    timestamp is generated at fixture time (truncated after "(202").

    NOTE(review): two f-strings with no placeholders were reduced to plain
    literals. Table column spacing was reconstructed from the separator-row
    widths (the source's whitespace was mangled).
    """
    with fixture(db, collections='1,2'):
        runner = CliRunner()
        # Insert a second, dated note alongside the fixture's default note.
        statement = sql.SQL(
            "INSERT INTO {table} (note, created_at) VALUES (%(note)s, %(created_at)s)"
        ).format(table=sql.Identifier('view_data_collection_1_2', 'note'))
        db.execute(statement, {
            'note': 'Another',
            'created_at': datetime(2000, 1, 1)
        })
        db.commit()
        result = runner.invoke(cli, [command])
        text = dedent("""\
            | Name           | Collections   | Note                          |
            |----------------|---------------|-------------------------------|
            | collection_1_2 | 1, 2          | Another (2000-01-01 00:00:00) |
            |                |               | Default (202""")
        assert result.exit_code == 0
        assert result.output.startswith(text)
        assert_log_records(caplog, command, [])
class TestAbstractProvider(TestCase):
    """Unit tests for AbstractProvider, exercised through the MockProvider
    and MockArgParse test doubles.

    Bug fixed: test_outcome_flatten_result previously called
    ``mock_flatten.called_once_with('any')`` — a no-op attribute access on a
    Mock that asserts nothing — instead of ``assert_called_once_with``.
    """

    def setUp(self):
        # Silent provider writing output under /tmp; used by most tests.
        self.arg_parse = MockArgParse(debug=False, silent=True, logfile=None, output='/tmp', definitions='any')
        self.provider = MockProvider(self.arg_parse)

    def test_abstract_method(self):
        # A bare subclass must raise for every abstract operation.
        class Provider(AbstractProvider):
            pass

        provider = Provider(
            MockArgParse(debug=False, silent=False, logfile=None))
        with self.assertRaises(NotImplementedError):
            provider.action()
        with self.assertRaises(NotImplementedError):
            provider.lookup('', '', '')

    @patch('logging.addLevelName')
    def test_set_logger_add_success(self, mock_add_level):
        # set_logger registers the custom SUCCESS log level.
        arg_parse = MockArgParse(debug=False, silent=False, logfile=None)
        provider = MockProvider(arg_parse)
        provider.set_logger(arg_parse)
        mock_add_level.assert_called_once_with(SUCCESS, 'SUCCESS')

    @patch('logging.Formatter')
    def test_set_logger_set_formatter(self, mock_formatter):
        arg_parse = MockArgParse(debug=False, silent=False, logfile=None)
        provider = MockProvider(arg_parse)
        provider.set_logger(arg_parse)
        mock_formatter.assert_called_once_with(
            '%(asctime)s %(levelname)s - %(message)s')

    def test_set_logger_logging_level_info(self):
        # Default (not debug, not silent) -> INFO.
        arg_parse = MockArgParse(debug=False, silent=False, logfile=None)
        provider = MockProvider(arg_parse)
        provider.set_logger(arg_parse)
        self.assertEqual(logging.INFO, provider.logger.level)

    def test_set_logger_logging_level_debug(self):
        arg_parse = MockArgParse(debug=True, silent=False, logfile=None)
        provider = MockProvider(arg_parse)
        provider.set_logger(arg_parse)
        self.assertEqual(logging.DEBUG, provider.logger.level)

    def test_set_logger_logging_level_silent(self):
        # silent=True -> only CRITICAL messages pass.
        self.provider.set_logger(self.arg_parse)
        self.assertEqual(logging.CRITICAL, self.provider.logger.level)

    @patch('logging.FileHandler')
    @skipIf(sys.platform.startswith('win'), 'Seriously?')
    def test_set_logger_log_on_file(self, mock_file_handler):
        arg_parse = MockArgParse(debug=False, silent=True, logfile='/dev/null')
        provider = MockProvider(arg_parse)
        provider.set_logger(arg_parse)
        mock_file_handler.assert_called_once_with('/dev/null')

    @patch('logging.StreamHandler')
    def test_set_logger_log_on_stream(self, mock_stream_handler):
        # Without a logfile, logging goes to a stream handler.
        self.provider.set_logger(self.arg_parse)
        mock_stream_handler.assert_called_once_with()

    @patch('logging.getLogger')
    def test_set_logger_instantiate_the_logger(self, mock_get_logger):
        self.provider.set_logger(self.arg_parse)
        mock_get_logger.assert_called_once_with('virga.providers.abstract')
        self.assertIsNotNone(self.provider.logger)

    @patch('logging.Logger.setLevel')
    @patch('logging.Logger.addHandler')
    def test_set_logger_set_the_logger_properties(self, mock_add_handler, mock_set_level):
        self.provider.set_logger(self.arg_parse)
        mock_add_handler.assert_called_once()
        mock_set_level.assert_called_once_with(logging.CRITICAL)

    @patch('virga.providers.abstract.AbstractProvider.set_logger')
    def test_logs_instantiate_the_logger_if_it_is_none(self, mock_set_logger):
        # logs() lazily configures the logger on first use.
        self.provider.logs([{}])
        mock_set_logger.assert_called_once_with(self.arg_parse)

    @patch('logging.Logger.log')
    def test_logs_empty_outcomes(self, mock_log):
        self.provider.logs([])
        mock_log.assert_not_called()

    @patch('logging.Logger.log')
    def test_logs_outcomes_success(self, mock_log):
        self.provider.logs([{'level': SUCCESS, 'message': 'Success'}])
        mock_log.assert_called_once_with(SUCCESS, 'Success')

    @patch('logging.Logger.log')
    def test_logs_outcomes_failure(self, mock_log):
        self.provider.logs([{'level': logging.ERROR, 'message': 'Failure'}])
        mock_log.assert_called_once_with(logging.ERROR, 'Failure')

    @patch('logging.Logger.log')
    def test_logs_default_outcomes_failure(self, mock_log):
        # An outcome with no level/message falls back to CRITICAL/'No message'.
        self.provider.logs([{}])
        mock_log.assert_called_once_with(logging.CRITICAL, 'No message')

    def test_flatten_empty_list(self):
        self.assertListEqual([], self.provider.flatten([]))

    def test_flatten_one_string(self):
        # Scalars are wrapped into a single-element list.
        self.assertListEqual(['t1'], self.provider.flatten('t1'))

    def test_flatten_one_none(self):
        self.assertListEqual([None], self.provider.flatten(None))

    def test_flatten_list_one_level(self):
        self.assertListEqual(['t1', 't2', 't3'],
                             self.provider.flatten(['t1', 't2', 't3']))

    def test_flatten_list_one_level_multiple_types(self):
        self.assertListEqual(['t1', None, False],
                             self.provider.flatten(['t1', None, False]))

    def test_flatten_list_two_levels(self):
        data = [
            ['t1', 't2', 't3'],
            ['t4', 't5', 't6'],
        ]
        expected = ['t1', 't2', 't3', 't4', 't5', 't6']
        self.assertListEqual(expected, self.provider.flatten(data))

    def test_flatten_list_three_levels(self):
        data = [
            ['t1', 't2', ['t3', 't4']],
            ['t5', ['t6', 't7', 't8'], ['t9', None]],
        ]
        expected = ['t1', 't2', 't3', 't4', 't5', 't6', 't7', 't8', 't9', None]
        self.assertListEqual(expected, self.provider.flatten(data))

    def test_outcome_result_none(self):
        self.assertFalse(self.provider.outcome(None))

    @patch('virga.providers.abstract.AbstractProvider.flatten')
    def test_outcome_flatten_result(self, mock_flatten):
        self.provider.outcome('any')
        # BUG FIX: was `mock_flatten.called_once_with('any')`, which merely
        # creates a child-mock attribute and never asserts.
        mock_flatten.assert_called_once_with('any')

    def test_outcome_result_empty(self):
        self.assertFalse(self.provider.outcome([]))

    def test_outcome_all_result_are_none(self):
        self.assertFalse(self.provider.outcome([None, None]))

    def test_outcome_all_result_are_false(self):
        self.assertFalse(self.provider.outcome([False, False]))

    def test_outcome_all_result_are_empty(self):
        self.assertFalse(self.provider.outcome(['', '']))

    def test_outcome_any_result_exists(self):
        # A single truthy element is enough for a positive outcome.
        self.assertTrue(self.provider.outcome(['test', '']))

    def test_lookup_not_there(self):
        # Strings without a _lookup(...) marker pass through unchanged.
        data = 'Any=`any`'
        self.assertEqual(data, self.provider._lookup(data))

    # mocking the mock!!!
    @patch('tests.test_providers.MockProvider.lookup')
    def test_lookup_with_one_lookup(self, mock_lookup):
        mock_lookup.return_value = 'lookup-return'
        data = 'Any=`_lookup(section, identifier, id)`'
        expected = 'Any=`lookup-return`'
        self.assertEqual(expected, self.provider._lookup(data))

    @patch('tests.test_providers.MockProvider.lookup')
    def test_lookup_with_multiple_lookups(self, mock_lookup):
        # Every _lookup(...) marker in the expression is substituted.
        mock_lookup.return_value = 'lookup-return'
        data = 'Any=`_lookup(section_1, identifier1, id-1)` && Any=`_lookup(section_2, identifier2, id-2)`'
        expected = 'Any=`lookup-return` && Any=`lookup-return`'
        self.assertEqual(expected, self.provider._lookup(data))

    @patch('builtins.open', new_callable=mock_open)
    def test_output_save_resource_on_file(self, mock_opened):
        data = {'k1': 'v1', 'k2': 'v2'}
        # this one is copied from the debug
        # I expected the calls where split only by the CR
        expected = [
            call('{'),
            call('\n '),
            call('"k1"'),
            call(': '),
            call('"v1"'),
            call(',\n '),
            call('"k2"'),
            call(': '),
            call('"v2"'),
            call('\n'),
            call('}')
        ]
        self.provider.output(data, 'any')
        mock_opened.assert_called_once_with('/tmp/any.json', 'w')
        handle = mock_opened()
        handle.write.assert_has_calls(expected)

    @patch('builtins.open', new_callable=mock_open)
    def test_output_save_resource_with_datetime(self, mock_opened):
        # datetime values are serialised in ISO-8601 form.
        data = {'k1': 'v1', 'k2': datetime(2015, 1, 1, 12, 30, 59)}
        expected = [
            call('{'),
            call('\n '),
            call('"k1"'),
            call(': '),
            call('"v1"'),
            call(',\n '),
            call('"k2"'),
            call(': '),
            call('"2015-01-01T12:30:59"'),
            call('\n'),
            call('}')
        ]
        self.provider.output(data, 'any')
        mock_opened.assert_called_once_with('/tmp/any.json', 'w')
        handle = mock_opened()
        handle.write.assert_has_calls(expected)

    @patch('virga.providers.abstract.AbstractProvider.output')
    def test_assertion_call_output(self, mock_output):
        self.provider.assertion("AnyKey=='any-value'", 'Context', {}, 'resource-id')
        mock_output.assert_called_once_with({}, 'resource-id')

    @patch('virga.providers.abstract.AbstractProvider._lookup')
    def test_assertion_call_lookup(self, mock_lookup):
        mock_lookup.return_value = "AnyKey=='any-value'"
        self.provider.assertion("AnyKey=='any-value'", 'Context', {}, 'resource-id')
        mock_lookup.assert_called_once_with("AnyKey=='any-value'")

    @patch('jmespath.search')
    def test_assertion_call_search(self, mock_search):
        self.provider.assertion("AnyKey=='any-value'", 'Context', {}, 'resource-id')
        mock_search.assert_called_once_with("AnyKey=='any-value'", {})

    @patch('logging.Logger.debug')
    def test_assertion_call_debug(self, mock_debug):
        self.provider.assertion("AnyKey=='any-value'", 'Context', {}, 'resource-id')
        mock_debug.assert_called_once_with(
            "resource-id: AnyKey=='any-value' eval False == False")

    @patch('virga.providers.abstract.AbstractProvider.outcome')
    def test_assertion_call_outcome(self, mock_outcome):
        self.provider.assertion("AnyKey=='any-value'", 'Context', {}, 'resource-id')
        mock_outcome.assert_called_once_with(False)

    @patch('os.listdir', return_value=['bare-valid.yaml'])
    @patch('builtins.open', new_callable=mock_open, read_data=fixture('bare-valid.yaml'))
    def test_definition_file(self, *args):
        self.provider.definitions_path = 'any'
        self.assertDictEqual({'test': 'ok'}, self.provider.definitions)

    @patch('builtins.open', new_callable=mock_open, read_data=fixture('bare-valid.yaml'))
    def test_definition_file_missing(self, *args):
        with self.assertRaisesRegex(NotImplementedError,
                                    'Implement definition_file property'):
            _ = self.provider.definitions

    @patch('sys.stderr.write')
    @patch('sys.exit')
    def test_result_no_messages(self, mock_exit, mock_write):
        self.provider.result([])
        mock_exit.assert_not_called()
        mock_write.assert_not_called()

    @patch('sys.stderr.write')
    @patch('sys.exit')
    def test_result_only_successful_messages(self, mock_exit, mock_write):
        messages = [
            {'success': True},
            {'success': True},
            {'success': True},
        ]
        self.provider.result(messages)
        mock_exit.assert_not_called()
        mock_write.assert_not_called()

    @patch('sys.stderr.write')
    @patch('sys.exit')
    def test_result_only_one_failing_message(self, mock_exit, mock_write):
        # A single failure is enough to exit(1) with a singular message.
        messages = [
            {'success': True},
            {'success': True},
            {'success': False},
        ]
        self.provider.result(messages)
        mock_exit.assert_called_once_with(1)
        mock_write.assert_called_once_with('There is an error on 3 tests.\n')

    @patch('sys.stderr.write')
    @patch('sys.exit')
    def test_result_only_failing_messages(self, mock_exit, mock_write):
        messages = [
            {'success': False},
            {'success': False},
            {'success': False},
        ]
        self.provider.result(messages)
        mock_exit.assert_called_once_with(1)
        mock_write.assert_called_once_with('There are 3 errors on 3 tests.\n')
from tests import fixture
# BUG FIX: `urlparse` is the Python 2 module name; in Python 3 (which this
# codebase targets — f-strings are used elsewhere) it lives in urllib.parse.
from urllib.parse import urlunsplit

# Parametrized component fixtures: HTTP method, scheme, host, path and query.
method = fixture(GET='GET', POST='POST', NOPE='NOPE', autoparam=True)
scheme = fixture(http='http', https='https', autoparam=True)
netloc = fixture(one='onehost.com', two='twohost.net', autoparam=True)
path = fixture('some/path', autoparam=True)
query = fixture('query=string', autoparam=True)


@fixture()
def url(scheme, netloc, path, query, fragment=None):
    """Assemble a URL from the individual component fixtures."""
    return urlunsplit((scheme, netloc, path, query, fragment))
class BaseFilter(object):
    """
    Each filter is run through these tests with the fixtures provided in
    their own subclasses. The fixtures are by operator type (unary, binary,
    ternary, collection, string). If a filter does not handle an operator
    the test will be marked as skipped.

    Expectation values are calculated based on python's built in comparisons.

    Bugs fixed:
    - ``Operator_less`` was missing the ``test_`` prefix, so pytest never
      collected it and the LESS operator was untested.
    - ``test_Operators_is_not_empty`` computed ``len(op) >= 0``, which is
      always True; the complement of is_empty's ``== 0`` is ``!= 0``.
    """

    # Dummy fixtures, these need to be overridden when appropriate in subclasses
    unary_comparison_scenario = fixture(None, not None, autoparam=True)
    binary_comparison_scenario = fixture(None, autoparam=True)
    ternary_comparison_scenario = fixture(None, autoparam=True)
    collection_scenario = fixture(None, autoparam=True)
    strings_scenario = fixture(None, autoparam=True)

    @fixture()
    def Filter(self):
        # Fresh filter instance per test, from the subclass's FILTER class.
        return self.FILTER()

    @fixture(autouse=True)
    def skip_if_not_applicable(self, request):
        # this filter will skip the test if the filter doesn't respond to the operator being tested
        op_name = request.node.originalname[len('test_Operators_'):].upper()
        op = getattr(Operator, op_name)
        if op not in self.FILTER.OPERATORS:
            pytest.skip('{} does not respond to {}'.format(self.FILTER, op))

    ###########################################################################
    # unary comp

    def test_Operators_is_null(self, Filter, unary_comparison_scenario):
        op = unary_comparison_scenario
        expects = (op is None)
        assert Filter.is_null(op) == expects, '({op} is None)'.format(
            **locals())

    def test_Operators_is_not_null(self, Filter, unary_comparison_scenario):
        op = unary_comparison_scenario
        expects = (op is not None)
        assert Filter.is_not_null(op) == expects, '({op} is not None)'.format(
            **locals())

    ###########################################################################
    # binary comp

    def test_Operators_equal(self, Filter, binary_comparison_scenario):
        lop, rop = binary_comparison_scenario
        expects = (lop == rop)
        assert Filter.equal(
            lop, rop) == expects, '({lop} == {rop})'.format(**locals())

    def test_Operators_not_equal(self, Filter, binary_comparison_scenario):
        lop, rop = binary_comparison_scenario
        expects = (lop != rop)
        assert Filter.not_equal(
            lop, rop) == expects, '({lop} != {rop})'.format(**locals())

    def test_Operators_less(self, Filter, binary_comparison_scenario):
        # BUG FIX: was named `Operator_less` and therefore never collected.
        lop, rop = binary_comparison_scenario
        expects = (lop < rop)
        assert Filter.less(
            lop, rop) == expects, '({lop} < {rop})'.format(**locals())

    def test_Operators_less_or_equal(self, Filter, binary_comparison_scenario):
        lop, rop = binary_comparison_scenario
        expects = (lop <= rop)
        assert Filter.less_or_equal(
            lop, rop) == expects, '({lop} <= {rop})'.format(**locals())

    def test_Operators_greater(self, Filter, binary_comparison_scenario):
        lop, rop = binary_comparison_scenario
        expects = (lop > rop)
        assert Filter.greater(
            lop, rop) == expects, '({lop} > {rop})'.format(**locals())

    def test_Operators_greater_or_equal(self, Filter, binary_comparison_scenario):
        lop, rop = binary_comparison_scenario
        expects = (lop >= rop)
        assert Filter.greater_or_equal(
            lop, rop) == expects, '({lop} >= {rop})'.format(**locals())

    ###########################################################################
    # ternary comp

    def test_Operators_between(self, Filter, ternary_comparison_scenario):
        op, minop, maxop = ternary_comparison_scenario
        expects = (minop <= op <= maxop)
        assert Filter.between(
            op, minop, maxop) == expects, '({minop} <= {op} <= {maxop})'.format(
                **locals())

    def test_Operators_not_between(self, Filter, ternary_comparison_scenario):
        op, minop, maxop = ternary_comparison_scenario
        expects = not (minop <= op <= maxop)
        assert Filter.not_between(
            op, minop, maxop) == expects, 'not ({minop} <= {op} <= {maxop})'.format(
                **locals())

    ###########################################################################
    # collections

    def test_Operators_in(self, Filter, collection_scenario):
        lop, rop = collection_scenario
        expects = (lop in rop)
        assert Filter._in(
            lop, rop) == expects, '({lop} in {rop})'.format(**locals())

    def test_Operators_not_in(self, Filter, collection_scenario):
        lop, rop = collection_scenario
        expects = (lop not in rop)
        assert Filter.not_in(
            lop, rop) == expects, '({lop} not in {rop})'.format(**locals())

    def test_Operators_contains(self, Filter, collection_scenario):
        lop, rop = collection_scenario
        expects = (lop in rop)
        assert Filter.contains(
            lop, rop) == expects, '({lop} in {rop})'.format(**locals())

    def test_Operators_not_contains(self, Filter, collection_scenario):
        lop, rop = collection_scenario
        expects = (lop not in rop)
        assert Filter.not_contains(
            lop, rop) == expects, '({lop} not in {rop})'.format(**locals())

    def test_Operators_is_empty(self, Filter, collection_scenario):
        op = collection_scenario
        expects = (len(op) == 0)
        assert Filter.is_empty(op) == expects, '(len({op}) == 0)'.format(
            **locals())

    def test_Operators_is_not_empty(self, Filter, collection_scenario):
        op = collection_scenario
        # BUG FIX: was `len(op) >= 0`, which is always True.
        expects = (len(op) != 0)
        assert Filter.is_not_empty(op) == expects, '(len({op}) != 0)'.format(
            **locals())

    ###########################################################################
    # strings

    def test_Operators_ends_with(self, Filter, strings_scenario):
        lop, rop = strings_scenario
        expects = (lop.endswith(rop))
        assert Filter.ends_with(
            lop, rop) == expects, '({lop}.endswith({rop}))'.format(**locals())

    def test_Operators_not_ends_with(self, Filter, strings_scenario):
        lop, rop = strings_scenario
        expects = (not lop.endswith(rop))
        assert Filter.not_ends_with(
            lop, rop) == expects, '(not {lop}.endswith({rop}))'.format(**locals())

    def test_Operators_begins_with(self, Filter, strings_scenario):
        lop, rop = strings_scenario
        expects = (lop.startswith(rop))
        assert Filter.begins_with(
            lop, rop) == expects, '({lop}.startswith({rop}))'.format(**locals())

    def test_Operators_not_begins_with(self, Filter, strings_scenario):
        lop, rop = strings_scenario
        expects = (not lop.startswith(rop))
        assert Filter.not_begins_with(
            lop, rop) == expects, '(not {lop}.startswith({rop}))'.format(**locals())
def test_sample_definition_not_found(self, mock_read_definition, *args):
    """sample() raises VirgaException when the section has no definition."""
    mock_read_definition.return_value = fixture('valid-definition.yaml', get_yaml=True)
    # 'not_here' does not appear in the loaded definition file.
    with self.assertRaisesRegex(VirgaException, 'Resource definition not found'):
        self.provider.sample('not_here', 'id-123456')
def test_command(db, tables_only, field_counts, field_lists, tables, views, caplog):
    """End-to-end check of the add command: schema/table/view creation, the
    contents of the summary relations, and the optional field-counts and
    field-lists routines.

    NOTE(review): the layout of the multi-line SQL strings was reconstructed
    (source whitespace was mangled); SQL semantics are unaffected.
    """
    with fixture(db, tables_only=tables_only, field_counts=field_counts,
                 field_lists=field_lists) as result:
        # Check existence of schema, tables and views.
        if field_counts:
            # The field-counts routine adds one extra base table.
            tables.add('field_counts')
        assert db.schema_exists('view_data_collection_1')
        assert set(
            db.pluck(
                "SELECT table_name FROM information_schema.tables WHERE table_schema = %(schema)s "
                "AND table_type = 'BASE TABLE'",
                {'schema': 'view_data_collection_1'})) == tables
        assert set(
            db.pluck(
                "SELECT table_name FROM information_schema.tables WHERE table_schema = %(schema)s "
                "AND table_type = 'VIEW'",
                {'schema': 'view_data_collection_1'})) == views

        # Check contents of summary relations.
        rows = db.all("""
            SELECT
                award_index,
                release_type,
                collection_id,
                ocid,
                release_id,
                award_id,
                award_title,
                award_status,
                award_description,
                award_value_amount,
                award_value_currency,
                award_date,
                award_contractperiod_startdate,
                award_contractperiod_enddate,
                award_contractperiod_maxextentdate,
                award_contractperiod_durationindays,
                suppliers_count,
                documents_count,
                documenttype_counts,
                items_count
            FROM view_data_collection_1.awards_summary
            ORDER BY id, award_index
        """)
        assert rows[0] == (
            0,  # award_index
            'release',  # release_type
            1,  # collection_id
            'dolore',  # ocid
            'ex laborumsit autein magna veniam',  # release_id
            'reprehenderit magna cillum eu nisi',  # award_id
            'laborum aute nisi eiusmod',  # award_title
            'pending',  # award_status
            'ullamco in voluptate',  # award_description
            decimal.Decimal('-95099396'),  # award_value_amount
            'AMD',  # award_value_currency
            datetime.datetime(3263, 12, 5, 21, 24, 19, 161000),  # award_date
            datetime.datetime(4097, 9, 16, 5, 55, 19, 125000),  # award_contractperiod_startdate
            datetime.datetime(4591, 4, 29, 6, 34, 28, 472000),  # award_contractperiod_enddate
            datetime.datetime(3714, 8, 9, 7, 21, 37, 544000),  # award_contractperiod_maxextentdate
            decimal.Decimal('72802012'),  # award_contractperiod_durationindays
            2,  # suppliers_count
            4,  # documents_count
            {
                'Excepteur nisi et': 1,
                'proident exercitation in': 1,
                'ut magna dolore velit aute': 1,
                'veniam enim aliqua d': 1,
            },  # documenttype_counts
            5,  # items_count
        )
        assert len(rows) == 301

        rows = db.all("""
            SELECT
                party_index,
                release_type,
                collection_id,
                ocid,
                release_id,
                parties_id,
                roles,
                identifier,
                unique_identifier_attempt,
                parties_additionalidentifiers_ids,
                parties_additionalidentifiers_count
            FROM view_data_collection_1.parties_summary
            ORDER BY id, party_index
        """)
        assert rows[0] == (
            0,  # party_index
            'release',  # release_type
            1,  # collection_id
            'dolore',  # ocid
            'ex laborumsit autein magna veniam',  # release_id
            'voluptate officia tempor dolor',  # parties_id
            [
                'ex ',
                'in est exercitation nulla Excepteur',
                'ipsum do',
            ],  # roles
            'ad proident dolor reprehenderit veniam-in quis exercitation reprehenderit',  # identifier
            'voluptate officia tempor dolor',  # unique_identifier_attempt
            [
                'exercitation proident voluptate-sed culpa eamollit consectetur dolor l',
                'magna-dolor ut indolorein in tempor magna mollit',
                'ad occaecat amet anim-laboris ea Duisdeserunt quis sed pariatur mollit',
                'elit mollit-officia proidentmagna',
                'ex-minim Ut consectetur',
            ],  # parties_additionalidentifiers_ids
            5,  # parties_additionalidentifiers_count
        )
        assert len(rows) == 296

        if field_counts:
            # Check contents of field_counts table.
            rows = db.all('SELECT * FROM view_data_collection_1.field_counts')
            assert len(rows) == 65235
            assert rows[0] == (1, 'release', 'awards', 100, 301, 100)

        if field_lists:
            # Check the count of keys in the field_list field for the lowest primary keys in each summary relation.
            statement = """
                SELECT count(*)
                FROM (SELECT jsonb_each(field_list)
                      FROM (SELECT field_list
                            FROM view_data_collection_1.{table}
                            ORDER BY {primary_keys}
                            LIMIT 1) AS field_list
                ) AS each
            """
            expected = {
                'award_documents_summary': 11,
                'award_items_summary': 26,
                'award_suppliers_summary': 28,
                'awards_summary': 140,
                'buyer_summary': 28,
                'contract_documents_summary': 11,
                'contract_implementation_documents_summary': 11,
                'contract_implementation_milestones_summary': 29,
                'contract_implementation_transactions_summary': 83,
                'contract_items_summary': 26,
                'contract_milestones_summary': 27,
                'contracts_summary': 328,
                'parties_summary': 34,
                'planning_documents_summary': 11,
                'planning_milestones_summary': 29,
                'planning_summary': 61,
                'procuringentity_summary': 32,
                'release_summary': 1046,
                'tender_documents_summary': 15,
                'tender_items_summary': 25,
                'tender_milestones_summary': 23,
                'tender_summary': 228,
                'tenderers_summary': 31,
            }
            for table in SUMMARIES:
                count = db.one(
                    db.format(statement, table=table.name,
                              primary_keys=table.primary_keys))[0]
                assert count == expected[
                    table.
                    name], f'{table.name}: {count} != {expected[table.name]}'

        # All columns have comments.
        assert not db.all(
            """
            SELECT
                isc.table_name, isc.column_name, isc.data_type
            FROM information_schema.columns isc
            WHERE isc.table_schema = %(schema)s
                AND LOWER(isc.table_name) NOT IN ('selected_collections', 'note')
                AND LOWER(isc.table_name) NOT LIKE '%%_no_data'
                AND LOWER(isc.table_name) NOT LIKE '%%_field_list'
                AND pg_catalog.col_description(format('%%s.%%s',isc.table_schema,isc.table_name)::regclass::oid, isc.ordinal_position) IS NULL
            """, {'schema': 'view_data_collection_1'})

        # The log lists the parsed arguments and only the routines that ran.
        expected = [
            f'Arguments: collections=(1,) note=Default name=None tables_only={tables_only!r}',
            'Added collection_1',
            'Running summary-tables routine',
        ]
        if field_counts:
            expected.append('Running field-counts routine')
        if field_lists:
            expected.append('Running field-lists routine')
        assert result.exit_code == 0
        assert result.output == ''
        assert_log_records(caplog, command, expected)
def test_lookup_failure(self, mock_call):
    """lookup() raises VirgaException when the query matches no resource."""
    # The fixture simulates an AWS response with no matching subnets.
    mock_call.return_value = fixture('empty-subnet.json', get_json=True)
    with self.assertRaisesRegex(VirgaException, 'Lookup subnets name no-subnet failed'):
        self.provider.lookup('subnets', 'name', 'no-subnet')
def test_flatten_items(self, *args):
    """flatten_items() extracts the instances nested under each reservation."""
    expected = fixture('only-instances.json', get_json=True)
    payload = fixture('reservations-instances.json', get_json=True)
    flattened = self.provider.flatten_items(payload, 'Reservations.Instances')
    self.assertListEqual(expected, flattened)
def test_sample_invokes_read_definition(self, mock_read_definition, *args):
    """sample() reads the definition file exactly once."""
    mock_read_definition.return_value = fixture('valid-definition.yaml', get_yaml=True)
    self.provider.sample('subnets', 'subnet-123456')
    mock_read_definition.assert_called_once_with()
def test_launch_tests(self, mock_apply, *args): self.provider.tests = fixture('tests.yaml', get_yaml=True) self.provider.action() self.assertEqual(3, mock_apply.call_count)
def test_lookup_success(self, mock_call):
    """lookup() resolves a subnet's name to its resource id."""
    mock_call.return_value = fixture('subnet.json', get_json=True)
    self.assertEqual(
        'subnet-0123456789',
        self.provider.lookup('subnets', 'name', 'my-subnet'))
def test_evaluate_no_assertions_calls_assertion(self, mock_assertion, mock_call):
    """evaluate() invokes assertion() once per assertion in the test, passing
    the context, the matched resource data and its resource id.

    NOTE(review): despite "no_assertions" in the name, the test supplies two
    assertions — the name may be stale; confirm.
    """
    mock_call.return_value = fixture('subnet.json', get_json=True)
    test = {
        'name': 'my-subnet',
        'assertions': [
            "AvailabilityZone=='eu-west-2a'",
            "CidrBlock=='10.0.0.0/24'",
        ]
    }
    # Definition mirroring the ec2 describe_subnets section of the provider
    # definition file.
    definition = {
        'client': 'ec2',
        'action': 'describe_subnets',
        'context': 'Subnets',
        'prefix': 'Subnets',
        'resource_id': 'SubnetId',
        'identifiers': {
            'id': {
                'key': 'subnet-id',
                'type': 'filter'
            },
            'name': {
                'key': 'tag:Name',
                'type': 'filter'
            }
        }
    }
    self.provider.evaluate(test, definition, [])
    # The subnet data below matches the contents of subnet.json.
    subnet_data = {
        'AvailabilityZone': 'eu-west-2a',
        'AvailableIpAddressCount': 248,
        'CidrBlock': '10.0.0.0/24',
        'DefaultForAz': False,
        'MapPublicIpOnLaunch': True,
        'State': 'available',
        'SubnetId': 'subnet-0123456789',
        'VpcId': 'vpc-0123456789',
        'AssignIpv6AddressOnCreation': False,
        'Ipv6CidrBlockAssociationSet': [],
        'Tags': [{
            'Key': 'environment',
            'Value': 'staging'
        }, {
            'Key': 'Name',
            'Value': 'my-subnet'
        }]
    }
    expected = [
        call("AvailabilityZone=='eu-west-2a'", 'Subnets', subnet_data,
             'subnet-0123456789'),
        call("CidrBlock=='10.0.0.0/24'", 'Subnets', subnet_data,
             'subnet-0123456789')
    ]
    mock_assertion.assert_has_calls(expected, any_order=True)
})

# NOTE(review): the `})` above closes a definition that begins before this
# chunk; it is preserved verbatim.

# Parametrized scenarios: each Scenario pairs an expected outcome (PASS/FAIL)
# with a rule, the Item under test, and a human-readable reason.
scenario = fixture(
    autoparam=True,
    params=(
        # rule1
        Scenario(PASS, rule_1, Item(_name, _category, _in_stock, _price, _id), 'a-ok'),
        Scenario(PASS, rule_1, Item(_name, _category, _in_stock, 10, _id), 'a-ok'),
        Scenario(PASS, rule_1, Item(_name, _category, _in_stock, 10.24, _id), 'a-ok'),
        Scenario(PASS, rule_1, Item(_name, _category, _in_stock, 0, _id), 'a-ok'),
        Scenario(FAIL, rule_1, Item(_name, _category, _in_stock, 10.25, _id), '10.25 is not < 10.25'),
        Scenario(FAIL, rule_1, Item(_name, _category, _in_stock, 10.249, _id), '10.249 is the wrong step'),
        Scenario(FAIL, rule_1, Item(_name, _category, _in_stock, 10.251, _id), '10.251 is the wrong step'),
        Scenario(FAIL, rule_1, Item(_name, _category, _in_stock, -1, _id), 'price of -1 is below min'),
        # rule2
        Scenario(FAIL, rule_2, Item(_name, _category, _not_in_stock, _price, _id), 'meets no conditions'),
        Scenario(PASS, rule_2, Item(_name, _category, _in_stock, _price, '1111-1111-1111'), 'good id'),
        Scenario(FAIL, rule_2, Item(_name, _category, _not_in_stock, _price, '1111-1111-1111'), 'good id, but not in stock'),
        Scenario(FAIL, rule_2, Item(_name, _category, _not_in_stock, _price, '1111-1111-1112'), 'bad id'),
        Scenario(FAIL, rule_2, Item(_name, _category, _not_in_stock, _price, '111111111111'), 'bad id'),
        Scenario(PASS, rule_2, Item(_name, 4, _in_stock, _price, _id), 'category is tools'),
        Scenario(FAIL, rule_2, Item(_name, 4, _not_in_stock, _price, _id), 'category is tools, but not in stock'),
        Scenario(FAIL, rule_2, Item(_name, 5, _in_stock, _price, _id), 'category is not tools'),
        Scenario(PASS, rule_2, Item('henry', _category, _in_stock, _price, _id), 'good name'),
        Scenario(FAIL, rule_2, Item('bob ross', _category, _in_stock, _price, _id), 'bad name'),
    )
)