def start_pe(self, num, port):
    """Launch a policy-engine/API server subprocess and return a client.

    Output is captured into temp files under /tmp so that a start-up
    failure can be logged.  Raises tenacity.RetryError if the server
    never answers an HTTP GET with status 200.
    """
    name_prefix = 'congress-pe%d-%d-' % (num, port)
    self.outfiles[num] = tempfile.NamedTemporaryFile(
        mode='a+', suffix='.out', prefix=name_prefix, dir='/tmp')
    self.errfiles[num] = tempfile.NamedTemporaryFile(
        mode='a+', suffix='.err', prefix=name_prefix, dir='/tmp')

    args = [sys.executable,
            'congress/server/congress_server.py',
            '--node-id', 'node_%d' % num,
            '--api', '--policy-engine',
            '--config-file',
            'congress/tests/etc/congress.conf.test.ha_pe%d' % num]
    # NOTE(review): stderr is redirected to the .out file, so the .err
    # tempfile is created but never written — confirm this is intended.
    proc = subprocess.Popen(args,
                            stdout=self.outfiles[num],
                            stderr=self.outfiles[num],
                            cwd=helper.root_path())
    self.addCleanup(proc.kill)

    client = self.client(port)
    try:
        helper.retry_check_function_return_value(
            lambda: client.get().status_code, 200)
    except tenacity.RetryError:
        out = self.read_output_file(self.outfiles[num])
        LOG.error('PE%d failed to start. Process output:\n%s' % (num, out))
        raise
    return client
def test_policy_execute_data_first(self):
    """Action rule added after matching data still triggers execution."""
    class NovaClient(object):
        def __init__(self, testkey):
            self.testkey = testkey

        def disconnectNetwork(self, arg1):
            LOG.info("disconnectNetwork called on %s", arg1)
            self.testkey = "arg1=%s" % arg1

    nova = self.nova
    nova.nova_client = NovaClient(None)
    self.node.register_service(nova)

    # insert rule and data
    self.api['api-policy'].add_item({'name': 'alice'}, {})
    self.api['api-rule'].add_item(
        {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 0)
    self.api['api-rule'].add_item(
        {'rule': 'execute[nova:disconnectNetwork(x)] :- q(x)'}, {},
        context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)

    expected = "arg1=1"
    helper.retry_check_function_return_value(
        lambda: nova.nova_client.testkey, expected)
def test_policy_api_model_execute(self):
    """execute_action resolves a dotted action name and forwards args."""
    def _execute_api(client, action, action_args):
        LOG.info("_execute_api called on %s and %s", action, action_args)
        pos = action_args['positional']
        kw = action_args['named']
        # walk the dotted path (e.g. 'a.b.c') down from the client object
        method = reduce(getattr, action.split('.'), client)
        method(*pos, **kw)

    class NovaClient(object):
        def __init__(self, testkey):
            self.testkey = testkey

        def _get_testkey(self):
            return self.testkey

        def disconnectNetwork(self, arg1, arg2, arg3):
            self.testkey = "arg1=%s arg2=%s arg3=%s" % (arg1, arg2, arg3)

    nova = self.cage.service_object('nova')
    nova._execute_api = _execute_api
    nova.nova_client = NovaClient("testing")

    body = {'name': 'nova:disconnectNetwork',
            'args': {'positional': ['value1', 'value2'],
                     'named': {'arg3': 'value3'}}}
    request = helper.FakeRequest(body)
    result = self.api['policy'].execute_action({}, {}, request)
    self.assertEqual(result, {})

    expected_result = "arg1=value1 arg2=value2 arg3=value3"
    helper.retry_check_function_return_value(
        nova.nova_client._get_testkey, expected_result)
def start_pe(self, num, port):
    """Start one policy-engine/API server process; return an HTTP client.

    Process stdout+stderr go to a NamedTemporaryFile in /tmp so the log
    can be dumped if the server fails to come up.  Raises
    tenacity.RetryError when the server never returns HTTP 200.
    """
    self.outfiles[num] = tempfile.NamedTemporaryFile(
        mode='a+', suffix='.out',
        prefix='congress-pe%d-%d-' % (num, port), dir='/tmp')
    # NOTE(review): created but unused; stderr also goes to the .out file.
    self.errfiles[num] = tempfile.NamedTemporaryFile(
        mode='a+', suffix='.err',
        prefix='congress-pe%d-%d-' % (num, port), dir='/tmp')

    cmd = [sys.executable,
           'congress/server/congress_server.py',
           '--node-id', 'node_%d' % num,
           '--api', '--policy-engine',
           '--config-file',
           'congress/tests/etc/congress.conf.test.ha_pe%d' % num]
    server_proc = subprocess.Popen(cmd,
                                   stdout=self.outfiles[num],
                                   stderr=self.outfiles[num],
                                   cwd=helper.root_path())
    self.addCleanup(server_proc.kill)

    pe_client = self.client(port)
    try:
        helper.retry_check_function_return_value(
            lambda: pe_client.get().status_code, 200)
    except tenacity.RetryError:
        out = self.read_output_file(self.outfiles[num])
        LOG.error('PE%d failed to start. Process output:\n%s' % (num, out))
        raise
    return pe_client
def test_neutron_policy_execute(self):
    """Inserting matching data triggers a neutron action execution."""
    class NeutronClient(object):
        def __init__(self, testkey):
            self.testkey = testkey

        def disconnectNetwork(self, arg1):
            LOG.info("disconnectNetwork called on %s", arg1)
            self.testkey = "arg1=%s" % arg1

    neutron = self.neutronv2
    neutron.neutron = NeutronClient(None)

    # insert rule and data
    self.api['api-policy'].add_item({'name': 'alice'}, {})
    (id1, _) = self.api['api-rule'].add_item(
        {'rule': 'execute[neutron:disconnectNetwork(x)] :- q(x)'}, {},
        context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 0)
    (id2, _) = self.api['api-rule'].add_item(
        {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)

    expected = "arg1=1"
    helper.retry_check_function_return_value(
        lambda: neutron.neutron.testkey, expected)
def test_policy_execute_data_first(self):
    """Data inserted before the action rule still causes one execution."""
    class NovaClient(object):
        def __init__(self, testkey):
            self.testkey = testkey

        def disconnectNetwork(self, arg1):
            LOG.info("disconnectNetwork called on %s", arg1)
            self.testkey = "arg1=%s" % arg1

    nova = self.nova
    nova.nova_client = NovaClient(None)

    # insert rule and data
    self.api['api-policy'].add_item({'name': 'alice'}, {})
    self.api['api-rule'].add_item(
        {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 0)
    self.api['api-rule'].add_item(
        {'rule': 'execute[nova:disconnectNetwork(x)] :- q(x)'}, {},
        context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)

    check = lambda: nova.nova_client.testkey
    helper.retry_check_function_return_value(check, "arg1=1")
def test_policy_execute_dotted(self):
    """Dotted action paths (nova:servers.ServerManager.pause) execute."""
    class ServerManagerClass(object):
        def __init__(self):
            self.testkey = None

        def pause(self, id_):
            self.testkey = "arg1=%s" % id_

    class ServersClass(object):
        def __init__(self):
            self.ServerManager = ServerManagerClass()

    class NovaClient(object):
        def __init__(self, testkey):
            self.testkey = testkey
            self.servers = ServersClass()

    nova = self.cage.service_object('nova')
    nova.nova_client = NovaClient(None)

    self.api['policy'].add_item({'name': 'alice'}, {})
    self.api['rule'].add_item(
        {'rule': 'execute[nova:servers.ServerManager.pause(x)] :- q(x)'},
        {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 0)
    self.api['rule'].add_item(
        {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)

    expected = "arg1=1"
    helper.retry_check_function_return_value(
        lambda: nova.nova_client.servers.ServerManager.testkey, expected)
def test_policy_execute_dotted(self):
    """Dotted action name resolves through nested client attributes."""
    class ServerManagerClass(object):
        def __init__(self):
            self.testkey = None

        def pause(self, id_):
            self.testkey = "arg1=%s" % id_

    class ServersClass(object):
        def __init__(self):
            self.ServerManager = ServerManagerClass()

    class NovaClient(object):
        def __init__(self, testkey):
            self.testkey = testkey
            self.servers = ServersClass()

    nova = self.nova
    nova.nova_client = NovaClient(None)
    self.node.register_service(nova)

    self.api['api-policy'].add_item({'name': 'alice'}, {})
    self.api['api-rule'].add_item(
        {'rule': 'execute[nova:servers.ServerManager.pause(x)] :- q(x)'},
        {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 0)
    self.api['api-rule'].add_item(
        {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)

    helper.retry_check_function_return_value(
        lambda: nova.nova_client.servers.ServerManager.testkey, "arg1=1")
def test_neutron_policy_execute(self):
    """Neutron execute[] rule fires when matching data arrives."""
    class NeutronClient(object):
        def __init__(self, testkey):
            self.testkey = testkey

        def disconnectNetwork(self, arg1):
            LOG.info("disconnectNetwork called on %s", arg1)
            self.testkey = "arg1=%s" % arg1

    neutron = self.cage.service_object('neutron')
    neutron.neutron = NeutronClient(None)

    # insert rule and data
    self.api['policy'].add_item({'name': 'alice'}, {})
    (id1, _) = self.api['rule'].add_item(
        {'rule': 'execute[neutron:disconnectNetwork(x)] :- q(x)'}, {},
        context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 0)
    (id2, _) = self.api['rule'].add_item(
        {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)

    check = lambda: neutron.neutron.testkey
    helper.retry_check_function_return_value(check, "arg1=1")
def test_policy_execute_no_args(self):
    """Zero-arg actions execute once per head change, not per data row."""
    class NovaClient(object):
        def __init__(self, testkey):
            self.testkey = testkey

        def disconnectNetwork(self):
            LOG.info("disconnectNetwork called")
            self.testkey = "noargs"

    nova = self.nova
    nova.nova_client = NovaClient(None)
    self.node.register_service(nova)

    # Note: this probably isn't the behavior we really want.
    #       But at least we have a test documenting that behavior.

    # insert rule and data
    self.api['api-policy'].add_item({'name': 'alice'}, {})
    (id1, rule1) = self.api['api-rule'].add_item(
        {'rule': 'execute[nova:disconnectNetwork()] :- q(x)'}, {},
        context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 0)
    (id2, rule2) = self.api['api-rule'].add_item(
        {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)
    helper.retry_check_function_return_value(
        lambda: nova.nova_client.testkey, "noargs")

    # insert more data (which DOES NOT cause an execution)
    (id3, rule3) = self.api['api-rule'].add_item(
        {'rule': 'q(2)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)

    # delete all data
    self.api['api-rule'].delete_item(
        id2, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)
    self.api['api-rule'].delete_item(
        id3, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)

    # insert data (which now DOES cause an execution)
    (id4, rule3) = self.api['api-rule'].add_item(
        {'rule': 'q(3)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 2)
    helper.retry_check_function_return_value(
        lambda: nova.nova_client.testkey, "noargs")

    # delete policy rule
    self.api['api-rule'].delete_item(
        id1, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 2)
def test_policy_table_publish(self):
    """Policy table result publish

    Test basic DSE functionality with policy engine and table result
    publish.
    """
    node = helper.make_dsenode_new_partition('testnode')
    data = fake_datasource.FakeDataSource('data')
    policy = agnostic.DseRuntime('policy')
    policy2 = agnostic.DseRuntime('policy2')
    node.register_service(data)
    node.register_service(policy)
    node.register_service(policy2)
    policy.synchronizer = mock.MagicMock()
    policy2.synchronizer = mock.MagicMock()

    policy.create_policy('data', kind=datalog_base.DATASOURCE_POLICY_TYPE)
    policy.create_policy('classification')
    policy.set_schema('data', compile.Schema({'q': (1,)}))
    policy.insert('p(x):-data:q(x),gt(x,2)', target='classification')
    policy.insert('q(3)', target='data')
    # TODO(ekcs): test that no publish triggered (because no subscribers)

    policy2.create_policy('policy')
    policy2.subscribe('policy', 'classification:p')
    helper.retry_check_function_return_value(
        lambda: 'classification:p' in
        policy._published_tables_with_subscriber, True)
    self.assertEqual(list(policy.policySubData.keys()),
                     [('p', 'classification', None)])

    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        'policy:classification:p(3)')

    policy.insert('q(4)', target='data')
    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        ('policy:classification:p(3)'
         ' policy:classification:p(4)'))

    # test that no change to p means no publish triggered
    policy.insert('q(2)', target='data')
    # TODO(ekcs): test no publish triggered

    policy.delete('q(4)', target='data')
    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        'policy:classification:p(3)')

    policy2.unsubscribe('policy', 'classification:p')
    # trigger removed
    helper.retry_check_function_return_value(
        lambda: len(policy._published_tables_with_subscriber) == 0, True)
    self.assertEqual(list(policy.policySubData.keys()), [])

    policy.insert('q(4)', target='data')
    # TODO(ekcs): test that no publish triggered (because no subscribers)
    node.stop()
def test_policy_table_publish(self):
    """Policy table result publish

    Test basic DSE functionality with policy engine and table result
    publish.
    """
    node = helper.make_dsenode_new_partition('testnode')
    data = fake_datasource.FakeDataSource('data')
    policy = agnostic.DseRuntime('policy')
    policy2 = agnostic.DseRuntime('policy2')
    node.register_service(data)
    node.register_service(policy)
    node.register_service(policy2)

    policy.create_policy('data')
    policy.create_policy('classification')
    policy.set_schema('data', compile.Schema({'q': (1,)}))
    policy.insert('p(x):-data:q(x),gt(x,2)', target='classification')
    policy.insert('q(3)', target='data')
    # TODO(ekcs): test that no publish triggered (because no subscribers)

    policy2.create_policy('policy')
    policy2.subscribe('policy', 'classification:p')
    helper.retry_check_function_return_value(
        lambda: 'classification:p' in
        policy._published_tables_with_subscriber, True)
    self.assertEqual(list(policy.policySubData.keys()),
                     [('p', 'classification', None)])
    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        'policy:classification:p(3)')

    policy.insert('q(4)', target='data')
    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        ('policy:classification:p(3)'
         ' policy:classification:p(4)'))

    # test that no change to p means no publish triggered
    policy.insert('q(2)', target='data')
    # TODO(ekcs): test no publish triggered

    policy.delete('q(4)', target='data')
    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        'policy:classification:p(3)')

    policy2.unsubscribe('policy', 'classification:p')
    # trigger removed
    helper.retry_check_function_return_value(
        lambda: len(policy._published_tables_with_subscriber) == 0, True)
    self.assertEqual(list(policy.policySubData.keys()), [])

    policy.insert('q(4)', target='data')
    # stop the node so the partition does not leak into other tests
    # (sibling test_policy_table_publish variant does the same)
    node.stop()
def test_policy_execute_no_args(self):
    """Zero-argument execute[] rule fires on head change only."""
    class NovaClient(object):
        def __init__(self, testkey):
            self.testkey = testkey

        def disconnectNetwork(self):
            LOG.info("disconnectNetwork called")
            self.testkey = "noargs"

    client = NovaClient(None)
    nova = self.nova
    nova.nova_client = client
    self.node.register_service(nova)

    # Note: this probably isn't the behavior we really want.
    #       But at least we have a test documenting that behavior.

    # insert rule and data
    self.api['api-policy'].add_item({'name': 'alice'}, {})
    (id1, rule1) = self.api['api-rule'].add_item(
        {'rule': 'execute[nova:disconnectNetwork()] :- q(x)'}, {},
        context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 0)
    (id2, rule2) = self.api['api-rule'].add_item(
        {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)
    helper.retry_check_function_return_value(
        lambda: client.testkey, "noargs")

    # insert more data (which DOES NOT cause an execution)
    (id3, rule3) = self.api['api-rule'].add_item(
        {'rule': 'q(2)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)

    # delete all data
    self.api['api-rule'].delete_item(
        id2, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)
    self.api['api-rule'].delete_item(
        id3, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)

    # insert data (which now DOES cause an execution)
    (id4, rule3) = self.api['api-rule'].add_item(
        {'rule': 'q(3)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 2)
    helper.retry_check_function_return_value(
        lambda: client.testkey, "noargs")

    # delete policy rule
    self.api['api-rule'].delete_item(
        id1, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 2)
def test_policy_datasource(self):
    """A rule over a datasource table is answered from polled data."""
    self.create_policy('alpha')
    self.create_fake_datasource('fake')

    fake_service = self.node.service_object('fake')
    fake_service.state = {'fake_table': set([(1, 2)])}
    fake_service.poll()

    self.insert_rule('q(x) :- fake:fake_table(x,y)', 'alpha')
    helper.retry_check_function_return_value(
        lambda: self.query('q', 'alpha'),
        {'results': [{'data': (1,)}]})
def test_policy(self):
    """Rules inserted into a policy are returned by a table query.

    Fix: ``sorted()`` over a list of dicts raises TypeError on
    Python 3 (dicts are not orderable), so sort on the 'data' tuple
    explicitly — matching the sibling version of this test.
    """
    self.create_policy('alpha')
    self.insert_rule('q(1, 2) :- true', 'alpha')
    self.insert_rule('q(2, 3) :- true', 'alpha')
    helper.retry_check_function_return_value(
        lambda: sorted(self.query('q', 'alpha')['results'],
                       key=lambda x: x['data']),
        sorted([{'data': (1, 2)}, {'data': (2, 3)}],
               key=lambda x: x['data']))
    # the query result contains nothing besides 'results'
    helper.retry_check_function_return_value(
        lambda: list(self.query('q', 'alpha').keys()), ['results'])
def test_subscribe_snapshot(self):
    """Subscribing delivers an initial snapshot of the publisher table.

    Fix: stop the DSE node at the end so it does not leak into other
    tests (the sibling variant of this test already does this).
    """
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    test1.subscribe('test2', 'fake_table')
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    self.assertEqual(test1.last_msg['data'], test2.state['fake_table'])
    node.stop()
def test_subscribe_snapshot(self):
    """A new subscriber immediately receives the publisher's snapshot."""
    node = helper.make_dsenode_new_partition('testnode')
    subscriber = fake_datasource.FakeDataSource('test1')
    publisher = fake_datasource.FakeDataSource('test2')
    node.register_service(subscriber)
    node.register_service(publisher)

    subscriber.subscribe('test2', 'fake_table')
    helper.retry_check_function_return_value(
        lambda: hasattr(subscriber, 'last_msg'), True)
    self.assertEqual(subscriber.last_msg['data'],
                     publisher.state['fake_table'])
    node.stop()
def test_policy(self):
    """Inserted rules are visible via a table query, order-independent."""
    self.create_policy('alpha')
    self.insert_rule('q(1, 2) :- true', 'alpha')
    self.insert_rule('q(2, 3) :- true', 'alpha')

    by_data = lambda x: x['data']
    helper.retry_check_function_return_value(
        lambda: sorted(self.query('q', 'alpha')['results'], key=by_data),
        sorted([{'data': (1, 2)}, {'data': (2, 3)}], key=by_data))
    # result dict has only the 'results' key
    helper.retry_check_function_return_value(
        lambda: list(self.query('q', 'alpha').keys()), ['results'])
def test_datasource_poll(self):
    """Polling a datasource pushes its table to subscribers only.

    Fix: stop the DSE node at the end to avoid leaking it into other
    tests (the sibling variant of this test already does this).
    """
    node = helper.make_dsenode_new_partition('testnode')
    pub = FakeDataSource('pub')
    sub = FakeDataSource('sub')
    node.register_service(pub)
    node.register_service(sub)

    sub.subscribe('pub', 'fake_table')
    pub.state = {'fake_table': set([(1, 2)])}
    pub.poll()
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set(pub.state['fake_table']))
    # publisher itself must not receive its own publication
    self.assertFalse(hasattr(pub, "last_msg"))
    node.stop()
def test_auto_resub(self):
    """Out-of-sequence updates are dropped and trigger a resubscribe.

    Fix: stop the DSE node at the end to avoid leaking it into other
    tests (the sibling variant of this test already does this).
    """
    node = helper.make_dsenode_new_partition('testnode')
    sub = fake_datasource.FakeDataSource('sub')
    pub = fake_datasource.FakeDataSource('pub')
    node.register_service(sub)
    node.register_service(pub)
    node.start_periodic_tasks()

    sub.subscribe('pub', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(sub, 'last_msg'), True)
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([]))

    sub.receive_data_sequenced(
        'pub', 'p', [[1, 1]], 1, is_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([(1, 1)]))
    # skipping seqnum 2
    sub.receive_data_sequenced(
        'pub', 'p', [[3, 3]], 3, is_snapshot=True)
    # check that out-of-sequence update not applied
    self.assertRaises(
        tenacity.RetryError,
        helper.retry_check_function_return_value,
        lambda: sub.last_msg['data'], set([(3, 3)]))
    # check that resub takes place, setting data to initial state
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([]))
    node.stop()
def test_datasource_request_refresh(self):
    """Refresh requests queue while stopped and drain once started."""
    # neutron polls automatically here, which is why register_service
    # starts its service.
    neutron = self.neutronv2
    neutron.stop()
    self.assertEqual(neutron.refresh_request_queue.qsize(), 0)

    # while stopped, a request stays queued
    neutron.request_refresh()
    self.assertEqual(neutron.refresh_request_queue.qsize(), 1)

    # once running, requests are consumed
    neutron.start()
    neutron.request_refresh()
    helper.retry_check_function_return_value(
        lambda: neutron.refresh_request_queue.qsize(), 0)
def test_policy_create_delete(self):
    """Policy create/delete on one PE replicates to the other PE."""
    # create policy alice in PE1
    self.assertEqual(
        self.pe1.post(suffix='policies',
                      json={'name': 'alice'}).status_code, 201)
    # check policy alice in PE1
    self.assertEqual(self.pe1.get('policies/alice').status_code, 200)
    # check policy alice in PE2
    helper.retry_check_function_return_value(
        lambda: self.pe2.get('policies/alice').status_code, 200)

    # create policy bob in PE2
    self.assertEqual(
        self.pe2.post(suffix='policies',
                      json={'name': 'bob'}).status_code, 201)
    # check policy bob in PE2
    self.assertEqual(self.pe2.get('policies/bob').status_code, 200)
    # check policy bob in PE1
    helper.retry_check_function_return_value(
        lambda: self.pe1.get('policies/bob').status_code, 200)

    # check policy listings
    self.assertEqual(len(self.pe1.get('policies').json()['results']), 4)
    self.assertEqual(len(self.pe2.get('policies').json()['results']), 4)

    # delete policy alice in PE2, and check deleted on both PE
    self.assertEqual(self.pe2.delete('policies/alice').status_code, 200)
    self.assertEqual(self.pe2.get('policies/alice').status_code, 404)
    helper.retry_check_function_return_value(
        lambda: self.pe1.get('policies/alice').status_code, 404)

    # delete policy bob in PE2, and check deleted on both PE
    self.assertEqual(self.pe2.delete('policies/bob').status_code, 200)
    self.assertEqual(self.pe2.get('policies/bob').status_code, 404)
    helper.retry_check_function_return_value(
        lambda: self.pe1.get('policies/bob').status_code, 404)
def test_auto_resub(self):
    """Skipped sequence numbers cause a resubscribe back to the snapshot."""
    node = helper.make_dsenode_new_partition('testnode')
    sub = fake_datasource.FakeDataSource('sub')
    pub = fake_datasource.FakeDataSource('pub')
    node.register_service(sub)
    node.register_service(pub)
    node.start_periodic_tasks()

    sub.subscribe('pub', 'p')
    data_of_last_msg = lambda: sub.last_msg['data']
    helper.retry_check_function_return_value(
        lambda: hasattr(sub, 'last_msg'), True)
    helper.retry_check_function_return_value(data_of_last_msg, set([]))

    sub.receive_data_sequenced('pub', 'p', [[1, 1]], 1, is_snapshot=True)
    helper.retry_check_function_return_value(
        data_of_last_msg, set([(1, 1)]))

    # skipping seqnum 2
    sub.receive_data_sequenced('pub', 'p', [[3, 3]], 3, is_snapshot=True)
    # check that out-of-sequence update not applied
    self.assertRaises(tenacity.RetryError,
                      helper.retry_check_function_return_value,
                      data_of_last_msg, set([(3, 3)]))
    # check that resub takes place, setting data to initial state
    helper.retry_check_function_return_value(data_of_last_msg, set([]))
    node.stop()
def test_auto_resub(self):
    """Auto-resubscribe recovers from a skipped sequence number."""
    config = test_api_base.setup_config(with_fake_datasource=False,
                                        api=False, policy=False)
    node = config['node']
    config['ds_manager'].synchronizer.start()

    sub = fake_datasource.FakeDataSource('sub')
    pub = fake_datasource.FakeDataSource('pub')
    node.register_service(sub)
    node.register_service(pub)

    sub.subscribe('pub', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(sub, 'last_msg'), True)
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([]))

    sub.receive_data_sequenced('pub', 'p', [[1, 1]], 1,
                               is_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([(1, 1)]))

    # skipping seqnum 2
    sub.receive_data_sequenced('pub', 'p', [[3, 3]], 3,
                               is_snapshot=True)
    # check that out-of-sequence update not applied
    self.assertRaises(tenacity.RetryError,
                      helper.retry_check_function_return_value,
                      lambda: sub.last_msg['data'], set([(3, 3)]))
    # check that resub takes place, setting data to initial state
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([]))
    node.stop()
def test_auto_resub(self):
    """Out-of-order sequenced data is rejected and triggers resub."""
    config = test_api_base.setup_config(with_fake_datasource=False,
                                        api=False, policy=False)
    node = config['node']
    config['ds_manager'].synchronizer.start()

    sub = fake_datasource.FakeDataSource('sub')
    pub = fake_datasource.FakeDataSource('pub')
    node.register_service(sub)
    node.register_service(pub)

    sub.subscribe('pub', 'p')
    last_data = lambda: sub.last_msg['data']
    helper.retry_check_function_return_value(
        lambda: hasattr(sub, 'last_msg'), True)
    helper.retry_check_function_return_value(last_data, set([]))

    sub.receive_data_sequenced('pub', 'p', [[1, 1]], 1, is_snapshot=True)
    helper.retry_check_function_return_value(last_data, set([(1, 1)]))

    # skipping seqnum 2
    sub.receive_data_sequenced('pub', 'p', [[3, 3]], 3, is_snapshot=True)
    # check that out-of-sequence update not applied
    self.assertRaises(tenacity.RetryError,
                      helper.retry_check_function_return_value,
                      last_data, set([(3, 3)]))
    # check that resub takes place, setting data to initial state
    helper.retry_check_function_return_value(last_data, set([]))
    node.stop()
def test_intranode_pubsub(self):
    """Publish on one service reaches a same-node subscriber.

    Fix: stop the DSE node at the end to avoid leaking it into other
    tests (the sibling variant of this test already does this).
    """
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    test1.subscribe('test2', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    test2.publish('p', 42)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], 42)
    # the publisher must not receive its own message
    self.assertFalse(hasattr(test2, "last_msg"))
    node.stop()
def test_policy_execute(self):
    """execute[] rule fires per new matching row; deletes do not fire."""
    class NovaClient(object):
        def __init__(self, testkey):
            self.testkey = testkey

        def disconnectNetwork(self, arg1):
            LOG.info("disconnectNetwork called on %s", arg1)
            self.testkey = "arg1=%s" % arg1

    nova = self.nova
    nova.nova_client = NovaClient("testing")
    self.node.register_service(nova)

    # insert rule and data
    self.api['api-policy'].add_item({'name': 'alice'}, {})
    (id1, _) = self.api['api-rule'].add_item(
        {'rule': 'execute[nova:disconnectNetwork(x)] :- q(x)'}, {},
        context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 0)
    (id2, _) = self.api['api-rule'].add_item(
        {'rule': 'q(1)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 1)
    helper.retry_check_function_return_value(
        lambda: nova.nova_client.testkey, "arg1=1")

    # insert more data
    self.api['api-rule'].add_item(
        {'rule': 'q(2)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 2)
    helper.retry_check_function_return_value(
        lambda: nova.nova_client.testkey, "arg1=2")

    # insert irrelevant data
    self.api['api-rule'].add_item(
        {'rule': 'r(3)'}, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 2)

    # delete relevant data
    self.api['api-rule'].delete_item(
        id2, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 2)

    # delete policy rule
    self.api['api-rule'].delete_item(
        id1, {}, context={'policy_id': 'alice'})
    self.assertEqual(len(self.engine.logger.messages), 2)
def test_sub_before_service_exists(self):
    """A subscription made before the publisher exists still delivers."""
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    node.register_service(test1)

    # subscribe to a service that is not registered yet
    test1.subscribe('test2', 'p')
    self.assertFalse(hasattr(test1, "last_msg"))

    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test2)
    test2.publish('p', [42], use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], [42])
    self.assertFalse(hasattr(test2, "last_msg"))
    node.stop()
    node.wait()
def test_intranode_pubsub(self):
    """Same-node publish reaches the subscriber, not the publisher."""
    node = helper.make_dsenode_new_partition('testnode')
    subscriber = fake_datasource.FakeDataSource('test1')
    publisher = fake_datasource.FakeDataSource('test2')
    node.register_service(subscriber)
    node.register_service(publisher)

    subscriber.subscribe('test2', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(subscriber, 'last_msg'), True)
    publisher.publish('p', 42)
    helper.retry_check_function_return_value(
        lambda: subscriber.last_msg['data'], 42)
    self.assertFalse(hasattr(publisher, "last_msg"))
    node.stop()
def test_sub_before_service_exists(self):
    """Subscribing ahead of publisher registration still works."""
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    node.register_service(test1)

    test1.subscribe('test2', 'p')
    # nothing delivered yet: the publisher does not exist
    self.assertFalse(hasattr(test1, "last_msg"))

    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test2)
    test2.publish('p', 42, use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], 42)
    self.assertFalse(hasattr(test2, "last_msg"))
    node.stop()
    node.wait()
def test_datasource_poll(self):
    """Polling publishes a snapshot of the datasource's table."""
    node = helper.make_dsenode_new_partition('testnode')
    node.always_snapshot = True  # Note(ekcs): this test expects snapshot
    pub = fake_datasource.FakeDataSource('pub')
    sub = fake_datasource.FakeDataSource('sub')
    node.register_service(pub)
    node.register_service(sub)

    sub.subscribe('pub', 'fake_table')
    pub.state = {'fake_table': set([(1, 2)])}
    pub.poll()
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set(pub.state['fake_table']))
    self.assertFalse(hasattr(pub, "last_msg"))
    node.stop()
def test_datasource_pub(self, nova_mock):
    """A real driver service can publish to a fake subscriber."""
    node = helper.make_dsenode_new_partition('testnode')
    nova = nova_driver.NovaDriver(
        name='nova', args=helper.datasource_openstack_args())
    subscriber = fake_datasource.FakeDataSource('test')
    node.register_service(nova)
    node.register_service(subscriber)

    subscriber.subscribe('nova', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(subscriber, 'last_msg'), True)
    nova.publish('p', [42], use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: subscriber.last_msg['data'], [42])
    self.assertFalse(hasattr(nova, "last_msg"))
    node.stop()
def test_intranode_pubsub2(self):
    """Same as test_intranode_pubsub but with opposite ordering.

    (Ordering does matter with internode_pubsub).

    Fix: stop the DSE node at the end to avoid leaking it into other
    tests (the sibling variant of this test already does this).
    """
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    test2.subscribe('test1', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test2, 'last_msg'), True)
    test1.publish('p', 42)
    helper.retry_check_function_return_value(
        lambda: test2.last_msg['data'], 42)
    # publisher must not receive its own message
    self.assertFalse(hasattr(test1, "last_msg"))
    node.stop()
def test_datasource_pub(self, nova_mock):
    """NovaDriver publishes snapshot data to a subscribed service."""
    node = helper.make_dsenode_new_partition('testnode')
    nova = nova_driver.NovaDriver(
        name='nova', args=helper.datasource_openstack_args())
    test = fake_datasource.FakeDataSource('test')
    node.register_service(nova)
    node.register_service(test)

    test.subscribe('nova', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test, 'last_msg'), True)
    nova.publish('p', 42, use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test.last_msg['data'], 42)
    self.assertFalse(hasattr(nova, "last_msg"))
    node.stop()
def test_replicated_pe_exec(self):
    """Test correct local leader behavior with 2 PEs requesting exec"""
    node1 = helper.make_dsenode_new_partition('testnode1')
    node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
    dsd = fake_datasource.FakeDataSource('dsd')
    # faster time-out for testing
    dsd.LEADER_TIMEOUT = 2
    pe1 = agnostic.DseRuntime('pe1')
    pe2 = agnostic.DseRuntime('pe2')
    node1.register_service(pe1)
    node2.register_service(pe2)
    node1.register_service(dsd)
    assert dsd._running
    assert node1._running
    assert node2._running
    assert node1._control_bus._running

    # first exec request obeyed and leader set
    pe2.rpc('dsd', 'request_execute',
            {'action': 'fake_act',
             'action_args': {'name': 'testnode2'}, 'wait': True})
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 1)
    self.assertEqual(dsd._leader_node_id, 'testnode2')

    # second exec request from leader obeyed and leader remains
    pe2.rpc('dsd', 'request_execute',
            {'action': 'fake_act',
             'action_args': {'name': 'testnode2'}, 'wait': True})
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 2)
    self.assertEqual(dsd._leader_node_id, 'testnode2')

    # exec request from non-leader not obeyed
    pe1.rpc('dsd', 'request_execute',
            {'action': 'fake_act',
             'action_args': {'name': 'testnode1'}, 'wait': True})
    self.assertRaises(
        tenacity.RetryError,
        helper.retry_check_function_return_value,
        lambda: len(dsd.exec_history), 3)

    # leader vacated after heartbeat stops
    node2.stop()
    node2.wait()
    helper.retry_check_function_return_value(
        lambda: dsd._leader_node_id, None)

    # next exec request obeyed and new leader set
    pe1.rpc('dsd', 'request_execute',
            {'action': 'fake_act',
             'action_args': {'name': 'testnode1'}, 'wait': True})
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 3)
    self.assertEqual(dsd._leader_node_id, 'testnode1')
    node1.stop()
    node2.stop()
def test_internode_pubsub(self):
    """Publish on one node; the subscriber on the other node receives it."""
    node1 = helper.make_dsenode_new_partition('testnode1')
    test1 = fake_datasource.FakeDataSource('test1')
    node1.register_service(test1)
    node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
    test2 = fake_datasource.FakeDataSource('test2')
    node2.register_service(test2)

    test1.subscribe('test2', 'p')
    # wait for the cross-node subscription to take effect
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    test2.publish('p', [42], use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], [42])
    # the publisher itself never sees the message
    self.assertFalse(hasattr(test2, "last_msg"))
    node1.stop()
    node2.stop()
def test_internode_partial_unsub(self):
    """Unsubscribing one table must not cancel other subscriptions.

    Fix: stop both DSE nodes at the end of the test; they were
    previously leaked, unlike in the other internode tests which
    call ``stop()`` on every node they create.
    """
    node1 = helper.make_dsenode_new_partition('testnode1')
    node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node1.register_service(test1)
    node2.register_service(test2)

    test1.subscribe('test2', 'p')
    test1.subscribe('test2', 'q')
    test1.unsubscribe('test2', 'q')  # unsub from q should not affect p
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    test2.publish('p', 42)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], 42)
    # publisher must not receive its own message
    self.assertFalse(hasattr(test2, "last_msg"))
    node1.stop()
    node2.stop()
def test_intranode_partial_unsub(self):
    """Unsubscribing table q leaves the subscription to table p intact."""
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    test1.subscribe('test2', 'p')
    test1.subscribe('test2', 'q')
    # unsub from q should not affect p
    test1.unsubscribe('test2', 'q')
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    test2.publish('p', [42], use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], [42])
    # the publisher never receives its own message
    self.assertFalse(hasattr(test2, "last_msg"))
    node.stop()
def test_intranode_pubsub2(self):
    # same as test_intranode_pubsub but with opposite ordering.
    # (Ordering does matter with internode_pubsub).
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    # here the subscriber is test2 and the publisher is test1
    test2.subscribe('test1', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test2, 'last_msg'), True)
    test1.publish('p', 42, use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test2.last_msg['data'], 42)
    self.assertFalse(hasattr(test1, "last_msg"))
    node.stop()
def test_multiservice_pubsub(self):
    """Pub/sub works with several services registered on one node."""
    node1 = helper.make_dsenode_new_partition('testnode1')
    test1 = FakeDataSource('test1')
    test2 = FakeDataSource('test2')
    node1.register_service(test1)
    node1.register_service(test2)
    node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
    test3 = FakeDataSource('test3')
    node2.register_service(test3)

    test1.subscribe('test3', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    test3.publish('p', 42)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], 42)
    # neither the uninvolved service nor the publisher gets the message
    self.assertFalse(hasattr(test2, "last_msg"))
    self.assertFalse(hasattr(test3, "last_msg"))
    # NOTE(review): nodes are not stopped here — presumably cleaned up
    # by a fixture; confirm against the test-case setUp.
def test_datasource_pub(self):
    """Datasource publish reaches the subscriber (novaclient mocked)."""
    node = helper.make_dsenode_new_partition('testnode')
    nova_client = mock.MagicMock()
    # patch novaclient construction so no real API connection is made
    with mock.patch.object(novaclient.client.Client, '__init__',
                           return_value=nova_client):
        nova = nova_driver.NovaDriver(
            name='nova', args=helper.datasource_openstack_args())
        test = fake_datasource.FakeDataSource('test')
        node.register_service(nova)
        node.register_service(test)
        test.subscribe('nova', 'p')
        helper.retry_check_function_return_value(
            lambda: hasattr(test, 'last_msg'), True)
        nova.publish('p', 42)
        helper.retry_check_function_return_value(
            lambda: test.last_msg['data'], 42)
        # publisher must not see its own message
        self.assertFalse(hasattr(nova, "last_msg"))
def test_datasource_poll(self):
    """Polling a datasource publishes its state delta to subscribers."""
    node = helper.make_dsenode_new_partition('testnode')
    pub = fake_datasource.FakeDataSource('pub')
    sub = fake_datasource.FakeDataSource('sub')
    node.register_service(pub)
    node.register_service(sub)

    sub.subscribe('pub', 'fake_table')
    pub.state = {'fake_table': set([(1, 2)])}
    pub.poll()
    # the delta is (rows added, rows removed)
    expected = {'publisher': 'pub',
                'data': (set(pub.state['fake_table']), set([])),
                'table': 'fake_table'}
    helper.retry_check_function_return_value(
        lambda: sub.last_msg, expected)
    self.assertFalse(hasattr(pub, "last_msg"))
    node.stop()
def test_datasource_unsub(self, nova_mock):
    """After unsubscribing, further publications are not delivered."""
    node = helper.make_dsenode_new_partition('testnode')
    nova = nova_driver.NovaDriver(
        name='nova', args=helper.datasource_openstack_args())
    test = fake_datasource.FakeDataSource('test')
    node.register_service(nova)
    node.register_service(test)

    nova.subscribe('test', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(nova, 'last_msg'), True)
    test.publish('p', [42], use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: nova.last_msg['data'], [42])
    self.assertFalse(hasattr(test, "last_msg"))

    nova.unsubscribe('test', 'p')
    test.publish('p', [43], use_snapshot=True)
    # hard to test that the message is never delivered
    time.sleep(0.2)
    # last_msg still holds the pre-unsubscribe payload
    self.assertEqual(nova.last_msg['data'], [42])
    node.stop()
def test_datasource_unsub(self, nova_mock):
    """Unsubscribed services stop receiving publications."""
    node = helper.make_dsenode_new_partition('testnode')
    nova = nova_driver.NovaDriver(name='nova',
                                  args=helper.datasource_openstack_args())
    test = fake_datasource.FakeDataSource('test')
    node.register_service(nova)
    node.register_service(test)

    nova.subscribe('test', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(nova, 'last_msg'), True)
    test.publish('p', 42, use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: nova.last_msg['data'], 42)
    self.assertFalse(hasattr(test, "last_msg"))

    nova.unsubscribe('test', 'p')
    test.publish('p', 43, use_snapshot=True)
    # hard to test that the message is never delivered
    time.sleep(0.2)
    self.assertEqual(nova.last_msg['data'], 42)
    node.stop()
def test_policy_api_model_execute(self):
    """Executing a policy action dispatches to the datasource client."""
    def _execute_api(client, action, action_args):
        # resolve the dotted action name on the client and invoke it
        positional_args = action_args['positional']
        named_args = action_args['named']
        method = reduce(getattr, action.split('.'), client)
        method(*positional_args, **named_args)

    class NovaClient(object):
        def __init__(self, testkey):
            self.testkey = testkey

        def _get_testkey(self):
            return self.testkey

        def disconnectNetwork(self, arg1, arg2, arg3):
            self.testkey = "arg1=%s arg2=%s arg3=%s" % (arg1, arg2, arg3)

    nova_client = NovaClient("testing")
    # wire the fake client into the cage's nova service object
    nova_service = self.cage.services['nova']['object']
    nova_service._execute_api = _execute_api
    nova_service.nova_client = nova_client

    body = {'name': 'nova:disconnectNetwork',
            'args': {'positional': ['value1', 'value2'],
                     'named': {'arg3': 'value3'}}}
    request = helper.FakeRequest(body)
    result = self.api['policy'].execute_action({}, {}, request)
    self.assertEqual(result, {})

    expected_result = "arg1=value1 arg2=value2 arg3=value3"
    helper.retry_check_function_return_value(
        nova_service.nova_client._get_testkey, expected_result)
def test_datasource_unsub(self):
    """Unsubscribe stops delivery (novaclient construction mocked)."""
    node = helper.make_dsenode_new_partition('testnode')
    nova_client = mock.MagicMock()
    # prevent the driver from contacting a real nova endpoint
    with mock.patch.object(novaclient.client.Client, '__init__',
                           return_value=nova_client):
        nova = NovaDriver(
            name='nova', args=helper.datasource_openstack_args())
        test = FakeDataSource('test')
        node.register_service(nova)
        node.register_service(test)
        nova.subscribe('test', 'p')
        helper.retry_check_function_return_value(
            lambda: hasattr(nova, 'last_msg'), True)
        test.publish('p', 42)
        helper.retry_check_function_return_value(
            lambda: nova.last_msg['data'], 42)
        self.assertFalse(hasattr(test, "last_msg"))
        nova.unsubscribe('test', 'p')
        test.publish('p', 43)
        # hard to test that the message is never delivered
        time.sleep(0.2)
        self.assertEqual(nova.last_msg['data'], 42)
def test_datasource_api_model_execute(self):
    """Exercise datasource execute_action: valid, missing and bad inputs.

    Fix: the retry case previously set
    ``cfg.CONF.dse.execute_action_retry = True`` directly and never
    restored it, leaking the setting into later tests.  It now uses
    ``set_override`` with an ``addCleanup``'d ``clear_override``.
    """
    def _execute_api(client, action, action_args):
        # resolve the dotted action name on the client and call it;
        # args default to empty when the body omits them
        positional_args = action_args.get('positional', [])
        named_args = action_args.get('named', {})
        method = reduce(getattr, action.split('.'), client)
        method(*positional_args, **named_args)

    class NovaClient(object):
        def __init__(self, testkey):
            self.testkey = testkey

        def _get_testkey(self):
            return self.testkey

        def disconnect(self, arg1, arg2, arg3):
            self.testkey = "arg1=%s arg2=%s arg3=%s" % (arg1, arg2, arg3)

        def disconnect_all(self):
            self.testkey = "action_has_no_args"

    nova_client = NovaClient("testing")
    args = helper.datasource_openstack_args()
    nova = nova_driver.NovaDriver('nova', args=args)
    self.node.register_service(nova)
    nova.update_from_datasource = mock.MagicMock()
    nova._execute_api = _execute_api
    nova.nova_client = nova_client
    execute_action = self.datasource_model.execute_action

    # Positive test: valid body args, ds_id
    context = {'ds_id': 'nova'}
    body = {'name': 'disconnect',
            'args': {'positional': ['value1', 'value2'],
                     'named': {'arg3': 'value3'}}}
    request = helper.FakeRequest(body)
    result = execute_action({}, context, request)
    self.assertEqual(result, {})
    expected_result = "arg1=value1 arg2=value2 arg3=value3"
    f = nova.nova_client._get_testkey
    helper.retry_check_function_return_value(f, expected_result)

    # Positive test: no body args
    context = {'ds_id': 'nova'}
    body = {'name': 'disconnect_all'}
    request = helper.FakeRequest(body)
    result = execute_action({}, context, request)
    self.assertEqual(result, {})
    expected_result = "action_has_no_args"
    f = nova.nova_client._get_testkey
    helper.retry_check_function_return_value(f, expected_result)

    # Negative test: invalid ds_id
    context = {'ds_id': 'unknown_ds'}
    self.assertRaises(webservice.DataModelException, execute_action,
                      {}, context, request)

    # Negative test: no ds_id
    context = {}
    self.assertRaises(webservice.DataModelException, execute_action,
                      {}, context, request)

    # Negative test: empty body
    context = {'ds_id': 'nova'}
    bad_request = helper.FakeRequest({})
    self.assertRaises(webservice.DataModelException, execute_action,
                      {}, context, bad_request)

    # Negative test: no body name/action
    context = {'ds_id': 'nova'}
    body = {'args': {'positional': ['value1', 'value2'],
                     'named': {'arg3': 'value3'}}}
    bad_request = helper.FakeRequest(body)
    self.assertRaises(webservice.DataModelException, execute_action,
                      {}, context, bad_request)

    # Positive test with retry: no body args
    cfg.CONF.set_override('execute_action_retry', True, group='dse')
    self.addCleanup(cfg.CONF.clear_override, 'execute_action_retry',
                    group='dse')
    context = {'ds_id': 'nova'}
    body = {'name': 'disconnect_all'}
    request = helper.FakeRequest(body)
    result = execute_action({}, context, request)
    self.assertEqual(result, {})
    expected_result = "action_has_no_args"
    f = nova.nova_client._get_testkey
    helper.retry_check_function_return_value(f, expected_result)
def test_policy_rule_evaluation(self):
    """Rule evaluation stays consistent across two replicated PEs.

    Fix: removed a large block of commented-out debug code
    (``time.sleep`` calls, row-dump prints and an ``assert False``)
    that was left behind and obscured the test flow.
    """
    try:
        # create policy alice in PE1
        self.assertEqual(self.pe1.post(
            suffix='policies', json={'name': 'alice'}).status_code, 201)
        # add rule to PE1
        j = {'rule': 'p(x) :- q(x)', 'name': 'rule0'}
        res = self.pe1.post(suffix='policies/alice/rules', json=j)
        self.assertEqual(res.status_code, 201)
        r_id = res.json()['id']
        # add data to PE1
        j = {'rule': ' q( 1 ) ', 'name': 'rule1'}
        res = self.pe1.post(suffix='policies/alice/rules', json=j)
        self.assertEqual(res.status_code, 201)
        q1_id = res.json()['id']
        # add data to PE2
        j = {'rule': ' q ( 2 ) ', 'name': 'rule2'}
        self.assertEqual(self.pe2.post(
            suffix='policies/alice/rules', json=j).status_code, 201)

        # eval on PE1
        helper.retry_check_function_return_value_table(
            lambda: [x['data'] for x in self.pe1.get(
                'policies/alice/tables/p/rows').json()['results']],
            [[1], [2]])
        # eval on PE2
        helper.retry_check_function_return_value_table(
            lambda: [x['data'] for x in self.pe2.get(
                'policies/alice/tables/p/rows').json()['results']],
            [[1], [2]])

        # remove q(1) and re-evaluate on both PEs
        self.assertEqual(self.pe1.delete(
            suffix='policies/alice/rules/%s' % q1_id).status_code, 200)
        # eval on PE1
        helper.retry_check_function_return_value_table(
            lambda: [x['data'] for x in self.pe1.get(
                'policies/alice/tables/p/rows').json()['results']],
            [[2]])
        # eval on PE2
        helper.retry_check_function_return_value_table(
            lambda: [x['data'] for x in self.pe2.get(
                'policies/alice/tables/p/rows').json()['results']],
            [[2]])

        # removing the defining rule makes table p disappear everywhere
        self.assertEqual(self.pe2.delete(
            suffix='policies/alice/rules/%s' % r_id).status_code, 200)
        helper.retry_check_function_return_value(lambda: self.pe1.get(
            'policies/alice/tables/p/rows').status_code, 404)
        helper.retry_check_function_return_value(lambda: self.pe2.get(
            'policies/alice/tables/p/rows').status_code, 404)
    except Exception:
        self.dump_nodes_logs()
        raise
def test_subs_list_update_aggregated_by_service(self):
    """Per-service subscribed-table lists track sub/unsub/resub."""
    part = helper.get_new_partition()
    nodes = []
    services = []
    num_nodes = 3
    for i in range(num_nodes):
        n = self._create_node_with_services(nodes, services, i, part)
        n.start()

    # add subscriptions
    for i in range(2, num_nodes):
        for s2 in services[i]:
            for s1 in services[i - 1]:
                s1.subscribe(s2.service_id, 'table-A')
                s2.subscribe(s1.service_id, 'table-B')
    services[1][0].subscribe(services[2][0].service_id, 'table-C')
    services[2][1].subscribe(services[2][0].service_id, 'table-D')

    # constructed expected results
    expected_subbed_tables = {
        nodes[1].node_id: {
            services[1][0].service_id: set(['table-B']),
        },
        nodes[2].node_id: {
            services[2][0].service_id:
                set(['table-A', 'table-C', 'table-D']),
            services[2][1].service_id: set(['table-A']),
        },
    }

    # validate
    def _validate_subbed_tables(node):
        for s in node.get_services():
            sid = s.service_id
            subscribed_tables = node.service_object(
                sid)._published_tables_with_subscriber
            self.assertEqual(
                subscribed_tables,
                expected_subbed_tables[node.node_id][sid],
                '%s has incorrect subscribed tables list' % sid)
        return True

    for n in nodes:
        helper.retry_check_function_return_value(
            lambda: _validate_subbed_tables(n), True)

    # selectively unsubscribe
    services[1][0].unsubscribe(services[2][0].service_id, 'table-A')
    # note that services[2][1] still subscribes to 'table-B'
    services[2][0].unsubscribe(services[1][0].service_id, 'table-B')
    # extraneous unsubscribe
    services[2][0].unsubscribe(services[1][0].service_id, 'table-None')
    # update expected results
    expected_subbed_tables[nodes[2].node_id][
        services[2][0].service_id] = set(['table-C', 'table-D'])
    for n in nodes:
        helper.retry_check_function_return_value(
            lambda: _validate_subbed_tables(n), True)

    # resubscribe
    services[1][0].subscribe(services[2][0].service_id, 'table-A')
    services[2][0].subscribe(services[1][0].service_id, 'table-B')
    # update expected results
    expected_subbed_tables[nodes[2].node_id][
        services[2][0].service_id] = set(
            ['table-A', 'table-C', 'table-D'])
    for n in nodes:
        helper.retry_check_function_return_value(
            lambda: _validate_subbed_tables(n), True)
def test_replicated_pe_exec(self):
    """Test correct local leader behavior with 2 PEs requesting exec"""
    node1 = helper.make_dsenode_new_partition('testnode1')
    node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
    dsd = fake_datasource.FakeDataSource('dsd')
    # faster time-out for testing
    dsd.LEADER_TIMEOUT = 2
    pe1 = agnostic.DseRuntime('pe1')
    pe2 = agnostic.DseRuntime('pe2')
    node1.register_service(pe1)
    node2.register_service(pe2)
    node1.register_service(dsd)
    assert dsd._running
    assert node1._running
    assert node2._running
    assert node1._control_bus._running

    req_node2 = {'action': 'fake_act',
                 'action_args': {'name': 'testnode2'},
                 'wait': True}
    req_node1 = {'action': 'fake_act',
                 'action_args': {'name': 'testnode1'},
                 'wait': True}

    # first exec request obeyed and leader set
    pe2.rpc('dsd', 'request_execute', req_node2)
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 1)
    self.assertEqual(dsd._leader_node_id, 'testnode2')

    # second exec request from leader obeyed and leader remains
    pe2.rpc('dsd', 'request_execute', req_node2)
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 2)
    self.assertEqual(dsd._leader_node_id, 'testnode2')

    # exec request from non-leader not obeyed
    pe1.rpc('dsd', 'request_execute', req_node1)
    self.assertRaises(tenacity.RetryError,
                      helper.retry_check_function_return_value,
                      lambda: len(dsd.exec_history), 3)

    # leader vacated after heartbeat stops
    node2.stop()
    node2.wait()
    helper.retry_check_function_return_value(
        lambda: dsd._leader_node_id, None)

    # next exec request obeyed and new leader set
    pe1.rpc('dsd', 'request_execute', req_node1)
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 3)
    self.assertEqual(dsd._leader_node_id, 'testnode1')
    node1.stop()
    node2.stop()
def test_subs_list_update_aggregated_by_service(self):
    """Subscribed-table bookkeeping follows sub, unsub and resub."""
    part = helper.get_new_partition()
    nodes = []
    services = []
    num_nodes = 3
    for idx in range(num_nodes):
        new_node = self._create_node_with_services(
            nodes, services, idx, part)
        new_node.start()

    # add subscriptions between services of adjacent nodes
    for idx in range(2, num_nodes):
        for svc_hi in services[idx]:
            for svc_lo in services[idx - 1]:
                svc_lo.subscribe(svc_hi.service_id, 'table-A')
                svc_hi.subscribe(svc_lo.service_id, 'table-B')
    services[1][0].subscribe(services[2][0].service_id, 'table-C')
    services[2][1].subscribe(services[2][0].service_id, 'table-D')

    # constructed expected results
    expected_subbed_tables = {}
    expected_subbed_tables[nodes[1].node_id] = {}
    expected_subbed_tables[nodes[2].node_id] = {}
    expected_subbed_tables[nodes[1].node_id][
        services[1][0].service_id] = set(['table-B'])
    expected_subbed_tables[nodes[2].node_id][
        services[2][0].service_id] = set(
            ['table-A', 'table-C', 'table-D'])
    expected_subbed_tables[nodes[2].node_id][
        services[2][1].service_id] = set(['table-A'])

    # validate every service's published-tables-with-subscriber set
    def _validate_subbed_tables(node):
        for svc in node.get_services():
            sid = svc.service_id
            subscribed_tables = node.service_object(
                sid)._published_tables_with_subscriber
            self.assertEqual(
                subscribed_tables,
                expected_subbed_tables[node.node_id][sid],
                '%s has incorrect subscribed tables list' % sid)
        return True

    for node in nodes:
        helper.retry_check_function_return_value(
            lambda: _validate_subbed_tables(node), True)

    # selectively unsubscribe
    services[1][0].unsubscribe(services[2][0].service_id, 'table-A')
    # note that services[2][1] still subscribes to 'table-B'
    services[2][0].unsubscribe(services[1][0].service_id, 'table-B')
    # extraneous unsubscribe
    services[2][0].unsubscribe(services[1][0].service_id, 'table-None')
    # update expected results
    expected_subbed_tables[nodes[2].node_id][
        services[2][0].service_id] = set(['table-C', 'table-D'])
    for node in nodes:
        helper.retry_check_function_return_value(
            lambda: _validate_subbed_tables(node), True)

    # resubscribe
    services[1][0].subscribe(services[2][0].service_id, 'table-A')
    services[2][0].subscribe(services[1][0].service_id, 'table-B')
    # update expected results
    expected_subbed_tables[nodes[2].node_id][
        services[2][0].service_id] = set(
            ['table-A', 'table-C', 'table-D'])
    for node in nodes:
        helper.retry_check_function_return_value(
            lambda: _validate_subbed_tables(node), True)
def test_policy_rule_crud(self):
    """CRUD on rules stays in sync between two replicated PEs."""
    try:
        # create policy alice in PE1
        self.assertEqual(
            self.pe1.post(suffix='policies',
                          json={'name': 'alice'}).status_code, 201)
        # add rule to PE1
        j = {'rule': 'p(x) :- q(x)', 'name': 'rule1'}
        self.assertEqual(
            self.pe1.post(suffix='policies/alice/rules',
                          json=j).status_code, 201)
        self.assertEqual(
            self.pe1.get('policies/alice/rules').status_code, 200)
        self.assertEqual(
            len(self.pe1.get('policies/alice/rules').json()['results']),
            1)
        # retry necessary because of synchronization
        helper.retry_check_function_return_value(
            lambda: len(
                self.pe2.get('policies/alice/rules').json()['results']),
            1)
        # add rule to PE2
        j = {'rule': 'q(1)', 'name': 'rule2'}
        self.assertEqual(
            self.pe2.post(suffix='policies/alice/rules',
                          json=j).status_code, 201)
        # check 2 rule in each pe
        self.assertEqual(
            len(self.pe2.get('policies/alice/rules').json()['results']),
            2)
        self.assertEqual(
            len(self.pe1.get('policies/alice/rules').json()['results']),
            2)
        # grab rule IDs
        rules = self.pe2.get('policies/alice/rules').json()['results']
        id1 = next(x['id'] for x in rules if x['name'] == 'rule1')
        id2 = next(x['id'] for x in rules if x['name'] == 'rule2')
        # show rules by id
        self.assertEqual(
            self.pe1.get('policies/alice/rules/%s' % id1).status_code,
            200)
        self.assertEqual(
            self.pe2.get('policies/alice/rules/%s' % id1).status_code,
            200)
        self.assertEqual(
            self.pe1.get('policies/alice/rules/%s' % id2).status_code,
            200)
        self.assertEqual(
            self.pe2.get('policies/alice/rules/%s' % id2).status_code,
            200)
        # list tables
        self.assertEqual(
            len(self.pe1.get('policies/alice/tables').json()['results']),
            2)
        self.assertEqual(
            len(self.pe2.get('policies/alice/tables').json()['results']),
            2)
        # show tables
        self.assertEqual(
            self.pe1.get('policies/alice/tables/p').status_code, 200)
        self.assertEqual(
            self.pe2.get('policies/alice/tables/p').status_code, 200)
        self.assertEqual(
            self.pe1.get('policies/alice/tables/q').status_code, 200)
        self.assertEqual(
            self.pe2.get('policies/alice/tables/q').status_code, 200)
        # delete from PE1 and check both have 1 rule left
        self.assertEqual(
            self.pe1.delete(
                suffix='policies/alice/rules/%s' % id1).status_code, 200)
        self.assertEqual(
            len(self.pe1.get('policies/alice/rules').json()['results']),
            1)
        self.assertEqual(
            len(self.pe2.get('policies/alice/rules').json()['results']),
            1)
        # delete from PE2 and check both have 0 rules left
        self.assertEqual(
            self.pe2.delete(
                suffix='policies/alice/rules/%s' % id2).status_code, 200)
        self.assertEqual(
            len(self.pe1.get('policies/alice/rules').json()['results']),
            0)
        self.assertEqual(
            len(self.pe2.get('policies/alice/rules').json()['results']),
            0)
    except Exception:
        self.dump_nodes_logs()
        raise
def test_policy_rule_evaluation(self):
    """Derived table p tracks rule/data changes across both PEs."""
    def _rows(pe):
        # current rows of table p on the given policy engine
        return [x['data'] for x in
                pe.get('policies/alice/tables/p/rows').json()['results']]

    try:
        # create policy alice in PE1
        self.assertEqual(self.pe1.post(
            suffix='policies', json={'name': 'alice'}).status_code, 201)
        # add rule to PE1
        j = {'rule': 'p(x) :- q(x)', 'name': 'rule0'}
        res = self.pe1.post(suffix='policies/alice/rules', json=j)
        self.assertEqual(res.status_code, 201)
        r_id = res.json()['id']
        # add data to PE1
        j = {'rule': ' q( 1 ) ', 'name': 'rule1'}
        res = self.pe1.post(suffix='policies/alice/rules', json=j)
        self.assertEqual(res.status_code, 201)
        q1_id = res.json()['id']
        # add data to PE2
        j = {'rule': ' q ( 2 ) ', 'name': 'rule2'}
        self.assertEqual(self.pe2.post(
            suffix='policies/alice/rules', json=j).status_code, 201)

        # eval on PE1
        helper.retry_check_function_return_value_table(
            lambda: _rows(self.pe1), [[1], [2]])
        # eval on PE2
        helper.retry_check_function_return_value_table(
            lambda: _rows(self.pe2), [[1], [2]])

        self.assertEqual(self.pe1.delete(
            suffix='policies/alice/rules/%s' % q1_id).status_code, 200)
        # eval on PE1
        helper.retry_check_function_return_value_table(
            lambda: _rows(self.pe1), [[2]])
        # eval on PE2
        helper.retry_check_function_return_value_table(
            lambda: _rows(self.pe2), [[2]])

        # deleting the rule removes table p entirely
        self.assertEqual(self.pe2.delete(
            suffix='policies/alice/rules/%s' % r_id).status_code, 200)
        helper.retry_check_function_return_value(
            lambda: self.pe1.get(
                'policies/alice/tables/p/rows').status_code, 404)
        helper.retry_check_function_return_value(
            lambda: self.pe2.get(
                'policies/alice/tables/p/rows').status_code, 404)
    except Exception:
        self.dump_nodes_logs()
        raise
def test_datasource_api_model_execute(self):
    """Datasource execute_action: positive, negative and retry cases."""
    def _execute_api(client, action, action_args):
        # resolve the dotted action path on the client and invoke it;
        # missing arg groups default to empty
        positional_args = action_args.get('positional', [])
        named_args = action_args.get('named', {})
        method = reduce(getattr, action.split('.'), client)
        method(*positional_args, **named_args)

    class NovaClient(object):
        def __init__(self, testkey):
            self.testkey = testkey

        def _get_testkey(self):
            return self.testkey

        def disconnect(self, arg1, arg2, arg3):
            self.testkey = "arg1=%s arg2=%s arg3=%s" % (arg1, arg2, arg3)

        def disconnect_all(self):
            self.testkey = "action_has_no_args"

    nova_client = NovaClient("testing")
    args = helper.datasource_openstack_args()
    nova = nova_driver.NovaDriver('nova', args=args)
    nova.nova_client = nova_client
    nova.update_from_datasource = mock.MagicMock()
    nova._execute_api = _execute_api
    self.node.register_service(nova)
    execute_action = self.datasource_model.execute_action
    get_testkey = nova.nova_client._get_testkey

    # Positive test: valid body args, ds_id
    context = {'ds_id': 'nova'}
    body = {'name': 'disconnect',
            'args': {'positional': ['value1', 'value2'],
                     'named': {'arg3': 'value3'}}}
    request = helper.FakeRequest(body)
    self.assertEqual(execute_action({}, context, request), {})
    helper.retry_check_function_return_value(
        get_testkey, "arg1=value1 arg2=value2 arg3=value3")

    # Positive test: no body args
    context = {'ds_id': 'nova'}
    body = {'name': 'disconnect_all'}
    request = helper.FakeRequest(body)
    self.assertEqual(execute_action({}, context, request), {})
    helper.retry_check_function_return_value(
        get_testkey, "action_has_no_args")

    # Negative test: invalid ds_id
    context = {'ds_id': 'unknown_ds'}
    self.assertRaises(webservice.DataModelException, execute_action,
                      {}, context, request)

    # Negative test: no ds_id
    context = {}
    self.assertRaises(webservice.DataModelException, execute_action,
                      {}, context, request)

    # Negative test: empty body
    context = {'ds_id': 'nova'}
    bad_request = helper.FakeRequest({})
    self.assertRaises(webservice.DataModelException, execute_action,
                      {}, context, bad_request)

    # Negative test: no body name/action
    context = {'ds_id': 'nova'}
    body = {'args': {'positional': ['value1', 'value2'],
                     'named': {'arg3': 'value3'}}}
    bad_request = helper.FakeRequest(body)
    self.assertRaises(webservice.DataModelException, execute_action,
                      {}, context, bad_request)

    # Positive test with retry: no body args
    # NOTE(review): this flips a global config flag without restoring
    # it — later tests see execute_action_retry=True; confirm intent.
    cfg.CONF.dse.execute_action_retry = True
    context = {'ds_id': 'nova'}
    body = {'name': 'disconnect_all'}
    request = helper.FakeRequest(body)
    self.assertEqual(execute_action({}, context, request), {})
    helper.retry_check_function_return_value(
        get_testkey, "action_has_no_args")