def test_auto_resub(self):
    node = helper.make_dsenode_new_partition('testnode')
    sub = fake_datasource.FakeDataSource('sub')
    pub = fake_datasource.FakeDataSource('pub')
    node.register_service(sub)
    node.register_service(pub)
    node.start_periodic_tasks()
    sub.subscribe('pub', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(sub, 'last_msg'), True)
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([]))

    sub.receive_data_sequenced(
        'pub', 'p', [[1, 1]], 1, is_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([(1, 1)]))
    # skipping seqnum 2
    sub.receive_data_sequenced(
        'pub', 'p', [[3, 3]], 3, is_snapshot=True)
    # check that out-of-sequence update not applied
    self.assertRaises(
        tenacity.RetryError,
        helper.retry_check_function_return_value,
        lambda: sub.last_msg['data'], set([(3, 3)]))

    # check that resub takes place, setting data to initial state
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([]))

def test_auto_resub(self):
    node = helper.make_dsenode_new_partition('testnode')
    sub = fake_datasource.FakeDataSource('sub')
    pub = fake_datasource.FakeDataSource('pub')
    node.register_service(sub)
    node.register_service(pub)
    node.start_periodic_tasks()
    sub.subscribe('pub', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(sub, 'last_msg'), True)
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([]))

    sub.receive_data_sequenced('pub', 'p', [[1, 1]], 1, is_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([(1, 1)]))
    # skipping seqnum 2
    sub.receive_data_sequenced('pub', 'p', [[3, 3]], 3, is_snapshot=True)
    # check that out-of-sequence update not applied
    self.assertRaises(
        tenacity.RetryError,
        helper.retry_check_function_return_value,
        lambda: sub.last_msg['data'], set([(3, 3)]))

    # check that resub takes place, setting data to initial state
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set([]))
    node.stop()

def test_policy_data_update(self):
    """Test policy correctly processes initial data snapshot and update."""
    node = helper.make_dsenode_new_partition('testnode')
    node.always_snapshot = False
    data = fake_datasource.FakeDataSource('data')
    engine = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID)
    node.register_service(data)
    node.register_service(engine)

    engine.create_policy('policy1')
    engine.create_policy('data')
    self.insert_rule(engine, 'p(x) :- data:fake_table(x)', 'policy1')
    data.state = {'fake_table': set([(1, ), (2, )])}
    data.poll()
    helper.retry_check_db_equal(
        engine, 'p(x)', 'p(1) p(2)', target='policy1')
    data.state = {'fake_table': set([(1, ), (2, ), (3, )])}
    data.poll()
    helper.retry_check_db_equal(
        engine, 'p(x)', 'p(1) p(2) p(3)', target='policy1')
    self.assertFalse(hasattr(engine, "last_msg"))
    node.stop()

def test_policy_data_late_sub(self):
    """Test policy correctly processes data on late subscribe."""
    node = helper.make_dsenode_new_partition('testnode')
    data = fake_datasource.FakeDataSource('data')
    engine = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID)
    node.register_service(data)
    node.register_service(engine)

    engine.create_policy('policy1')
    engine.create_policy('data', kind=datalog_base.DATASOURCE_POLICY_TYPE)
    data.state = {'fake_table': set([(1, ), (2, )])}
    data.poll()
    self.insert_rule(engine, 'p(x) :- data:fake_table(x)', 'policy1')
    helper.retry_check_db_equal(
        engine, 'p(x)', 'p(1) p(2)', target='policy1')
    data.state = {'fake_table': set([(1, ), (2, ), (3, )])}
    data.poll()
    helper.retry_check_db_equal(
        engine, 'p(x)', 'p(1) p(2) p(3)', target='policy1')
    self.assertFalse(hasattr(engine, "last_msg"))
    node.stop()

def setup_config(with_fake_datasource=True):
    """Setup DseNode for testing.

    :param with_fake_datasource: if True, register a FakeDataSource
        service named 'data' on the test node
    """
    cfg.CONF.set_override('distributed_architecture', True)
    # Load the fake driver.
    cfg.CONF.set_override(
        'drivers', ['congress.tests.fake_datasource.FakeDataSource'])
    node = helper.make_dsenode_new_partition("testnode")

    services = harness.create2(node=node)

    # Always register engine and fake datasource
    # engine = Dse2Runtime('engine')
    # node.register_service(engine)
    data = None
    if with_fake_datasource:
        data = fake_datasource.FakeDataSource('data')
        node.register_service(data)

    # Register provided apis (and no others)
    # (ResourceManager inherits from DataService)
    # api_map = {a.name: a for a in api}
    # api_resource_mgr = application.ResourceManager()
    # router.APIRouterV1(api_resource_mgr, api)
    # node.register_service(api_resource_mgr)

    engine = services[harness.ENGINE_SERVICE_NAME]
    api = services['api']
    return {'node': node, 'engine': engine, 'data': data, 'api': api}

def setup_config(with_fake_datasource=True, node_id='testnode',
                 same_partition_as_node=None, api=True, policy=True,
                 datasources=True):
    """Setup DseNode for testing.

    :param with_fake_datasource: if True, register a FakeDataSource
        service named 'data' on the test node
    :param node_id: ID of the DseNode to create
    :param same_partition_as_node: if given, create the new node in the
        same partition as this node; otherwise create a new partition
    :param api: if True, create the API services
    :param policy: if True, create the policy engine and library services
    :param datasources: if True, enable datasources and the ds_manager
    """
    config.set_config_defaults()
    # Load the fake driver.
    cfg.CONF.set_override(
        'drivers', ['congress.tests.fake_datasource.FakeDataSource'])

    if same_partition_as_node is None:
        node = helper.make_dsenode_new_partition(node_id)
    else:
        node = helper.make_dsenode_same_partition(
            same_partition_as_node, node_id)
    if datasources:
        cfg.CONF.set_override('datasources', True)

    with mock.patch.object(periodics, 'PeriodicWorker', autospec=True):
        services = harness.create2(
            existing_node=node, policy_engine=policy, api=api,
            datasources=datasources)

    data = None
    if with_fake_datasource:
        data = fake_datasource.FakeDataSource('data')
        # FIXME(ekcs): this is a hack to prevent the synchronizer from
        # attempting to delete this DSD because it's not in DB
        data.type = 'no_sync_datasource_driver'
        node.register_service(data)

    engine_service = None
    library_service = None
    api_service = None
    ds_manager = None  # avoid NameError in return when datasources=False
    if policy:
        engine_service = services[api_base.ENGINE_SERVICE_ID]
        library_service = services[api_base.LIBRARY_SERVICE_ID]
    if api:
        api_service = services['api']
    if datasources:
        ds_manager = services['ds_manager']
    return {'node': node, 'engine': engine_service,
            'library': library_service, 'data': data, 'api': api_service,
            'ds_manager': ds_manager}

def test_policy_table_publish(self):
    """Policy table result publish

    Test basic DSE functionality with policy engine and table result
    publish.
    """
    node = helper.make_dsenode_new_partition('testnode')
    data = fake_datasource.FakeDataSource('data')
    policy = agnostic.DseRuntime('policy')
    policy2 = agnostic.DseRuntime('policy2')
    node.register_service(data)
    node.register_service(policy)
    node.register_service(policy2)
    policy.synchronizer = mock.MagicMock()
    policy2.synchronizer = mock.MagicMock()

    policy.create_policy('data', kind=datalog_base.DATASOURCE_POLICY_TYPE)
    policy.create_policy('classification')
    policy.set_schema('data', compile.Schema({'q': (1, )}))
    policy.insert('p(x):-data:q(x),gt(x,2)', target='classification')

    policy.insert('q(3)', target='data')
    # TODO(ekcs): test that no publish triggered (because no subscribers)

    policy2.create_policy('policy')
    policy2.subscribe('policy', 'classification:p')
    helper.retry_check_function_return_value(
        lambda: 'classification:p' in
        policy._published_tables_with_subscriber, True)
    self.assertEqual(list(policy.policySubData.keys()),
                     [('p', 'classification', None)])

    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        'policy:classification:p(3)')

    policy.insert('q(4)', target='data')
    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        ('policy:classification:p(3)'
         ' policy:classification:p(4)'))

    # test that no change to p means no publish triggered
    policy.insert('q(2)', target='data')
    # TODO(ekcs): test no publish triggered

    policy.delete('q(4)', target='data')
    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        'policy:classification:p(3)')

    policy2.unsubscribe('policy', 'classification:p')
    # trigger removed
    helper.retry_check_function_return_value(
        lambda: len(policy._published_tables_with_subscriber) == 0, True)
    self.assertEqual(list(policy.policySubData.keys()), [])

    policy.insert('q(4)', target='data')
    # TODO(ekcs): test that no publish triggered (because no subscribers)
    node.stop()

def test_get_global_service_names(self):
    node = helper.make_dsenode_new_partition('test_node',
                                             self.messaging_config, [])
    test1 = _PingRpcService('test1', 'test1')
    test2 = _PingRpcService('test2', 'test2')
    node.register_service(test1)
    node.register_service(test2)
    actual = set(node.get_global_service_names())
    self.assertEqual(actual, set(['test1', 'test2']))

def test_replicated_pe_exec(self):
    """Test correct local leader behavior with 2 PEs requesting exec"""
    node1 = helper.make_dsenode_new_partition('testnode1')
    node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
    dsd = fake_datasource.FakeDataSource('dsd')
    # faster time-out for testing
    dsd.LEADER_TIMEOUT = 2
    pe1 = agnostic.DseRuntime('pe1')
    pe2 = agnostic.DseRuntime('pe2')
    node1.register_service(pe1)
    node2.register_service(pe2)
    node1.register_service(dsd)
    assert dsd._running
    assert node1._running
    assert node2._running
    assert node1._control_bus._running

    # first exec request obeyed and leader set
    pe2.rpc('dsd', 'request_execute',
            {'action': 'fake_act',
             'action_args': {'name': 'testnode2'}, 'wait': True})
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 1)
    self.assertEqual(dsd._leader_node_id, 'testnode2')

    # second exec request from leader obeyed and leader remains
    pe2.rpc('dsd', 'request_execute',
            {'action': 'fake_act',
             'action_args': {'name': 'testnode2'}, 'wait': True})
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 2)
    self.assertEqual(dsd._leader_node_id, 'testnode2')

    # exec request from non-leader not obeyed
    pe1.rpc('dsd', 'request_execute',
            {'action': 'fake_act',
             'action_args': {'name': 'testnode1'}, 'wait': True})
    self.assertRaises(
        tenacity.RetryError,
        helper.retry_check_function_return_value,
        lambda: len(dsd.exec_history), 3)

    # leader vacated after heartbeat stops
    node2.stop()
    node2.wait()
    helper.retry_check_function_return_value(
        lambda: dsd._leader_node_id, None)

    # next exec request obeyed and new leader set
    pe1.rpc('dsd', 'request_execute',
            {'action': 'fake_act',
             'action_args': {'name': 'testnode1'}, 'wait': True})
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 3)
    self.assertEqual(dsd._leader_node_id, 'testnode1')
    node1.stop()
    node2.stop()

def test_policy_table_publish(self):
    """Policy table result publish

    Test basic DSE functionality with policy engine and table result
    publish.
    """
    node = helper.make_dsenode_new_partition('testnode')
    data = fake_datasource.FakeDataSource('data')
    policy = agnostic.DseRuntime('policy')
    policy2 = agnostic.DseRuntime('policy2')
    node.register_service(data)
    node.register_service(policy)
    node.register_service(policy2)

    policy.create_policy('data')
    policy.create_policy('classification')
    policy.set_schema('data', compile.Schema({'q': (1,)}))
    policy.insert('p(x):-data:q(x),gt(x,2)', target='classification')

    policy.insert('q(3)', target='data')
    # TODO(ekcs): test that no publish triggered (because no subscribers)

    policy2.create_policy('policy')
    policy2.subscribe('policy', 'classification:p')
    helper.retry_check_function_return_value(
        lambda: 'classification:p' in
        policy._published_tables_with_subscriber, True)
    self.assertEqual(list(policy.policySubData.keys()),
                     [('p', 'classification', None)])

    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        'policy:classification:p(3)')

    policy.insert('q(4)', target='data')
    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        ('policy:classification:p(3)'
         ' policy:classification:p(4)'))

    # test that no change to p means no publish triggered
    policy.insert('q(2)', target='data')
    # TODO(ekcs): test no publish triggered

    policy.delete('q(4)', target='data')
    helper.retry_check_db_equal(
        policy2, 'policy:classification:p(x)',
        'policy:classification:p(3)')

    policy2.unsubscribe('policy', 'classification:p')
    # trigger removed
    helper.retry_check_function_return_value(
        lambda: len(policy._published_tables_with_subscriber) == 0, True)
    self.assertEqual(list(policy.policySubData.keys()), [])

    policy.insert('q(4)', target='data')

def setup_config(with_fake_datasource=True, node_id='testnode',
                 same_partition_as_node=None, api=True, policy=True,
                 datasources=True, with_fake_json_ingester=False):
    """Setup DseNode for testing.

    :param with_fake_datasource: if True, register a FakeDataSource
        service named 'data' on the test node
    :param node_id: ID of the DseNode to create
    :param same_partition_as_node: if given, create the new node in the
        same partition as this node; otherwise create a new partition
    :param api: if True, create the API services
    :param policy: if True, create the policy engine and library services
    :param datasources: if True, enable datasources and the ds_manager
    :param with_fake_json_ingester: if True, register a FakeJsonIngester
    """
    config.set_config_defaults()
    # Load the fake driver.
    cfg.CONF.set_override(
        'drivers', ['congress.tests.fake_datasource.FakeDataSource'])

    if same_partition_as_node is None:
        node = helper.make_dsenode_new_partition(node_id)
    else:
        node = helper.make_dsenode_same_partition(
            same_partition_as_node, node_id)
    if datasources:
        cfg.CONF.set_override('datasources', True)

    with mock.patch.object(periodics, 'PeriodicWorker', autospec=True):
        services = harness.create2(
            existing_node=node, policy_engine=policy, api=api,
            datasources=datasources)

    data = None
    if with_fake_datasource:
        data = fake_datasource.FakeDataSource('data')
        # FIXME(ekcs): this is a hack to prevent the synchronizer from
        # attempting to delete this DSD because it's not in DB
        data.type = 'no_sync_datasource_driver'
        node.register_service(data)

    ingester = None
    if with_fake_json_ingester:
        ingester = fake_datasource.FakeJsonIngester()
        node.register_service(ingester)

    engine_service = None
    library_service = None
    api_service = None
    ds_manager = None  # avoid NameError in return when datasources=False
    if policy:
        engine_service = services[api_base.ENGINE_SERVICE_ID]
        library_service = services[api_base.LIBRARY_SERVICE_ID]
    if api:
        api_service = services['api']
    if datasources:
        ds_manager = services['ds_manager']
    return {'node': node, 'engine': engine_service,
            'library': library_service, 'data': data, 'api': api_service,
            'ds_manager': ds_manager, 'json_ingester': ingester}

def test_subscribe_snapshot(self):
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    test1.subscribe('test2', 'fake_table')
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    self.assertEqual(test1.last_msg['data'], test2.state['fake_table'])

def test_subscribe_snapshot(self):
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    test1.subscribe('test2', 'fake_table')
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    self.assertEqual(test1.last_msg['data'], test2.state['fake_table'])
    node.stop()

def test_unregister(self):
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    node.register_service(test1)
    obj = node.invoke_service_rpc(
        'test1', 'get_status', {'source_id': None, 'params': None})
    self.assertIsNotNone(obj)
    node.unregister_service('test1')
    helper.retry_til_exception(
        congressException.NotFound,
        lambda: node.invoke_service_rpc(
            'test1', 'get_status', {'source_id': None, 'params': None}))

def test_datasource_poll(self):
    node = helper.make_dsenode_new_partition('testnode')
    pub = FakeDataSource('pub')
    sub = FakeDataSource('sub')
    node.register_service(pub)
    node.register_service(sub)

    sub.subscribe('pub', 'fake_table')
    pub.state = {'fake_table': set([(1, 2)])}
    pub.poll()
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set(pub.state['fake_table']))
    self.assertFalse(hasattr(pub, "last_msg"))

def test_intranode_pubsub(self):
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    test1.subscribe('test2', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    test2.publish('p', 42)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], 42)
    self.assertFalse(hasattr(test2, "last_msg"))

def setUp(self):
    super(TestDataSourceManager, self).setUp()
    cfg.CONF.set_override(
        'drivers', ['congress.tests.fake_datasource.FakeDataSource'])
    self.datasource_mgr = datasource_manager.DataSourceManager
    self.datasource_mgr.validate_configured_drivers()
    node = helper.make_dsenode_new_partition('testnode')
    self.dseNode = node
    engine = Dse2Runtime('engine')
    node.register_service(engine)
    self.datasource_mgr.set_dseNode(node)

def test_policy(self):
    node = helper.make_dsenode_new_partition('testnode')
    data = FakeDataSource('data')
    engine = Dse2Runtime('engine')
    node.register_service(data)
    node.register_service(engine)

    engine.create_policy('alpha')
    engine.create_policy('data')
    self.insert_rule(engine, 'p(x) :- data:fake_table(x)', 'alpha')
    data.state = {'fake_table': set([(1,), (2,)])}
    data.poll()
    helper.retry_check_db_equal(
        engine, 'p(x)', 'p(1) p(2)', target='alpha')
    self.assertFalse(hasattr(engine, "last_msg"))

def test_datasource_poll(self):
    node = helper.make_dsenode_new_partition('testnode')
    node.always_snapshot = True  # Note(ekcs): this test expects snapshot
    pub = fake_datasource.FakeDataSource('pub')
    sub = fake_datasource.FakeDataSource('sub')
    node.register_service(pub)
    node.register_service(sub)

    sub.subscribe('pub', 'fake_table')
    pub.state = {'fake_table': set([(1, 2)])}
    pub.poll()
    helper.retry_check_function_return_value(
        lambda: sub.last_msg['data'], set(pub.state['fake_table']))
    self.assertFalse(hasattr(pub, "last_msg"))
    node.stop()

def test_intranode_pubsub(self):
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    test1.subscribe('test2', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    test2.publish('p', 42)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], 42)
    self.assertFalse(hasattr(test2, "last_msg"))
    node.stop()

def test_sub_before_service_exists(self):
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    node.register_service(test1)

    test1.subscribe('test2', 'p')
    self.assertFalse(hasattr(test1, "last_msg"))
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test2)
    test2.publish('p', [42], use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], [42])
    self.assertFalse(hasattr(test2, "last_msg"))
    node.stop()
    node.wait()

def test_sub_before_service_exists(self):
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    node.register_service(test1)

    test1.subscribe('test2', 'p')
    self.assertFalse(hasattr(test1, "last_msg"))
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test2)
    test2.publish('p', 42, use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], 42)
    self.assertFalse(hasattr(test2, "last_msg"))
    node.stop()
    node.wait()

def setUp(self):
    super(TestDsePerformance, self).setUp()
    cfg.CONF.set_override(
        'drivers',
        [('congress.tests.datasources.performance_datasource_driver'
          '.PerformanceTestDriver')])
    self.cage = helper.make_dsenode_new_partition("perf")
    harness.create2(existing_node=self.cage)
    self.api = {'policy': self.cage.service_object('api-policy'),
                'rule': self.cage.service_object('api-rule'),
                'table': self.cage.service_object('api-table'),
                'row': self.cage.service_object('api-row'),
                'datasource': self.cage.service_object('api-datasource'),
                'status': self.cage.service_object('api-status'),
                'schema': self.cage.service_object('api-schema')}
    self.engine = self.cage.service_object(api_base.ENGINE_SERVICE_ID)

def test_datasource_pub(self, nova_mock):
    node = helper.make_dsenode_new_partition('testnode')
    nova = nova_driver.NovaDriver(
        name='nova', args=helper.datasource_openstack_args())
    test = fake_datasource.FakeDataSource('test')
    node.register_service(nova)
    node.register_service(test)

    test.subscribe('nova', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test, 'last_msg'), True)
    nova.publish('p', [42], use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test.last_msg['data'], [42])
    self.assertFalse(hasattr(nova, "last_msg"))
    node.stop()

def test_datasource_pub(self, nova_mock):
    node = helper.make_dsenode_new_partition('testnode')
    nova = nova_driver.NovaDriver(
        name='nova', args=helper.datasource_openstack_args())
    test = fake_datasource.FakeDataSource('test')
    node.register_service(nova)
    node.register_service(test)

    test.subscribe('nova', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test, 'last_msg'), True)
    nova.publish('p', 42, use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test.last_msg['data'], 42)
    self.assertFalse(hasattr(nova, "last_msg"))
    node.stop()

def test_intranode_pubsub2(self):
    # same as test_intranode_pubsub but with opposite ordering.
    # (Ordering does matter with internode_pubsub).
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    test2.subscribe('test1', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test2, 'last_msg'), True)
    test1.publish('p', 42)
    helper.retry_check_function_return_value(
        lambda: test2.last_msg['data'], 42)
    self.assertFalse(hasattr(test1, "last_msg"))

def test_internode_partial_unsub(self):
    node1 = helper.make_dsenode_new_partition('testnode1')
    node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node1.register_service(test1)
    node2.register_service(test2)

    test1.subscribe('test2', 'p')
    test1.subscribe('test2', 'q')
    test1.unsubscribe('test2', 'q')  # unsub from q should not affect p
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    test2.publish('p', 42)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], 42)
    self.assertFalse(hasattr(test2, "last_msg"))

def test_intranode_pubsub2(self):
    # same as test_intranode_pubsub but with opposite ordering.
    # (Ordering does matter with internode_pubsub).
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    test2.subscribe('test1', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test2, 'last_msg'), True)
    test1.publish('p', 42, use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test2.last_msg['data'], 42)
    self.assertFalse(hasattr(test1, "last_msg"))
    node.stop()

def test_intranode_partial_unsub(self):
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    test2 = fake_datasource.FakeDataSource('test2')
    node.register_service(test1)
    node.register_service(test2)

    test1.subscribe('test2', 'p')
    test1.subscribe('test2', 'q')
    test1.unsubscribe('test2', 'q')  # unsub from q should not affect p
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    test2.publish('p', [42], use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], [42])
    self.assertFalse(hasattr(test2, "last_msg"))
    node.stop()

def test_unregister(self):
    node = helper.make_dsenode_new_partition('testnode')
    test1 = fake_datasource.FakeDataSource('test1')
    node.register_service(test1)
    obj = node.invoke_service_rpc(
        'test1', 'get_status', {'source_id': None, 'params': None})
    self.assertIsNotNone(obj)
    node.unregister_service('test1')
    helper.retry_til_exception(
        congressException.NotFound,
        lambda: node.invoke_service_rpc(
            'test1', 'get_status', {'source_id': None, 'params': None}))
    node.stop()

def test_internode_pubsub(self):
    node1 = helper.make_dsenode_new_partition('testnode1')
    test1 = fake_datasource.FakeDataSource('test1')
    node1.register_service(test1)
    node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
    test2 = fake_datasource.FakeDataSource('test2')
    node2.register_service(test2)

    test1.subscribe('test2', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    test2.publish('p', [42], use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], [42])
    self.assertFalse(hasattr(test2, "last_msg"))
    node1.stop()
    node2.stop()

def test_policy_data(self):
    """Test policy correctly processes initial data snapshot."""
    node = helper.make_dsenode_new_partition('testnode')
    node.always_snapshot = False
    data = fake_datasource.FakeDataSource('data')
    engine = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID)
    node.register_service(data)
    node.register_service(engine)

    engine.create_policy('policy1')
    engine.create_policy('data')
    self.insert_rule(engine, 'p(x) :- data:fake_table(x)', 'policy1')
    data.state = {'fake_table': set([(1,), (2,)])}
    data.poll()
    helper.retry_check_db_equal(
        engine, 'p(x)', 'p(1) p(2)', target='policy1')
    self.assertFalse(hasattr(engine, "last_msg"))

def setUp(self):
    super(TestDsePerformance, self).setUp()
    cfg.CONF.set_override(
        'drivers',
        [('congress.tests.datasources.performance_datasource_driver'
          '.PerformanceTestDriver')])
    self.cage = helper.make_dsenode_new_partition("perf")
    harness.create2(existing_node=self.cage)
    self.api = {
        'policy': self.cage.service_object('api-policy'),
        'rule': self.cage.service_object('api-rule'),
        'table': self.cage.service_object('api-table'),
        'row': self.cage.service_object('api-row'),
        'datasource': self.cage.service_object('api-datasource'),
        'status': self.cage.service_object('api-status'),
        'schema': self.cage.service_object('api-schema')
    }
    self.engine = self.cage.service_object(api_base.ENGINE_SERVICE_ID)

def test_datasource_pub(self):
    node = helper.make_dsenode_new_partition('testnode')
    nova_client = mock.MagicMock()
    with mock.patch.object(novaclient.client.Client, '__init__',
                           return_value=nova_client):
        nova = nova_driver.NovaDriver(
            name='nova', args=helper.datasource_openstack_args())
        test = fake_datasource.FakeDataSource('test')
        node.register_service(nova)
        node.register_service(test)

        test.subscribe('nova', 'p')
        helper.retry_check_function_return_value(
            lambda: hasattr(test, 'last_msg'), True)
        nova.publish('p', 42)
        helper.retry_check_function_return_value(
            lambda: test.last_msg['data'], 42)
        self.assertFalse(hasattr(nova, "last_msg"))

def test_multiservice_pubsub(self):
    node1 = helper.make_dsenode_new_partition('testnode1')
    test1 = FakeDataSource('test1')
    test2 = FakeDataSource('test2')
    node1.register_service(test1)
    node1.register_service(test2)
    node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
    test3 = FakeDataSource('test3')
    node2.register_service(test3)

    test1.subscribe('test3', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(test1, 'last_msg'), True)
    test3.publish('p', 42)
    helper.retry_check_function_return_value(
        lambda: test1.last_msg['data'], 42)
    self.assertFalse(hasattr(test2, "last_msg"))
    self.assertFalse(hasattr(test3, "last_msg"))

def test_start_stop(self):
    # create node and register services
    node = helper.make_dsenode_new_partition('test_node',
                                             self.messaging_config, [])
    services = []
    for i in range(2):
        service = DataService('test-service-%s' % i)
        node.register_service(service)
        services.append(service)
    for s in node.get_services(True):
        self.assertTrue(s._running, "Service '%s' started" % str(s))
    self.assertEqual(set(services), set(node.get_services()),
                     "All services accounted for on node.")
    self.assertTrue(node._rpcserver._started, "RPC server is started")
    self.assertTrue(node._control_bus._running, "Control Bus is started")

    # stop node
    node.stop()
    node.wait()
    self.assertFalse(node._running, "Node is stopped after node start")
    for idx, s in enumerate(node.get_services(True)):
        self.assertFalse(s._running,
                         "Service '%s' stopped after node stop" % str(s))
    # TODO(pballand): fix bug
    # self.assertFalse(node._rpcserver._started,
    #                  "RPC server is stopped after node stop")
    self.assertFalse(node._control_bus._running,
                     "Control Bus is stopped after node stop")

    # restart node
    node.start()
    for s in node.get_services(True):
        self.assertTrue(s._running, "Service '%s' started" % str(s))
    self.assertEqual(set(services), set(node.get_services()),
                     "All services accounted for on node.")
    self.assertTrue(node._rpcserver._started, "RPC server is started")
    self.assertTrue(node._control_bus._running, "Control Bus is started")

def test_start_stop(self):
    # create node and register services
    node = helper.make_dsenode_new_partition('test_node',
                                             self.messaging_config, [])
    services = []
    for i in range(2):
        service = data_service.DataService('test-service-%s' % i)
        node.register_service(service)
        services.append(service)
    for s in node.get_services(True):
        self.assertTrue(s._running, "Service '%s' started" % str(s))
    self.assertEqual(set(services), set(node.get_services()),
                     "All services accounted for on node.")
    self.assertTrue(node._rpc_server._started, "RPC server is started")
    self.assertTrue(node._control_bus._running, "Control Bus is started")

    # stop node
    node.stop()
    node.wait()
    self.assertFalse(node._running, "Node is stopped after node start")
    for idx, s in enumerate(node.get_services(True)):
        self.assertFalse(s._running,
                         "Service '%s' stopped after node stop" % str(s))
    # TODO(pballand): fix bug
    # self.assertFalse(node._rpc_server._started,
    #                  "RPC server is stopped after node stop")
    self.assertFalse(node._control_bus._running,
                     "Control Bus is stopped after node stop")

    # restart node
    node.start()
    for s in node.get_services(True):
        self.assertTrue(s._running, "Service '%s' started" % str(s))
    self.assertEqual(set(services), set(node.get_services()),
                     "All services accounted for on node.")
    self.assertTrue(node._rpc_server._started, "RPC server is started")
    self.assertTrue(node._control_bus._running, "Control Bus is started")

def test_datasource_poll(self):
    node = helper.make_dsenode_new_partition('testnode')
    pub = fake_datasource.FakeDataSource('pub')
    sub = fake_datasource.FakeDataSource('sub')
    node.register_service(pub)
    node.register_service(sub)

    sub.subscribe('pub', 'fake_table')
    pub.state = {'fake_table': set([(1, 2)])}
    pub.poll()
    helper.retry_check_function_return_value(
        lambda: sub.last_msg,
        {'publisher': 'pub',
         'data': (set(pub.state['fake_table']), set([])),
         'table': 'fake_table'})
    self.assertFalse(hasattr(pub, "last_msg"))
    node.stop()

def test_policy_data_late_sub(self):
    """Test policy correctly processes data on late subscribe."""
    node = helper.make_dsenode_new_partition('testnode')
    data = fake_datasource.FakeDataSource('data')
    engine = agnostic.DseRuntime(api_base.ENGINE_SERVICE_ID)
    node.register_service(data)
    node.register_service(engine)

    engine.create_policy('policy1')
    engine.create_policy('data', kind=datalog_base.DATASOURCE_POLICY_TYPE)
    data.state = {'fake_table': set([(1,), (2,)])}
    data.poll()
    self.insert_rule(engine, 'p(x) :- data:fake_table(x)', 'policy1')
    helper.retry_check_db_equal(
        engine, 'p(x)', 'p(1) p(2)', target='policy1')
    data.state = {'fake_table': set([(1,), (2,), (3,)])}
    data.poll()
    helper.retry_check_db_equal(
        engine, 'p(x)', 'p(1) p(2) p(3)', target='policy1')
    self.assertFalse(hasattr(engine, "last_msg"))
    node.stop()

def test_datasource_unsub(self, nova_mock):
    node = helper.make_dsenode_new_partition('testnode')
    nova = nova_driver.NovaDriver(
        name='nova', args=helper.datasource_openstack_args())
    test = fake_datasource.FakeDataSource('test')
    node.register_service(nova)
    node.register_service(test)

    nova.subscribe('test', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(nova, 'last_msg'), True)
    test.publish('p', [42], use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: nova.last_msg['data'], [42])
    self.assertFalse(hasattr(test, "last_msg"))
    nova.unsubscribe('test', 'p')
    test.publish('p', [43], use_snapshot=True)
    # hard to test that the message is never delivered
    time.sleep(0.2)
    self.assertEqual(nova.last_msg['data'], [42])
    node.stop()

def test_policy_data_late_sub(self):
    """Test policy correctly processes data on late subscribe."""
    node = helper.make_dsenode_new_partition('testnode')
    node.always_snapshot = False
    data = FakeDataSource('data')
    engine = Dse2Runtime('engine')
    node.register_service(data)
    node.register_service(engine)

    engine.create_policy('policy1')
    engine.create_policy('data')
    data.state = {'fake_table': set([(1,), (2,)])}
    data.poll()
    self.insert_rule(engine, 'p(x) :- data:fake_table(x)', 'policy1')
    helper.retry_check_db_equal(
        engine, 'p(x)', 'p(1) p(2)', target='policy1')
    data.state = {'fake_table': set([(1,), (2,), (3,)])}
    data.poll()
    helper.retry_check_db_equal(
        engine, 'p(x)', 'p(1) p(2) p(3)', target='policy1')
    self.assertFalse(hasattr(engine, "last_msg"))

def test_datasource_unsub(self, nova_mock):
    node = helper.make_dsenode_new_partition('testnode')
    nova = nova_driver.NovaDriver(
        name='nova', args=helper.datasource_openstack_args())
    test = fake_datasource.FakeDataSource('test')
    node.register_service(nova)
    node.register_service(test)

    nova.subscribe('test', 'p')
    helper.retry_check_function_return_value(
        lambda: hasattr(nova, 'last_msg'), True)
    test.publish('p', 42, use_snapshot=True)
    helper.retry_check_function_return_value(
        lambda: nova.last_msg['data'], 42)
    self.assertFalse(hasattr(test, "last_msg"))
    nova.unsubscribe('test', 'p')
    test.publish('p', 43, use_snapshot=True)
    # hard to test that the message is never delivered
    time.sleep(0.2)
    self.assertEqual(nova.last_msg['data'], 42)
    node.stop()

def setUp(self):
    # create DSE and add vm-placement engine and fake datasource
    super(TestSetPolicy, self).setUp()
    self.cage = helper.make_dsenode_new_partition("perf")

    kwds = {}
    kwds['name'] = 'fake'
    kwds['args'] = helper.datasource_openstack_args()
    self.fake = self.cage.create_service(
        "congress.tests.fake_datasource.FakeDataSource", kwds)
    self.fake.poll_time = 0
    self.cage.register_service(self.fake)

    kwds = {}
    kwds['name'] = 'vmplace'
    kwds['args'] = helper.datasource_openstack_args()
    self.vmplace = self.cage.create_service(
        "congress.policy_engines.vm_placement.ComputePlacementEngine",
        kwds)
    self.vmplace.debug_mode()
    self.vmplace.poll_time = 0
    self.cage.register_service(self.vmplace)

def test_unregister_service(self):
    node = helper.make_dsenode_new_partition('test_node',
                                             self.messaging_config, [])
    test1 = _PingRpcService('test1', 'test1')
    uuid1 = '1c5d6da0-64ae-11e6-8852-000c29242e6f'
    test1.ds_id = uuid1
    test2 = _PingRpcService('test2', 'test2')
    uuid2 = 'd36d3781-e9e4-4278-bbf4-9f5fef7c5101'
    test2.ds_id = uuid2
    node.register_service(test1)
    node.register_service(test2)
    actual = set(node.get_global_service_names())
    self.assertEqual(actual, set(['test1', 'test2']))

    # unregister by service_id
    node.unregister_service(service_id='test1')
    actual = set(node.get_global_service_names())
    self.assertEqual(actual, set(['test2']))

    # unregister by uuid
    node.unregister_service(uuid_=uuid2)
    actual = set(node.get_global_service_names())
    self.assertEqual(actual, set())

def test_datasource_unsub(self):
    node = helper.make_dsenode_new_partition('testnode')
    nova_client = mock.MagicMock()
    with mock.patch.object(novaclient.client.Client, '__init__',
                           return_value=nova_client):
        nova = NovaDriver(
            name='nova', args=helper.datasource_openstack_args())
        test = FakeDataSource('test')
        node.register_service(nova)
        node.register_service(test)

        nova.subscribe('test', 'p')
        helper.retry_check_function_return_value(
            lambda: hasattr(nova, 'last_msg'), True)
        test.publish('p', 42)
        helper.retry_check_function_return_value(
            lambda: nova.last_msg['data'], 42)
        self.assertFalse(hasattr(test, "last_msg"))
        nova.unsubscribe('test', 'p')
        test.publish('p', 43)
        # hard to test that the message is never delivered
        time.sleep(0.2)
        self.assertEqual(nova.last_msg['data'], 42)

def setUp(self):
    super(TestDataSource, self).setUp()
    cfg.CONF.set_override(
        'drivers', ['congress.tests.fake_datasource.FakeDataSource'])
    self.dseNode = helper.make_dsenode_new_partition('testnode')

def setUp(self):
    """Setup polling tests."""
    super(TestDataSourceDriver, self).setUp()
    cfg.CONF.set_override(
        'drivers', ['congress.datasources.neutron_driver.NeutronDriver'])

    # Create mock of Neutron client so we can control data
    mock_factory = mox.Mox()
    neutron_client = mock_factory.CreateMock(
        neutronclient.v2_0.client.Client)
    neutron_client.list_networks().InAnyOrder(1).AndReturn(network1)
    neutron_client.list_ports().InAnyOrder(1).AndReturn(port_response)
    neutron_client.list_routers().InAnyOrder(1).AndReturn(router_response)
    neutron_client.list_security_groups().InAnyOrder(1).AndReturn(
        security_group_response)
    neutron_client.list_networks().InAnyOrder(2).AndReturn(network2)
    neutron_client.list_ports().InAnyOrder(2).AndReturn(port_response)
    neutron_client.list_routers().InAnyOrder(2).AndReturn(router_response)
    neutron_client.list_security_groups().InAnyOrder(2).AndReturn(
        security_group_response)
    mock_factory.ReplayAll()

    node = helper.make_dsenode_new_partition('neutron_ds_node')

    engine = harness.create_policy_engine()
    node.register_service(engine)

    neutron_args = {
        'name': 'neutron',
        'driver': 'neutron',
        'description': None,
        'type': None,
        'enabled': '1'
    }
    neutron_args['config'] = helper.datasource_openstack_args()
    neutron_args['config']['poll_time'] = 0
    neutron_args['config']['client'] = neutron_client
    neutron_ds = node.create_datasource_service(neutron_args)
    node.register_service(neutron_ds)

    engine.create_policy('neutron')
    engine.set_schema('neutron', neutron_ds.get_schema())
    neutron_ds.neutron = neutron_client
    engine.debug_mode()

    # insert rule into policy to make testing easier.
    # (Some of the IDs are auto-generated each time we convert)
    engine.insert(create_network_group('p'))

    # create some garbage data
    args = helper.datasource_openstack_args()
    driver = neutron_driver.NeutronDriver(args=args)
    network_key_to_index = driver.get_column_map(
        neutron_driver.NeutronDriver.NETWORKS)
    network_max_index = max(network_key_to_index.values())
    args1 = ['1'] * (network_max_index + 1)
    args2 = ['2'] * (network_max_index + 1)
    args1 = ",".join(args1)
    args2 = ",".join(args2)
    fake_networks = [
        'neutron:networks({})'.format(args1),
        'neutron:networks({})'.format(args2)]

    # answer to query above for network1
    datalog1 = (
        'p("240ff9df-df35-43ae-9df5-27fae87f2492") '
        'p("340ff9df-df35-43ae-9df5-27fae87f2492") '
        'p("440ff9df-df35-43ae-9df5-27fae87f2492")')

    # answer to query above for network2
    datalog2 = (
        'p("240ff9df-df35-43ae-9df5-27fae87f2492") '
        'p("640ff9df-df35-43ae-9df5-27fae87f2492") '
        'p("540ff9df-df35-43ae-9df5-27fae87f2492")')

    # return value
    self.info = {}
    self.info['node'] = node
    self.info['datalog1'] = datalog1
    self.info['datalog2'] = datalog2
    self.info['fake_networks'] = fake_networks

def test_replicated_pe_exec(self):
    """Test correct local leader behavior with 2 PEs requesting exec"""
    node1 = helper.make_dsenode_new_partition('testnode1')
    node2 = helper.make_dsenode_same_partition(node1, 'testnode2')
    dsd = fake_datasource.FakeDataSource('dsd')
    # faster time-out for testing
    dsd.LEADER_TIMEOUT = 2
    pe1 = agnostic.DseRuntime('pe1')
    pe2 = agnostic.DseRuntime('pe2')
    node1.register_service(pe1)
    node2.register_service(pe2)
    node1.register_service(dsd)
    assert dsd._running
    assert node1._running
    assert node2._running
    assert node1._control_bus._running

    # first exec request obeyed and leader set
    pe2.rpc(
        'dsd', 'request_execute', {
            'action': 'fake_act',
            'action_args': {'name': 'testnode2'},
            'wait': True
        })
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 1)
    self.assertEqual(dsd._leader_node_id, 'testnode2')

    # second exec request from leader obeyed and leader remains
    pe2.rpc(
        'dsd', 'request_execute', {
            'action': 'fake_act',
            'action_args': {'name': 'testnode2'},
            'wait': True
        })
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 2)
    self.assertEqual(dsd._leader_node_id, 'testnode2')

    # exec request from non-leader not obeyed
    pe1.rpc(
        'dsd', 'request_execute', {
            'action': 'fake_act',
            'action_args': {'name': 'testnode1'},
            'wait': True
        })
    self.assertRaises(tenacity.RetryError,
                      helper.retry_check_function_return_value,
                      lambda: len(dsd.exec_history), 3)

    # leader vacated after heartbeat stops
    node2.stop()
    node2.wait()
    helper.retry_check_function_return_value(
        lambda: dsd._leader_node_id, None)

    # next exec request obeyed and new leader set
    pe1.rpc(
        'dsd', 'request_execute', {
            'action': 'fake_act',
            'action_args': {'name': 'testnode1'},
            'wait': True
        })
    helper.retry_check_function_return_value(
        lambda: len(dsd.exec_history), 3)
    self.assertEqual(dsd._leader_node_id, 'testnode1')
    node1.stop()
    node2.stop()