def test_static_resources_partitioning(self):
    """Verify one subset extraction per source that has static resources."""
    coordinator = self.mgr.partition_coordinator
    resources_a = ['static_1', 'static_2']
    resources_b = ['static_3', 'static_4']
    self.pipeline_cfg['sources'][0]['resources'] = resources_a
    self.pipeline_cfg['sources'].append({'name': 'test_pipeline2',
                                         'interval': 60,
                                         'meters': ['test', 'test2'],
                                         'resources': resources_b,
                                         'sinks': ['test_sink']})
    # have one pipeline without static resources defined
    self.pipeline_cfg['sources'].append({'name': 'test_pipeline3',
                                         'interval': 60,
                                         'meters': ['test', 'test2'],
                                         'resources': [],
                                         'sinks': ['test_sink']})
    self.setup_polling()
    tasks = self.mgr.setup_polling_tasks()
    self.mgr.interval_task(tasks.get(60))
    # Only two groups need to be created, one for each pipeline,
    # even though counter test is used twice
    expected_calls = []
    for resources in (resources_a, resources_b):
        group = self.mgr.construct_group_id(utils.hash_of_set(resources))
        expected_calls.append(mock.call(group, resources))
    recorded = coordinator.extract_my_subset.call_args_list
    self.assertEqual(len(expected_calls), len(recorded))
    for call in expected_calls:
        self.assertIn(call, recorded)
def test_static_resources_partitioning(self):
    """The agent should request exactly one resource subset per static source."""
    p_coord = self.mgr.partition_coordinator
    first_resources = ['static_1', 'static_2']
    second_resources = ['static_3', 'static_4']
    sources = self.pipeline_cfg['sources']
    sources[0]['resources'] = first_resources
    # one extra pipeline with static resources, one with none at all
    for name, res in (('test_pipeline2', second_resources),
                      ('test_pipeline3', [])):
        sources.append({'name': name,
                        'interval': 60,
                        'meters': ['test', 'test2'],
                        'resources': res,
                        'sinks': ['test_sink']})
    self.setup_polling()
    polling_tasks = self.mgr.setup_polling_tasks()
    self.mgr.interval_task(polling_tasks.get(60))
    # Only two groups are expected, one per pipeline with static
    # resources, even though the counter appears in both pipelines.
    expected = [
        mock.call(self.mgr.construct_group_id(utils.hash_of_set(res)), res)
        for res in (first_resources, second_resources)
    ]
    actual = p_coord.extract_my_subset.call_args_list
    self.assertEqual(len(expected), len(actual))
    for call in expected:
        self.assertIn(call, actual)
def get(self, discovery_cache=None):
    """Return this agent's share of static resources plus discovered ones.

    :param discovery_cache: optional cache passed through to the agent
                            manager's discover() call.
    """
    mgr = self.agent_manager
    if self._discovery:
        discovered = mgr.discover(self._discovery, discovery_cache)
    else:
        discovered = []
    mine = []
    if self._resources:
        # each distinct static-resource set maps to its own group id
        group = mgr.construct_group_id(utils.hash_of_set(self._resources))
        mine = mgr.partition_coordinator.extract_my_subset(
            group, self._resources)
    return mine + discovered
def test_join_partitioning_groups(self):
    """Each discovery group and each static-resource set joins its own group."""
    self.mgr.discoveries = self.create_discoveries()
    self.mgr.join_partitioning_groups()
    coordinator = self.mgr.partition_coordinator
    group_ids = ["another_group", "global"]
    for source in self.pipeline_cfg["sources"]:
        if source["resources"]:
            group_ids.append(utils.hash_of_set(source["resources"]))
    expected = [mock.call(self.mgr.construct_group_id(gid))
                for gid in group_ids]
    joined = coordinator.join_group.call_args_list
    self.assertEqual(len(expected), len(joined))
    for call in expected:
        self.assertIn(call, joined)
def get(self, discovery_cache=None):
    """Combine partitioned static resources with discovered ones.

    :param discovery_cache: optional cache forwarded to discover().
    """
    discovered = []
    if self._discovery:
        discovered = self.agent_manager.discover(self._discovery,
                                                 discovery_cache)
    if not self._resources:
        return discovered
    # the group id is derived from the static-resource set itself
    group_id = self.agent_manager.construct_group_id(
        utils.hash_of_set(self._resources))
    subset = self.agent_manager.partition_coordinator.extract_my_subset(
        group_id, self._resources)
    return subset + discovered
def join_partitioning_groups(self):
    """Join one coordination group per discovery plus one per static set."""
    self.groups = {self.construct_group_id(d.obj.group_id)
                   for d in self.discovery_manager}
    # let each set of statically-defined resources have its own group
    for source in self.polling_manager.sources:
        if source.resources:
            self.groups.add(
                self.construct_group_id(utils.hash_of_set(source.resources)))
    for group in self.groups:
        self.partition_coordinator.join_group(group)
def test_join_partitioning_groups(self):
    """Groups joined: the discovery groups plus one per static pipeline."""
    self.mgr.discovery_manager = self.create_discovery_manager()
    self.mgr.join_partitioning_groups()
    coordinator = self.mgr.partition_coordinator
    hashes = [utils.hash_of_set(cfg['resources'])
              for cfg in self.pipeline_cfg if cfg['resources']]
    wanted = []
    for gid in ['another_group', 'global'] + hashes:
        wanted.append(mock.call(self.mgr.construct_group_id(gid)))
    observed = coordinator.join_group.call_args_list
    self.assertEqual(len(wanted), len(observed))
    for call in wanted:
        self.assertIn(call, observed)
def join_partitioning_groups(self):
    """Join the coordination group of every discovery and static set."""
    groups = {self.construct_group_id(d.obj.group_id)
              for d in self.discovery_manager}
    # let each set of statically-defined resources have its own group
    groups |= {self.construct_group_id(utils.hash_of_set(p.resources))
               for p in self.pipeline_manager.pipelines if p.resources}
    for group in groups:
        self.partition_coordinator.join_group(group)
def test_hash_of_set(self):
    """hash_of_set ignores duplicates but distinguishes different members."""
    base = ['a', 'b']
    with_dupe = ['a', 'b', 'a']
    different = ['a', 'c']
    self.assertEqual(utils.hash_of_set(base),
                     utils.hash_of_set(with_dupe))
    self.assertNotEqual(utils.hash_of_set(base),
                        utils.hash_of_set(different))
    self.assertNotEqual(utils.hash_of_set(with_dupe),
                        utils.hash_of_set(different))
def get(self, discovery_cache=None):
    """Return discovered resources plus the static ones this agent owns.

    :param discovery_cache: optional cache forwarded to discover().
    """
    if self._discovery:
        discovered = self.agent_manager.discover(self._discovery,
                                                 discovery_cache)
    else:
        discovered = []
    if not self._resources:
        return discovered
    group_id = self.agent_manager.construct_group_id(
        utils.hash_of_set(self._resources))
    # the hash ring decides which static resources belong to this agent
    ring = self.agent_manager.hashrings[group_id]
    mine = [res for res in self._resources if ring.belongs_to_self(res)]
    return mine + discovered
def get(self, discovery_cache=None):
    """Return discovered resources plus this agent's static-resource share.

    :param discovery_cache: optional cache forwarded to discover().
    """
    discovered = (self.agent_manager.discover(self._discovery,
                                              discovery_cache)
                  if self._discovery else [])
    if not self._resources:
        return discovered
    ring = self.agent_manager.hashrings[
        self.agent_manager.construct_group_id(
            utils.hash_of_set(self._resources))]
    owned = []
    for resource in self._resources:
        # hash-ring membership is keyed on text; coerce before testing
        if ring.belongs_to_self(six.text_type(resource)):
            owned.append(resource)
    return owned + discovered
def join_partitioning_groups(self):
    """Sync coordinator membership with the currently configured groups.

    Starts or stops the partition coordinator (and its heartbeat timer)
    as groups appear or disappear, then joins every active group.
    """
    self.groups = {self.construct_group_id(d.obj.group_id)
                   for d in self.discoveries}
    # let each set of statically-defined resources have its own group
    for source in self.polling_manager.sources:
        if source.resources:
            self.groups.add(
                self.construct_group_id(utils.hash_of_set(source.resources)))
    if not self.groups and self.partition_coordinator.is_active():
        # nothing left to coordinate: shut the machinery down
        self.partition_coordinator.stop()
        self.heartbeat_timer.stop()
    if self.groups and not self.partition_coordinator.is_active():
        # groups (re)appeared: bring the coordinator and heartbeat up
        self.partition_coordinator.start()
        utils.spawn_thread(self.heartbeat_timer.start)
    for group in self.groups:
        self.partition_coordinator.join_group(group)
def test_static_resources_partitioning(self):
    """Exactly one subset extraction per source with static resources."""
    p_coord = self.mgr.partition_coordinator
    group_a = ["static_1", "static_2"]
    group_b = ["static_3", "static_4"]
    cfg_sources = self.pipeline_cfg["sources"]
    cfg_sources[0]["resources"] = group_a
    cfg_sources.append({
        "name": "test_pipeline2",
        "interval": 60,
        "meters": ["test", "test2"],
        "resources": group_b,
        "sinks": ["test_sink"],
    })
    # have one pipeline without static resources defined
    cfg_sources.append({
        "name": "test_pipeline3",
        "interval": 60,
        "meters": ["test", "test2"],
        "resources": [],
        "sinks": ["test_sink"],
    })
    self.setup_polling()
    polling_tasks = self.mgr.setup_polling_tasks()
    self.mgr.interval_task(polling_tasks.get(60))
    # Only two groups need to be created, one for each pipeline,
    # even though counter test is used twice
    expected = []
    for resources in (group_a, group_b):
        gid = self.mgr.construct_group_id(utils.hash_of_set(resources))
        expected.append(mock.call(gid, resources))
    recorded = p_coord.extract_my_subset.call_args_list
    self.assertEqual(len(expected), len(recorded))
    for call in expected:
        self.assertIn(call, recorded)