def topology_to_graph(topology):
    """Build an NXGraph from a topology dictionary.

    'nodes' entries become vertices keyed by their vitrage_id;
    'links' entries reference nodes by index and become typed edges.
    """
    graph = NXGraph()
    nodes = topology['nodes']
    for n in nodes:
        graph.add_vertex(Vertex(n['vitrage_id'], n))

    for edge in topology['links']:
        # 'source' and 'target' are indices into the nodes list
        s_id = nodes[edge['source']]['vitrage_id']
        t_id = nodes[edge['target']]['vitrage_id']
        graph.add_edge(Edge(s_id, t_id, edge['relationship_type']))
    return graph
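# A minimal sketch of the input topology_to_graph() expects, inferred from
# the lookups above (the concrete values are illustrative, not taken from a
# real Vitrage API response):
topology = {
    'nodes': [
        {'vitrage_id': 'host-1', 'vitrage_type': 'nova.host'},
        {'vitrage_id': 'vm-1', 'vitrage_type': 'nova.instance'},
    ],
    'links': [
        # 'source' and 'target' are indices into the 'nodes' list
        {'source': 0, 'target': 1, 'relationship_type': 'contains'},
    ],
}
graph = topology_to_graph(topology)  # 2 vertices, 1 'contains' edge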
def _create_graph_from_graph_dictionary(self, api_graph):
    self.assertIsNotNone(api_graph)
    graph = NXGraph()

    # vertices are keyed by their index in the 'nodes' list
    nodes = api_graph['nodes']
    for i, node in enumerate(nodes):
        graph.add_vertex(Vertex(str(i), node))

    # each link's 'source' and 'target' are node indices as well
    for edge in api_graph['links']:
        graph.add_edge(Edge(str(edge['source']),
                            str(edge['target']),
                            edge[EdgeProperties.RELATIONSHIP_TYPE]))
    return graph
def create_graph(self):
    graph = NXGraph()
    v1 = self._file_to_vertex('openstack-cluster.json')
    graph.add_vertex(v1)
    networks = self._create_n_vertices(graph,
                                       self._num_of_networks,
                                       'neutron.network.json')
    zones = self._create_n_neighbors(graph,
                                     self._num_of_zones_per_cluster,
                                     [v1],
                                     'nova.zone.json',
                                     'contains.json')
    hosts = self._create_n_neighbors(graph,
                                     self._num_of_hosts_per_zone,
                                     zones,
                                     'nova.host.json',
                                     'contains.json')
    self._create_n_neighbors(graph,
                             self._num_of_zabbix_alarms_per_host,
                             hosts,
                             'zabbix.json',
                             'on.json',
                             Direction.IN)
    instances = self._create_n_neighbors(graph,
                                         self._num_of_instances_per_host,
                                         hosts,
                                         'nova.instance.json',
                                         'contains.json')
    ports = self._create_n_neighbors(graph,
                                     self._num_of_ports_per_instance,
                                     instances,
                                     'neutron.port.json',
                                     'attached.json',
                                     direction=Direction.IN)
    self._round_robin_edges(graph, networks, ports, 'contains.json')
    self._create_n_neighbors(graph,
                             self._num_of_volumes_per_instance,
                             instances,
                             'cinder.volume.json',
                             'attached.json',
                             Direction.IN)
    self._create_n_neighbors(graph,
                             self._num_of_vitrage_alarms_per_instance,
                             instances,
                             'vitrage.alarm.json',
                             'on.json',
                             Direction.IN)

    # Also create non connected components:
    tripleo_controller = self._create_n_vertices(
        graph,
        self._num_of_tripleo_controllers,
        'tripleo.controller.json')
    self._create_n_neighbors(graph,
                             self._num_of_zabbix_alarms_per_controller,
                             tripleo_controller,
                             'zabbix.json',
                             'on.json',
                             Direction.IN)
    return graph
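# _round_robin_edges() is used above to spread the ports across the
# networks. A minimal sketch of what such a helper might look like
# (hypothetical: the real helper takes an edge-spec JSON file name, and
# its exact semantics are not shown in this snippet):
def _round_robin_edges_sketch(graph, sources, targets, label):
    for i, target in enumerate(targets):
        source = sources[i % len(sources)]  # cycle through the sources
        graph.add_edge(Edge(source.vertex_id, target.vertex_id, label))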
@classmethod
def from_clause(cls, clause, extract_var):
    condition_g = NXGraph("scenario condition")

    for term in clause:
        variable, var_type = extract_var(term.symbol_name)
        if var_type == ENTITY:
            vertex = variable.copy()
            vertex[VProps.VITRAGE_IS_DELETED] = False
            vertex[VProps.VITRAGE_IS_PLACEHOLDER] = False
            condition_g.add_vertex(vertex)
        else:  # type = relationship
            # prevent overwriting of the NEG_CONDITION and
            # VITRAGE_IS_DELETED properties when both "not A"
            # and "A" appear in the same template
            edge_desc = cls._copy_edge_desc(variable)
            cls._set_edge_relationship_info(edge_desc, term.positive)
            cls._add_edge_relationship(condition_g, edge_desc)

    return condition_g
def _create_graph_from_tree_dictionary(self,
                                       api_graph,
                                       graph=None,
                                       ancestor=None):
    children = []
    graph = NXGraph() if not graph else graph

    if 'children' in api_graph:
        children = api_graph.copy()['children']
        del api_graph['children']

    vertex = Vertex(api_graph[VProps.VITRAGE_ID], api_graph)
    graph.add_vertex(vertex)
    if ancestor:
        graph.add_edge(Edge(ancestor[VProps.VITRAGE_ID],
                            vertex[VProps.VITRAGE_ID],
                            'label'))

    for entity in children:
        self._create_graph_from_tree_dictionary(entity, graph, vertex)

    return graph
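# A minimal sketch of the tree dictionary consumed by the recursive builder
# above (the ids are illustrative; the real input is a tree returned by the
# Vitrage topology API):
tree = {
    'vitrage_id': 'cluster-1',
    'children': [
        {'vitrage_id': 'host-1',
         'children': [{'vitrage_id': 'vm-1'}]},
    ],
}
# graph = self._create_graph_from_tree_dictionary(tree)
# -> 3 vertices and 2 'label' edges: cluster-1 -> host-1 -> vm-1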
@classmethod
def _create_entity_graph(cls, name,
                         num_of_alarms_per_host,
                         num_of_alarms_per_vm,
                         num_of_hosts_per_node,
                         num_of_vms_per_host,
                         num_of_tests_per_host):
    start = time.time()
    g = NXGraph(name)
    g.add_vertex(v_node)
    g.add_vertex(v_switch)
    g.add_edge(e_node_to_switch)

    # Add Hosts
    for host_id in range(num_of_hosts_per_node):
        host_to_add = add_connected_vertex(g,
                                           RESOURCE,
                                           NOVA_HOST_DATASOURCE,
                                           host_id,
                                           ELabel.CONTAINS,
                                           v_node,
                                           True)
        g.add_edge(graph_utils.create_edge(host_to_add.vertex_id,
                                           v_switch.vertex_id, 'USES'))

        # Add Host Alarms
        for j in range(num_of_alarms_per_host):
            add_connected_vertex(g, ALARM, ALARM_ON_HOST,
                                 cls.host_alarm_id, ELabel.ON,
                                 host_to_add, False,
                                 {VProps.RESOURCE_ID: host_id,
                                  VProps.NAME: host_id})
            cls.host_alarm_id += 1

        # Add Host Tests
        for j in range(num_of_tests_per_host):
            add_connected_vertex(g, TEST, TEST_ON_HOST,
                                 cls.host_test_id, ELabel.ON,
                                 host_to_add)
            cls.host_test_id += 1

        # Add Host Vms
        for j in range(num_of_vms_per_host):
            vm_to_add = add_connected_vertex(g,
                                             RESOURCE,
                                             NOVA_INSTANCE_DATASOURCE,
                                             cls.vm_id,
                                             ELabel.CONTAINS,
                                             host_to_add,
                                             True)
            cls.vm_id += 1
            cls.vms.append(vm_to_add)

            # Add Instance Alarms
            for k in range(num_of_alarms_per_vm):
                add_connected_vertex(g, ALARM, ALARM_ON_VM,
                                     cls.vm_alarm_id, ELabel.ON,
                                     vm_to_add, False,
                                     {VProps.RESOURCE_ID: cls.vm_id - 1,
                                      VProps.NAME: cls.vm_id - 1})
                cls.vm_alarm_id += 1

    end = time.time()
    LOG.debug('Graph creation took %s seconds, size is: %s',
              end - start, len(g))
    expected_graph_size = \
        2 + num_of_hosts_per_node + num_of_hosts_per_node * \
        num_of_alarms_per_host + num_of_hosts_per_node * \
        num_of_vms_per_host + num_of_hosts_per_node * \
        num_of_vms_per_host * num_of_alarms_per_vm + \
        num_of_tests_per_host * num_of_hosts_per_node
    if expected_graph_size != len(g):
        raise VitrageError('Init failed, graph size unexpected {0} != {1}'
                           .format(expected_graph_size, len(g)))
    return g
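# Sanity-checking the expected_graph_size formula above with small,
# illustrative numbers (not taken from any test configuration):
# 2 hosts per node, 1 alarm per host, 2 VMs per host, 1 alarm per VM,
# 1 test per host:
#     2              (node + switch)
#   + 2              (hosts)
#   + 2 * 1          (host alarms)
#   + 2 * 2          (VMs)
#   + 2 * 2 * 1      (VM alarms)
#   + 1 * 2          (host tests)
#   = 16 vertices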
class TestConsistencyFunctional(TestFunctionalBase, TestConfiguration):

    CONSISTENCY_OPTS = [
        cfg.IntOpt('min_time_to_delete',
                   default=1,
                   min=1),
    ]

    EVALUATOR_OPTS = [
        cfg.StrOpt(
            'templates_dir',
            default=utils.get_resources_dir() + '/templates/consistency',
        ),
        cfg.StrOpt(
            'equivalences_dir',
            default='equivalences',
        ),
        cfg.StrOpt(
            'notifier_topic',
            default='vitrage.evaluator',
        ),
    ]

    def setUp(self):
        super(TestConsistencyFunctional, self).setUp()
        self.conf_reregister_opts(self.CONSISTENCY_OPTS, 'consistency')
        self.conf_reregister_opts(self.EVALUATOR_OPTS, 'evaluator')

        self.add_db()
        self.load_datasources()
        self.graph = NXGraph("Entity Graph")
        self.processor = Processor(self.graph)
        self.event_queue = queue.Queue()

        def actions_callback(event_type, data):
            """Mock notify method

            Mocks vitrage.messaging.VitrageNotifier.notify(event_type, data)

            :param event_type: is currently always the same and is ignored
            :param data:
            """
            self.event_queue.put(data)

        scenario_repo = ScenarioRepository()
        self.evaluator = ScenarioEvaluator(self.processor.entity_graph,
                                           scenario_repo,
                                           actions_callback)
        self.consistency_enforcer = ConsistencyEnforcer(
            self.processor.entity_graph,
            actions_callback)

    def test_periodic_process(self):
        # Setup
        consistency_interval = self.conf.datasources.snapshots_interval
        self._periodic_process_setup_stage(consistency_interval)
        self._add_alarms_by_type(consistency_interval=consistency_interval,
                                 alarm_type='prometheus')

        # Action
        time.sleep(2 * consistency_interval + 1)
        self.consistency_enforcer.periodic_process()
        self._process_events()

        # Test Assertions
        instance_vertices = self.processor.entity_graph.get_vertices({
            VProps.VITRAGE_CATEGORY: EntityCategory.RESOURCE,
            VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE
        })
        deleted_instance_vertices = \
            self.processor.entity_graph.get_vertices({
                VProps.VITRAGE_CATEGORY: EntityCategory.RESOURCE,
                VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE,
                VProps.VITRAGE_IS_DELETED: True
            })
        self.assertThat(instance_vertices,
                        matchers.HasLength(self.NUM_INSTANCES - 3))

        # number of resources:
        #   total expected vertices - 3 (deleted instances)
        #   number of nics - 1
        #   number of volumes - 1
        #   number of prometheus alarms - 1
        self.assertThat(
            self.processor.entity_graph.get_vertices(),
            matchers.HasLength(
                self._num_total_expected_vertices() - 3 +
                3 - 1 +  # one nic deleted
                3 - 1 +  # one cinder.volume deleted
                3 - 1)   # one prometheus deleted
        )

        self.assertThat(deleted_instance_vertices, matchers.HasLength(3))

        # one nic was deleted, one marked as deleted, one untouched
        # same for cinder.volume
        self._assert_vertices_status(EntityCategory.RESOURCE, 'nic', 2, 1)
        self._assert_vertices_status(EntityCategory.RESOURCE,
                                     'cinder.volume', 2, 1)

        # one prometheus alarm was deleted, the other two are untouched.
        # prometheus vertices should not be marked as deleted, since the
        # datasource did not ask to delete outdated vertices.
        self._assert_vertices_status(EntityCategory.ALARM, 'prometheus',
                                     2, 0)

    def test_should_delete_vertex(self):
        # should be deleted because the static datasource asks to delete
        # its outdated vertices
        static_vertex = {VProps.VITRAGE_DATASOURCE_NAME: 'static'}
        self.assertTrue(
            self.consistency_enforcer._should_delete_vertex(static_vertex))

        # should be deleted because the cinder datasource asks to delete
        # its outdated vertices
        volume_vertex = {VProps.VITRAGE_DATASOURCE_NAME: 'cinder.volume'}
        self.assertTrue(
            self.consistency_enforcer._should_delete_vertex(volume_vertex))

        # should not be deleted because the prometheus datasource does not
        # ask to delete its outdated vertices
        prometheus_vertex = {VProps.VITRAGE_DATASOURCE_NAME: 'prometheus'}
        self.assertFalse(
            self.consistency_enforcer._should_delete_vertex(
                prometheus_vertex))

        # should be deleted because it is a placeholder
        placeholder_vertex = {
            VProps.VITRAGE_IS_PLACEHOLDER: True,
            VProps.VITRAGE_TYPE: 'prometheus'
        }
        self.assertTrue(
            self.consistency_enforcer._should_delete_vertex(
                placeholder_vertex))

        # should not be deleted because it is an openstack.cluster
        cluster_vertex = {
            VProps.VITRAGE_IS_PLACEHOLDER: True,
            VProps.VITRAGE_TYPE: 'openstack.cluster'
        }
        self.assertFalse(
            self.consistency_enforcer._should_delete_vertex(cluster_vertex))

        vertices = [
            static_vertex,
            volume_vertex,
            prometheus_vertex,
            placeholder_vertex,
            cluster_vertex
        ]
        vertices_to_mark_deleted = self.consistency_enforcer.\
            _filter_vertices_to_be_marked_as_deleted(vertices)

        self.assertThat(vertices_to_mark_deleted, matchers.HasLength(3))
        self.assertIn(static_vertex, vertices_to_mark_deleted)
        self.assertIn(placeholder_vertex, vertices_to_mark_deleted)
        self.assertIn(volume_vertex, vertices_to_mark_deleted)
        self.assertNotIn(prometheus_vertex, vertices_to_mark_deleted)
        self.assertNotIn(cluster_vertex, vertices_to_mark_deleted)

    def _assert_vertices_status(self, category, vitrage_type,
                                num_vertices, num_marked_deleted):
        vertices = self.processor.entity_graph.get_vertices({
            VProps.VITRAGE_CATEGORY: category,
            VProps.VITRAGE_TYPE: vitrage_type,
        })
        self.assertThat(vertices, matchers.HasLength(num_vertices))

        marked_deleted_vertices = self.processor.entity_graph.get_vertices({
            VProps.VITRAGE_CATEGORY: category,
            VProps.VITRAGE_TYPE: vitrage_type,
            VProps.VITRAGE_IS_DELETED: True
        })
        self.assertThat(marked_deleted_vertices,
                        matchers.HasLength(num_marked_deleted))

    def _periodic_process_setup_stage(self, consistency_interval):
        self._create_processor_with_graph(processor=self.processor)
        current_time = utcnow()

        # give all vertices a timestamp that the consistency check will
        # not consider outdated
        self._update_timestamp(
            self.processor.entity_graph.get_vertices(),
            current_time + timedelta(seconds=1.5 * consistency_interval))

        # check the number of instances in the graph
        instance_vertices = self.processor.entity_graph.get_vertices({
            VProps.VITRAGE_CATEGORY: EntityCategory.RESOURCE,
            VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE
        })
        self.assertThat(instance_vertices,
                        matchers.HasLength(self.NUM_INSTANCES))

        # set the current timestamp on part of the instances
        self._update_timestamp(instance_vertices[0:3], current_time)

        # mark part of the instances as deleted
        for i in range(3, 6):
            instance_vertices[i][VProps.VITRAGE_IS_DELETED] = True
            self.processor.entity_graph.update_vertex(instance_vertices[i])

        # mark another part of the instances as deleted, with a future
        # sample timestamp
        for i in range(6, 9):
            instance_vertices[i][VProps.VITRAGE_IS_DELETED] = True
            instance_vertices[i][VProps.VITRAGE_SAMPLE_TIMESTAMP] = str(
                current_time +
                timedelta(seconds=2 * consistency_interval + 1))
            self.processor.entity_graph.update_vertex(instance_vertices[i])

        self._add_resources_by_type(
            consistency_interval=consistency_interval,
            datasource_name='static',
            resource_type='nic')
        self._add_resources_by_type(
            consistency_interval=consistency_interval,
            datasource_name='cinder.volume',
            resource_type='cinder.volume')

    def _update_timestamp(self, lst, timestamp):
        for vertex in lst:
            vertex[VProps.VITRAGE_SAMPLE_TIMESTAMP] = str(timestamp)
            self.processor.entity_graph.update_vertex(vertex)

    def _process_events(self):
        num_retries = 0
        while True:
            if self.event_queue.empty():
                time.sleep(0.3)
            if not self.event_queue.empty():
                time.sleep(1)
                count = 0
                while not self.event_queue.empty():
                    count += 1
                    data = self.event_queue.get()
                    if isinstance(data, list):
                        for event in data:
                            self.processor.process_event(event)
                    else:
                        self.processor.process_event(data)
                return

            num_retries += 1
            if num_retries == 30:
                return

    def _add_resources_by_type(self, consistency_interval, resource_type,
                               datasource_name):
        def _create_resource_by_type(v_id, v_type, ds_name, timestamp,
                                     is_deleted=False):
            return self._create_resource(vitrage_id=v_id,
                                         resource_type=v_type,
                                         datasource_name=ds_name,
                                         sample_timestamp=timestamp,
                                         is_deleted=is_deleted)

        self._add_entities_with_different_timestamps(
            consistency_interval=consistency_interval,
            create_func=_create_resource_by_type,
            category=EntityCategory.RESOURCE,
            datasource_name=datasource_name,
            resource_type=resource_type)

    def _add_alarms_by_type(self, consistency_interval, alarm_type):
        def _create_alarm_by_type(v_id, v_type, ds_name, timestamp,
                                  is_deleted=False):
            return self._create_alarm(vitrage_id=v_id,
                                      alarm_type=v_type,
                                      datasource_name=ds_name,
                                      project_id=None,
                                      vitrage_resource_project_id=None,
                                      metadata=None,
                                      vitrage_sample_timestamp=timestamp,
                                      is_deleted=is_deleted)

        self._add_entities_with_different_timestamps(
            consistency_interval=consistency_interval,
            create_func=_create_alarm_by_type,
            category=EntityCategory.ALARM,
            datasource_name=alarm_type,
            resource_type=alarm_type)

    def _add_entities_with_different_timestamps(self, consistency_interval,
                                                create_func, category,
                                                datasource_name,
                                                resource_type):
        # add entities to the graph:
        # - updated_resource
        # - outdated_resource with an old timestamp
        # - deleted_resource with an old timestamp and is_deleted==True
        future_timestamp = \
            str(utcnow() + timedelta(seconds=2 * consistency_interval))
        past_timestamp = \
            str(utcnow() - timedelta(seconds=2 * consistency_interval - 1))

        updated_resource = create_func(v_id=resource_type + '1234',
                                       v_type=resource_type,
                                       ds_name=datasource_name,
                                       timestamp=future_timestamp)
        outdated_resource = create_func(v_id=resource_type + '5678',
                                        v_type=resource_type,
                                        ds_name=datasource_name,
                                        timestamp=past_timestamp)
        deleted_resource = create_func(v_id=resource_type + '9999',
                                       v_type=resource_type,
                                       ds_name=datasource_name,
                                       timestamp=past_timestamp,
                                       is_deleted=True)

        self.graph.add_vertex(updated_resource)
        self.graph.add_vertex(outdated_resource)
        self.graph.add_vertex(deleted_resource)

        # verify that all three vertices were added
        resource_vertices = self.processor.entity_graph.get_vertices({
            VProps.VITRAGE_CATEGORY: category,
            VProps.VITRAGE_TYPE: resource_type
        })
        self.assertThat(resource_vertices, matchers.HasLength(3),
                        'Wrong number of vertices of type %s'
                        % resource_type)
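# The assertions in test_should_delete_vertex pin down the deletion policy.
# A minimal sketch of the predicate they imply (illustrative only: the real
# ConsistencyEnforcer consults the datasource configuration, while here the
# set of datasources that opted in to deletion is a hard-coded stand-in):
DATASOURCES_THAT_DELETE_OUTDATED = {'static', 'cinder.volume'}  # assumed

def should_delete_vertex_sketch(vertex):
    # placeholders are deleted, except the root openstack.cluster vertex
    if vertex.get('vitrage_is_placeholder'):
        return vertex.get('vitrage_type') != 'openstack.cluster'
    # otherwise delete only if the owning datasource asked for it
    return (vertex.get('vitrage_datasource_name')
            in DATASOURCES_THAT_DELETE_OUTDATED)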
def _create_graph(self):
    graph = NXGraph('Multi tenancy graph')
    self._add_alarm_persistency_subscription(graph)

    # create vertices
    cluster_vertex = create_cluster_placeholder_vertex()
    zone_vertex = self._create_resource('zone_1', NOVA_ZONE_DATASOURCE)
    host_vertex = self._create_resource('host_1', NOVA_HOST_DATASOURCE)
    instance_1_vertex = self._create_resource('instance_1',
                                              NOVA_INSTANCE_DATASOURCE,
                                              project_id='project_1')
    instance_2_vertex = self._create_resource('instance_2',
                                              NOVA_INSTANCE_DATASOURCE,
                                              project_id='project_1')
    instance_3_vertex = self._create_resource('instance_3',
                                              NOVA_INSTANCE_DATASOURCE,
                                              project_id='project_2')
    instance_4_vertex = self._create_resource('instance_4',
                                              NOVA_INSTANCE_DATASOURCE,
                                              project_id='project_2')
    alarm_on_host_vertex = self._create_alarm(
        'alarm_on_host',
        'alarm_on_host',
        metadata={
            VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE,
            VProps.NAME: 'host_1',
            VProps.RESOURCE_ID: 'host_1',
            VProps.VITRAGE_OPERATIONAL_SEVERITY:
                OperationalAlarmSeverity.SEVERE,
            VProps.VITRAGE_AGGREGATED_SEVERITY:
                OperationalAlarmSeverity.SEVERE
        })
    alarm_on_instance_1_vertex = self._create_alarm(
        'alarm_on_instance_1',
        'deduced_alarm',
        project_id='project_1',
        vitrage_resource_project_id='project_1',
        metadata={
            VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE,
            VProps.NAME: 'instance_1',
            VProps.RESOURCE_ID: 'sdg7849ythksjdg',
            VProps.VITRAGE_OPERATIONAL_SEVERITY:
                OperationalAlarmSeverity.SEVERE,
            VProps.VITRAGE_AGGREGATED_SEVERITY:
                OperationalAlarmSeverity.SEVERE
        })
    alarm_on_instance_2_vertex = self._create_alarm(
        'alarm_on_instance_2',
        'deduced_alarm',
        vitrage_resource_project_id='project_1',
        metadata={
            VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE,
            VProps.NAME: 'instance_2',
            VProps.RESOURCE_ID: 'nbfhsdugf',
            VProps.VITRAGE_OPERATIONAL_SEVERITY:
                OperationalAlarmSeverity.WARNING,
            VProps.VITRAGE_AGGREGATED_SEVERITY:
                OperationalAlarmSeverity.WARNING
        })
    alarm_on_instance_3_vertex = self._create_alarm(
        'alarm_on_instance_3',
        'deduced_alarm',
        project_id='project_2',
        vitrage_resource_project_id='project_2',
        metadata={
            VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE,
            VProps.NAME: 'instance_3',
            VProps.RESOURCE_ID: 'nbffhsdasdugf',
            VProps.VITRAGE_OPERATIONAL_SEVERITY:
                OperationalAlarmSeverity.CRITICAL,
            VProps.VITRAGE_AGGREGATED_SEVERITY:
                OperationalAlarmSeverity.CRITICAL
        })
    alarm_on_instance_4_vertex = self._create_alarm(
        'alarm_on_instance_4',
        'deduced_alarm',
        vitrage_resource_project_id='project_2',
        metadata={
            VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE,
            VProps.NAME: 'instance_4',
            VProps.RESOURCE_ID: 'ngsuy76hgd87f',
            VProps.VITRAGE_OPERATIONAL_SEVERITY:
                OperationalAlarmSeverity.WARNING,
            VProps.VITRAGE_AGGREGATED_SEVERITY:
                OperationalAlarmSeverity.WARNING
        })

    # create links
    edges = list()
    edges.append(
        graph_utils.create_edge(cluster_vertex.vertex_id,
                                zone_vertex.vertex_id,
                                EdgeLabel.CONTAINS,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(zone_vertex.vertex_id,
                                host_vertex.vertex_id,
                                EdgeLabel.CONTAINS,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(host_vertex.vertex_id,
                                instance_1_vertex.vertex_id,
                                EdgeLabel.CONTAINS,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(host_vertex.vertex_id,
                                instance_2_vertex.vertex_id,
                                EdgeLabel.CONTAINS,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(host_vertex.vertex_id,
                                instance_3_vertex.vertex_id,
                                EdgeLabel.CONTAINS,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(host_vertex.vertex_id,
                                instance_4_vertex.vertex_id,
                                EdgeLabel.CONTAINS,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(alarm_on_host_vertex.vertex_id,
                                host_vertex.vertex_id,
                                EdgeLabel.ON,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(alarm_on_instance_1_vertex.vertex_id,
                                instance_1_vertex.vertex_id,
                                EdgeLabel.ON,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(alarm_on_instance_2_vertex.vertex_id,
                                instance_2_vertex.vertex_id,
                                EdgeLabel.ON,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(alarm_on_instance_3_vertex.vertex_id,
                                instance_3_vertex.vertex_id,
                                EdgeLabel.ON,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(alarm_on_instance_4_vertex.vertex_id,
                                instance_4_vertex.vertex_id,
                                EdgeLabel.ON,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(alarm_on_host_vertex.vertex_id,
                                alarm_on_instance_1_vertex.vertex_id,
                                EdgeLabel.CAUSES,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(alarm_on_host_vertex.vertex_id,
                                alarm_on_instance_2_vertex.vertex_id,
                                EdgeLabel.CAUSES,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(alarm_on_host_vertex.vertex_id,
                                alarm_on_instance_3_vertex.vertex_id,
                                EdgeLabel.CAUSES,
                                update_timestamp=str(utcnow())))
    edges.append(
        graph_utils.create_edge(alarm_on_host_vertex.vertex_id,
                                alarm_on_instance_4_vertex.vertex_id,
                                EdgeLabel.CAUSES,
                                update_timestamp=str(utcnow())))

    # add vertices to graph
    graph.add_vertex(cluster_vertex)
    graph.add_vertex(zone_vertex)
    graph.add_vertex(host_vertex)
    graph.add_vertex(instance_1_vertex)
    graph.add_vertex(instance_2_vertex)
    graph.add_vertex(instance_3_vertex)
    graph.add_vertex(instance_4_vertex)
    graph.add_vertex(alarm_on_host_vertex)
    graph.add_vertex(alarm_on_instance_1_vertex)
    graph.add_vertex(alarm_on_instance_2_vertex)
    graph.add_vertex(alarm_on_instance_3_vertex)
    graph.add_vertex(alarm_on_instance_4_vertex)

    # add links to graph
    for edge in edges:
        graph.add_edge(edge)

    return graph
def _create_graph(self):
    graph = NXGraph('Multi tenancy graph', uuid=True)

    # create vertices
    cluster_vertex = create_cluster_placeholder_vertex()
    zone_vertex = self._create_resource('zone_1', NOVA_ZONE_DATASOURCE)
    host_vertex = self._create_resource('host_1', NOVA_HOST_DATASOURCE)
    instance_1_vertex = self._create_resource('instance_1',
                                              NOVA_INSTANCE_DATASOURCE,
                                              project_id='project_1')
    instance_2_vertex = self._create_resource('instance_2',
                                              NOVA_INSTANCE_DATASOURCE,
                                              project_id='project_1')
    instance_3_vertex = self._create_resource('instance_3',
                                              NOVA_INSTANCE_DATASOURCE,
                                              project_id='project_2')
    instance_4_vertex = self._create_resource('instance_4',
                                              NOVA_INSTANCE_DATASOURCE,
                                              project_id='project_2')
    alarm_on_host_vertex = self._create_alarm(
        'alarm_on_host',
        'alarm_on_host',
        metadata={'type': 'nova.host',
                  'name': 'host_1',
                  'resource_id': 'host_1'})
    alarm_on_instance_1_vertex = self._create_alarm(
        'alarm_on_instance_1',
        'deduced_alarm',
        project_id='project_1',
        metadata={'type': 'nova.instance',
                  'name': 'instance_1',
                  'resource_id': 'sdg7849ythksjdg'})
    alarm_on_instance_2_vertex = self._create_alarm(
        'alarm_on_instance_2',
        'deduced_alarm',
        metadata={'type': 'nova.instance',
                  'name': 'instance_2',
                  'resource_id': 'nbfhsdugf'})
    alarm_on_instance_3_vertex = self._create_alarm(
        'alarm_on_instance_3',
        'deduced_alarm',
        project_id='project_2',
        metadata={'type': 'nova.instance',
                  'name': 'instance_3',
                  'resource_id': 'nbffhsdasdugf'})
    alarm_on_instance_4_vertex = self._create_alarm(
        'alarm_on_instance_4',
        'deduced_alarm',
        metadata={'type': 'nova.instance',
                  'name': 'instance_4',
                  'resource_id': 'ngsuy76hgd87f'})

    # create links
    edges = list()
    edges.append(graph_utils.create_edge(
        cluster_vertex.vertex_id, zone_vertex.vertex_id, 'contains'))
    edges.append(graph_utils.create_edge(
        zone_vertex.vertex_id, host_vertex.vertex_id, 'contains'))
    edges.append(graph_utils.create_edge(
        host_vertex.vertex_id, instance_1_vertex.vertex_id, 'contains'))
    edges.append(graph_utils.create_edge(
        host_vertex.vertex_id, instance_2_vertex.vertex_id, 'contains'))
    edges.append(graph_utils.create_edge(
        host_vertex.vertex_id, instance_3_vertex.vertex_id, 'contains'))
    edges.append(graph_utils.create_edge(
        host_vertex.vertex_id, instance_4_vertex.vertex_id, 'contains'))
    edges.append(graph_utils.create_edge(
        alarm_on_host_vertex.vertex_id, host_vertex.vertex_id, 'on'))
    edges.append(graph_utils.create_edge(
        alarm_on_instance_1_vertex.vertex_id,
        instance_1_vertex.vertex_id,
        'on'))
    edges.append(graph_utils.create_edge(
        alarm_on_instance_2_vertex.vertex_id,
        instance_2_vertex.vertex_id,
        'on'))
    edges.append(graph_utils.create_edge(
        alarm_on_instance_3_vertex.vertex_id,
        instance_3_vertex.vertex_id,
        'on'))
    edges.append(graph_utils.create_edge(
        alarm_on_instance_4_vertex.vertex_id,
        instance_4_vertex.vertex_id,
        'on'))
    edges.append(graph_utils.create_edge(
        alarm_on_host_vertex.vertex_id,
        alarm_on_instance_1_vertex.vertex_id,
        'causes'))
    edges.append(graph_utils.create_edge(
        alarm_on_host_vertex.vertex_id,
        alarm_on_instance_2_vertex.vertex_id,
        'causes'))
    edges.append(graph_utils.create_edge(
        alarm_on_host_vertex.vertex_id,
        alarm_on_instance_3_vertex.vertex_id,
        'causes'))
    edges.append(graph_utils.create_edge(
        alarm_on_host_vertex.vertex_id,
        alarm_on_instance_4_vertex.vertex_id,
        'causes'))

    # add vertices to graph
    graph.add_vertex(cluster_vertex)
    graph.add_vertex(zone_vertex)
    graph.add_vertex(host_vertex)
    graph.add_vertex(instance_1_vertex)
    graph.add_vertex(instance_2_vertex)
    graph.add_vertex(instance_3_vertex)
    graph.add_vertex(instance_4_vertex)
    graph.add_vertex(alarm_on_host_vertex)
    graph.add_vertex(alarm_on_instance_1_vertex)
    graph.add_vertex(alarm_on_instance_2_vertex)
    graph.add_vertex(alarm_on_instance_3_vertex)
    graph.add_vertex(alarm_on_instance_4_vertex)

    # add links to graph
    for edge in edges:
        graph.add_edge(edge)

    return graph
class GraphCloneWorkerBase(coord.Service):
    def __init__(self,
                 worker_id,
                 conf,
                 task_queues):
        super(GraphCloneWorkerBase, self).__init__(worker_id, conf)
        self._conf = conf
        self._task_queue = task_queues[worker_id]
        self._entity_graph = NXGraph()

    name = 'GraphCloneWorkerBase'

    @abc.abstractmethod
    def _init_instance(self):
        """This method is executed in the newly created process"""
        raise NotImplementedError

    def run(self):
        super(GraphCloneWorkerBase, self).run()

        self._entity_graph.notifier._subscriptions = []  # Quick n dirty
        self._init_instance()
        if self._entity_graph.num_vertices():
            LOG.info("%s - Started %s (%s vertices)",
                     self.__class__.__name__,
                     self.worker_id,
                     self._entity_graph.num_vertices())
        else:
            LOG.info("%s - Started empty %s",
                     self.__class__.__name__,
                     self.worker_id)
        self._read_queue()

    def _read_queue(self):
        LOG.debug("%s - reading queue %s",
                  self.__class__.__name__,
                  self.worker_id)
        while True:
            try:
                next_task = self._task_queue.get()
                self.do_task(next_task)
            except Exception:
                LOG.exception("Graph may not be in sync.")
            if isinstance(self._task_queue,
                          multiprocessing.queues.JoinableQueue):
                self._task_queue.task_done()

    def do_task(self, task):
        action = task[0]
        if action == GRAPH_UPDATE:
            (action, before, current, is_vertex) = task
            self._graph_update(before, current, is_vertex)
        elif action == READ_DB_GRAPH:
            self._read_db_graph()
        elif action == WAIT_FOR_WORKER_START:
            # Nothing to do, manager is just verifying this worker is alive
            pass

    def _graph_update(self, before, current, is_vertex):
        if current:
            if is_vertex:
                self._entity_graph.add_vertex(current)
            else:
                self._entity_graph.add_edge(current)
        else:
            if is_vertex:
                self._entity_graph.remove_vertex(before)
            else:
                self._entity_graph.remove_edge(before)

    def _read_db_graph(self):
        db = storage.get_connection_from_config(self._conf)
        graph_snapshot = db.graph_snapshots.query()
        NXGraph.read_gpickle(graph_snapshot.graph_snapshot,
                             self._entity_graph)
        GraphPersistency.do_replay_events(db, self._entity_graph,
                                          graph_snapshot.event_id)
        self._entity_graph.ready = True
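# A minimal sketch of how a manager process might feed a worker through the
# task-tuple protocol handled by do_task() above. GRAPH_UPDATE and
# READ_DB_GRAPH are the module-level action constants the class dispatches
# on; the vertex value is an illustrative placeholder.
import multiprocessing

task_queue = multiprocessing.JoinableQueue()

# (action, before, current, is_vertex): 'before' is the element prior to
# the change, 'current' is the element after it (a falsy 'current' means
# a removal).
updated_vertex = Vertex('v1', {'name': 'host-1'})  # illustrative
task_queue.put((GRAPH_UPDATE, None, updated_vertex, True))

# a one-element tuple is enough, since do_task() only inspects task[0]
task_queue.put((READ_DB_GRAPH,))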