def tearDown(self):
    """Undo the trigger alarm and remove any template the test added."""
    super(TestTemplateActions, self).tearDown()
    # Let the evaluator settle before reverting the trigger.
    time.sleep(10)
    self._trigger_undo_action(TRIGGER_ALARM_1)
    template = self.added_template
    if template is not None:
        v_util.delete_template(template['uuid'])
        self.added_template = None
def setUpClass(cls):
    """Quiet the vitrage client logger and start from a clean template state."""
    super(TestLongProcessing, cls).setUpClass()
    client_logger = logging.getLogger('vitrageclient.v1.client').logger
    client_logger.setLevel(logging.INFO)
    # Remove a leftover template from a previous run, if any.
    if v_utils.get_first_template(name=TEMPLATE_NAME):
        v_utils.delete_template(name=TEMPLATE_NAME)
        time.sleep(SLEEP)
def test_db_init(self):
    """Check the template scenario survives a fast graph restart from DB."""
    try:
        v_utils.add_template(TEMPLATE_NAME)
        time.sleep(SLEEP)

        # 1. check template works well
        self._check_template_instance_3rd_degree_scenarios()

        # 2. check fast fail-over - start from database
        topo_before = TempestClients.vitrage().topology.get(all_tenants=True)
        v_utils.restart_graph()
        time.sleep(MAX_FAIL_OVER_TIME)
        for iteration in range(5):
            self._check_template_instance_3rd_degree_scenarios()
            topo_after = TempestClients.vitrage().topology.get(
                all_tenants=True)
            self.assert_graph_equal(
                topo_before, topo_after,
                'comparing graph items iteration ' + str(iteration))
            time.sleep(self.conf.datasources.snapshots_interval)

        v_utils.delete_template(name=TEMPLATE_NAME)
        time.sleep(SLEEP)
        self._check_template_instance_3rd_degree_scenarios_deleted()
    except Exception as e:
        self._handle_exception(e)
        # Best-effort cleanup so the next test starts from a clean slate.
        if v_utils.get_first_template(name=TEMPLATE_NAME):
            v_utils.delete_template(name=TEMPLATE_NAME)
            time.sleep(SLEEP)
        raise
def _rollback_to_default(self, templates):
    """Delete each named template, wrapping any failure in VitrageError.

    :param templates: iterable of template names to look up and delete
    :raises VitrageError: if lookup or deletion of any template fails
    """
    try:
        for name in templates:
            db_row = vitrage_utils.get_first_template(name=name)
            vitrage_utils.delete_template(db_row['uuid'])
    except Exception as e:
        # Format eagerly: exception constructors do not apply
        # logging-style '%s' substitution to extra arguments, so the
        # original VitrageError('... %s', e) never interpolated e.
        raise VitrageError('Rollback to default failed %s' % e)
def test_evaluator_reload_with_existing_template_v2(self):
    """Test reload new template e2e v2

    1. add the relevant template
    2. delete the template
    3. raise trigger alarm
    4. check no deduced alarm

    This checks that the template was deleted properly and no
    action was executed.
    :return:
    """
    host_id = self.orig_host.get(VProps.VITRAGE_ID)
    added = v_util.add_template(TEST_TEMPLATE, folder=FOLDER_PATH)
    self.added_template = added
    # Delete immediately - the template must leave no active scenarios.
    v_util.delete_template(added['uuid'])
    self.added_template = None
    self._trigger_do_action(TRIGGER_ALARM_1)
    self._check_deduced(0, DEDUCED_PROPS, host_id)
def test_template_show_with_name(self):
    """Compare template content from file to DB"""
    # add standard template
    template_path = (g_utils.tempest_resources_dir()
                     + '/templates/api/' + STANDARD_TEMPLATE)
    v_utils.add_template(STANDARD_TEMPLATE, template_type=TTypes.STANDARD)

    name = 'host_high_memory_usage_scenarios'
    db_row = v_utils.get_first_template(name=name,
                                        type=TTypes.STANDARD,
                                        status=TemplateStatus.ACTIVE)
    payload_from_db = self.vitrage_client.template.show(name)
    # BaseLoader keeps all scalars as strings, matching the API payload.
    with open(template_path, 'r') as stream:
        payload_from_file = yaml.load(stream, Loader=yaml.BaseLoader)

    self.assert_dict_equal(payload_from_file, payload_from_db,
                           "Template content doesn't match")
    v_utils.delete_template(db_row['uuid'])
def test_high_availability_events(self):
    """The purpose of the test is to check that events are stored

    That is, during different stages in vitrage-graph lifetime:
    before graph read from db (during init)
    after graph read from db (during init)
    during get_all
    after get_all
    """
    try:
        # adding a template just to create more load (to slow things down)
        v_utils.add_template(TEMPLATE_NAME)
        time.sleep(SLEEP)

        self.keep_sending_events = True
        self.num_of_sent_events = 0
        sender_thread = self._async_doctor_events()
        time.sleep(10)
        v_utils.stop_graph()
        time.sleep(10)
        v_utils.restart_graph()
        v_utils.delete_template(name=TEMPLATE_NAME)

        # sleep to allow get_all to start and finish at least once:
        time.sleep(4 * self.conf.datasources.snapshots_interval)
        v_utils.restart_graph()
        self.keep_sending_events = False
        time.sleep(MAX_FAIL_OVER_TIME)
        sender_thread.join(timeout=10)

        alarm_count = TempestClients.vitrage().alarm.count(
            all_tenants=True)
        self.assertTrue(self.num_of_sent_events > 0,
                        'Test did not create events')
        self.assertEqual(self.num_of_sent_events, alarm_count['CRITICAL'],
                         'CRITICAL doctor events expected')
    except Exception as e:
        self._handle_exception(e)
        raise
    finally:
        self._remove_doctor_events()
        if v_utils.get_first_template(name=TEMPLATE_NAME):
            v_utils.delete_template(name=TEMPLATE_NAME)
            time.sleep(SLEEP)
def test_execute_mistral_more_than_once(self):
    """Verify one Mistral execution is created per instance.

    Adds a template that runs the same workflow for every instance and
    checks that a distinct execution (with a distinct 'farewell' input)
    was created for each one.
    """
    executions = self.mistral_client.executions.list()
    self.assertIsNotNone(executions,
                         'Failed to get the list of workflow executions')
    num_executions = len(executions)

    # Make sure there are at least two instances in the environment
    nova_utils.create_instances(num_instances=2, set_public_network=True)
    num_instances = len(TempestClients.nova().servers.list())

    # Add a template that executes the same Mistral workflow for every
    # instance. This should immediately trigger execute_mistral actions.
    template = None
    try:
        template = v_utils.add_template('v3_execute_mistral_twice.yaml')
    finally:
        if template:
            v_utils.delete_template(template['uuid'])  # no longer needed

    time.sleep(2)  # wait for the evaluator to process the new template

    # Verify that there is an execution for every instance
    executions = self.mistral_client.executions.list()
    self.assertIsNotNone(executions,
                         'Failed to get the list of workflow executions')

    msg = "There are %d executions. Expected number of executions: %d " \
          "(old number of executions) + %d (number of instances)" % \
          (len(executions), num_executions, num_instances)
    self.assertThat(executions, HasLength(num_executions + num_instances),
                    msg)

    # There may be many old executions in the list. The relevant ones are
    # at the end. NOTE: the previous code indexed executions[-i] with i
    # starting at 0, so it re-checked executions[0] (the oldest) instead
    # of the newest; slicing the tail fixes that off-by-one.
    executed_on_instances = set()
    for summary in executions[-num_instances:]:
        execution = self.mistral_client.executions.get(summary.id)
        execution_input = jsonutils.loads(execution.input)
        executed_on_instances.add(execution_input['farewell'])

    msg = "There are %d instances in the graph but only %d distinct " \
          "executions" % (num_instances, len(executed_on_instances))
    self.assertThat(executed_on_instances, HasLength(num_instances), msg)
def _check_template_instance_3rd_degree_scenarios(self):
    """Assert deduced alarm counts and per-alarm RCA for the template.

    Verifies one SEVERE and one CRITICAL deduced alarm per instance,
    then validates RCA for every deduced alarm using 10 concurrent
    threads. On failure the template is deleted so later tests are not
    polluted, and the exception is re-raised.
    """
    try:
        alarm_count = TempestClients.vitrage().alarm.count(
            all_tenants=True)
        self.assertEqual(
            self.conf.mock_graph_datasource.instances_per_host,
            alarm_count['SEVERE'],
            'Each instance should have one SEVERE deduced alarm')
        self.assertEqual(
            self.conf.mock_graph_datasource.instances_per_host,
            alarm_count['CRITICAL'],
            'Each instance should have one CRITICAL deduced alarm')

        expected_rca = [{VertexProperties.VITRAGE_TYPE: 'zabbix'}] * self.\
            conf.mock_graph_datasource.zabbix_alarms_per_host
        expected_rca.extend([{'name': DEDUCED_1}, {'name': DEDUCED_2}])

        def check_rca(alarm):
            rca = TempestClients.vitrage().rca.get(alarm['vitrage_id'],
                                                   all_tenants=True)
            try:
                self._check_rca(rca, expected_rca, alarm)
                return True
            except Exception:
                # FIX: the message has no '%s' placeholder, so passing
                # the exception as a lazy arg was a logging formatting
                # error; logger.exception attaches the traceback anyway.
                LOG.exception('check_rca failed')
                return False

        # 10 threads calling rca api
        alarms = TempestClients.vitrage().alarm.list(all_tenants=True,
                                                     vitrage_id='all')
        deduced_alarms = g_utils.all_matches(
            alarms, vitrage_type='vitrage', name=DEDUCED_2)
        # Context manager joins the worker threads even if an assertion
        # below raises (the original never shut the executor down).
        with futures.ThreadPoolExecutor(max_workers=10) as workers:
            workers_result = list(workers.map(check_rca, deduced_alarms))
        self.assertTrue(all(workers_result))

    except Exception as e:
        v_utils.delete_template(name=TEMPLATE_NAME)
        self._handle_exception(e)
        raise
def test_high_availability_events(self):
    """The purpose of the test is to check that events are stored

    That is, during different stages in vitrage-graph lifetime:
    before graph read from db (during init)
    after graph read from db (during init)
    during get_all
    after get_all
    """
    try:
        # adding a template just to create more load (to slow things down)
        v_utils.add_template(TEMPLATE_NAME)
        time.sleep(SLEEP)

        self.keep_sending_events = True
        self.num_of_sent_events = 0
        sender_thread = self._async_doctor_events()
        time.sleep(10)
        v_utils.stop_graph()
        time.sleep(10)
        v_utils.restart_graph()
        time.sleep(MAX_FAIL_OVER_TIME)
        v_utils.delete_template(name=TEMPLATE_NAME)

        # sleep to allow get_all to start and finish at least once:
        time.sleep(4 * CONF.root_cause_analysis_service.snapshots_interval)
        v_utils.restart_graph()
        self.keep_sending_events = False
        time.sleep(MAX_FAIL_OVER_TIME)
        sender_thread.join(timeout=10)

        alarm_count = self.vitrage_client.alarm.count(all_tenants=True)
        self.assertTrue(self.num_of_sent_events > 0,
                        'Test did not create events')
        # Allow a delta of 1 for an event in flight at shutdown time.
        self.assertAlmostEqual(self.num_of_sent_events,
                               alarm_count['CRITICAL'],
                               msg='CRITICAL doctor events expected',
                               delta=1)
    finally:
        self._remove_doctor_events()
def test_template_show(self):
    """Compare template content from file to DB"""
    try:
        # add standard template
        template_path = (g_utils.tempest_resources_dir()
                         + '/templates/api/' + STANDARD_TEMPLATE)
        v_utils.add_template(STANDARD_TEMPLATE,
                            template_type=TTypes.STANDARD)
        db_row = v_utils.get_first_template(
            name='host_high_memory_usage_scenarios',
            type=TTypes.STANDARD,
            status=TemplateStatus.ACTIVE)

        actual = self.client.template.show(db_row['uuid'])
        expected = file.load_yaml_file(template_path)
        self.assertEqual(expected, actual,
                         "Template content doesn't match")
        v_utils.delete_template(db_row['uuid'])
    except Exception as e:
        self._handle_exception(e)
        raise
def test_db_init(self):
    """Check the template scenario survives a fast graph restart from DB."""
    v_utils.add_template(TEMPLATE_NAME)
    time.sleep(SLEEP)

    # 1. check template works well
    self._check_template_instance_3rd_degree_scenarios()

    # 2. check fast fail-over - start from database
    topo_before = self.vitrage_client.topology.get(all_tenants=True)
    v_utils.restart_graph()
    time.sleep(MAX_FAIL_OVER_TIME)
    for iteration in range(5):
        self._check_template_instance_3rd_degree_scenarios()
        topo_after = self.vitrage_client.topology.get(all_tenants=True)
        self._assert_graph_equal(
            topo_before, topo_after,
            'comparing graph items iteration %s' % iteration)
        time.sleep(CONF.root_cause_analysis_service.snapshots_interval)

    v_utils.delete_template(name=TEMPLATE_NAME)
    time.sleep(SLEEP)
    self._check_template_instance_3rd_degree_scenarios_deleted()
def test_evaluator_reload_with_new_template_v2(self):
    """Test reload new template e2e v2

    1. raise trigger alarm
    2. add the relevant template
    3. delete the template
    4. check action - should be not active

    This checks that the evaluators are reloaded and run on all
    existing vertices. Checks the temporary worker that was added to
    delete the template.
    """
    host_id = self.orig_host.get(VProps.VITRAGE_ID)
    self._trigger_do_action(TRIGGER_ALARM_1)

    added = v_util.add_template(TEST_TEMPLATE, folder=FOLDER_PATH)
    self.added_template = added
    time.sleep(2)
    # Template loaded: the deduced alarm must now exist.
    self._check_deduced(1, DEDUCED_PROPS, host_id)

    v_util.delete_template(added['uuid'])
    self.added_template = None
    time.sleep(2)
    # Template removed: the deduced alarm must be gone.
    self._check_deduced(0, DEDUCED_PROPS, host_id)
def test_template_delete(self):
    """Add a standard template, delete it, and verify it is gone."""
    try:
        # add standard template
        v_utils.add_template(STANDARD_TEMPLATE,
                             template_type=TTypes.STANDARD)
        db_row = v_utils.get_first_template(
            name='host_high_memory_usage_scenarios',
            type=TTypes.STANDARD,
            status=TemplateStatus.ACTIVE)
        self.assertIsNotNone(db_row,
                             'Template should appear in templates list')

        # delete template
        v_utils.delete_template(db_row['uuid'])
        db_row = v_utils.get_first_template(
            name='host_high_memory_usage_scenarios',
            type=TTypes.STANDARD)
        self.assertIsNone(db_row, 'Template should not appear in list')
    except Exception as e:
        self._handle_exception(e)
        raise
def tearDownClass(cls):
    """Remove the three test templates, the workflow and all instances."""
    if cls._templates is not None:
        for added in cls._templates[:3]:
            v_utils.delete_template(added['uuid'])
    # Delete the workflow
    cls.mistral_client.workflows.delete(WF_NAME)
    nova_utils.delete_all_instances()
def _add_delete_template(self):
    """A helper function: Adds and deletes a template.

    Returns its uuid.
    """
    # add a template
    v_utils.add_template(STANDARD_TEMPLATE, template_type=TTypes.STANDARD)
    db_row = v_utils.get_first_template(
        name='host_high_memory_usage_scenarios',
        type=TTypes.STANDARD,
        status=TemplateStatus.ACTIVE)
    self.assertIsNotNone(db_row,
                         'Template should appear in templates list')
    template_uuid = db_row['uuid']

    # delete it
    v_utils.delete_template(template_uuid)
    db_row = v_utils.get_first_template(
        name='host_high_memory_usage_scenarios',
        type=TTypes.STANDARD)
    self.assertIsNone(db_row, 'Template should not appear in list')

    return template_uuid
def test_evaluator_reload_with_multiple_new_template(self):
    """Test reload new template e2e

    1. raise trigger alarm (template is not loaded yet)
    2. add 2 new templates
    3. check both actions are executed

    This checks that the evaluators are reloaded for both templates
    and run on all existing vertices.
    """
    second_template = None
    try:
        host_id = self.orig_host.get(VProps.VITRAGE_ID)
        self._trigger_do_action(TRIGGER_ALARM_1)
        self._trigger_do_action(TRIGGER_ALARM_2)

        v_util.add_template(folder=FOLDER_PATH)
        self.added_template = v_util.get_first_template(name=INFILE_NAME)
        second_template = v_util.get_first_template(name=INFILE_NAME_2)

        # Both templates must have produced their deduced alarms.
        self._check_deduced(1, DEDUCED_PROPS, host_id)
        self._check_deduced(1, DEDUCED_PROPS_2, host_id)
    finally:
        if second_template:
            v_util.delete_template(second_template['uuid'])
        self._trigger_undo_action(TRIGGER_ALARM_1)
        self._trigger_undo_action(TRIGGER_ALARM_2)
def tearDownClass(cls):
    """Drop the class-level template, if one was added."""
    super(TestValidate, cls).tearDownClass()
    template = cls._template
    if template is not None:
        v_utils.delete_template(template['uuid'])
def tearDown(self):
    """Remove the long-processing template if it is still in the DB."""
    super(TestLongProcessing, self).tearDown()
    if v_utils.get_first_template(name=TEMPLATE_NAME):
        v_utils.delete_template(name=TEMPLATE_NAME)
        # Give the graph time to process the deletion.
        time.sleep(SLEEP)
def tearDownClass(cls):
    """Drop the class-level template, if one was added."""
    template = cls._template
    if template is not None:
        v_utils.delete_template(template['uuid'])
def tearDownClass(cls):
    """Delete every template added by the class."""
    for added in cls._templates:
        v_utils.delete_template(added['uuid'])
def _rollback_to_default(templates):
    """Delete each named template from the database."""
    for name in templates:
        db_row = vitrage_utils.get_first_template(name=name)
        vitrage_utils.delete_template(db_row['uuid'])
def tearDownClass(cls):
    """Delete the two templates added by the class, if any."""
    if cls._templates is not None:
        for added in cls._templates[:2]:
            v_utils.delete_template(added['uuid'])
def setUpClass(cls):
    """Start from a clean state: drop a leftover template if present."""
    super(TestLongProcessing, cls).setUpClass()
    if v_utils.get_first_template(name=TEMPLATE_NAME):
        v_utils.delete_template(name=TEMPLATE_NAME)
        time.sleep(SLEEP)