def test_check_log_for_errors(self):
    """Exercise check_log_for_errors against a captured devstack export log.

    Scenarios covered:
      1. Reported final schema version differs from expectation -> failure.
      2. Schema version matches -> success.
      3. Migration 152's allowed time tightened below what the log shows
         -> "took too long" warnings and failure.
      4. Limit relaxed again -> success.

    Fix: the original test replaced ``handle_results.find_schemas`` at
    module level and never restored it, so the stub leaked into any test
    that ran afterwards. The original callable is now restored in a
    ``finally`` block.
    """
    logfile = os.path.join(TESTS_DIR, 'assets/20131007_devstack_export.log')
    with open(os.path.join(TESTS_DIR,
                           'datasets/some_dataset_example/config.json'),
              'r') as config_stream:
        dataset_config = json.load(config_stream)
    gitpath = ''

    # Monkeypatch find_schemas for the duration of this test only.
    original_find_schemas = handle_results.find_schemas
    try:
        # 1. Schema version mismatch -> failure with explanatory message.
        handle_results.find_schemas = lambda x: [123]
        result, msg = handle_results.check_log_for_errors(
            logfile, gitpath, dataset_config)
        self.assertFalse(result)
        self.assertEqual(msg,
                         'Final schema version does not match expectation')

        # 2. Schema version matches the log's final version -> success.
        handle_results.find_schemas = lambda x: [228]
        result, msg = handle_results.check_log_for_errors(
            logfile, gitpath, dataset_config)
        self.assertTrue(result)
        self.assertEqual(msg, 'SUCCESS')

        # 3. Tighten migration 152's time budget below what the log
        #    records -> both passes of the migration are flagged.
        dataset_config['maximum_migration_times']['152'] = 3
        result, msg = handle_results.check_log_for_errors(
            logfile, gitpath, dataset_config)
        self.assertFalse(result)
        self.assertEqual(msg, ('WARNING: Migration 152 took too long, '
                               'WARNING: Migration 152 took too long'))

        # 4. Relax the budget again -> back to success.
        dataset_config['maximum_migration_times']['152'] = 10
        result, msg = handle_results.check_log_for_errors(
            logfile, gitpath, dataset_config)
        self.assertTrue(result)
        self.assertEqual(msg, 'SUCCESS')
    finally:
        # Undo the monkeypatch so other tests see the real implementation.
        handle_results.find_schemas = original_find_schemas
def _check_all_dataset_logs_for_errors(self):
    """Check every job dataset's log for errors and record the outcome.

    Stores each dataset's result message on the corresponding entry of
    ``self.job_datasets`` and sets ``self.work_data['result']`` to an
    overall SUCCESS/failure summary.
    """
    self.log.debug("Check logs for errors")
    success = True
    for idx, dataset in enumerate(self.job_datasets):
        # Look for the beginning of the migration start
        dataset_success, message = handle_results.check_log_for_errors(
            dataset['job_log_file_path'], self.git_path, dataset['config'])
        self.job_datasets[idx]['result'] = message
        # A single failing dataset makes the whole job a failure.
        if not dataset_success:
            success = False
    if success:
        self.work_data['result'] = "SUCCESS"
    else:
        self.work_data['result'] = "Failed: errors found in dataset log(s)"