class ExperimentII:
    """Experiment II: evaluates top-k test block recommendation accuracy for
    each similarity method against the ground-truth (GT) block assignments."""

    def __init__(self, path, model):
        self.rec = Recommender(path, model)
        self.test_scenarios = []

    def conduct_experiment(self, k=20):
        self.prepare_experiment()
        # Pass k through so the caller's choice actually reaches run_experiment.
        test_steps, test_blocks_GT, method_blocks_R = self.run_experiment(k)
        self.visualize_experiment(test_steps, test_blocks_GT, method_blocks_R)

    def prepare_experiment(self):
        """Load every atomic evaluation test scenario from the experiment directory."""
        from data_container import ModelDataReader
        get_files = ModelDataReader.get_files
        test_scenarios_dir_path = ("../data/advanced/evaluation_experiments/"
                                   "implemented_test_cases/experiments_test_scenarios/atomic/")
        test_scenarios_files = get_files(test_scenarios_dir_path, ".xlsx")
        for path in test_scenarios_files:
            test_scenario = self.load_evaluation_test_scenario(path)
            self.test_scenarios.append(test_scenario)

    def load_evaluation_test_scenario(self, path):
        from data_container import ModelDataReader
        read_excel = ModelDataReader.read_excel
        sheet = "Steps_n_Blocks_Simple"
        step_ids = read_excel(path, sheet, "Step ID", 0)
        step_descriptions = read_excel(path, sheet, "Step Description", 0)
        step_expected_results = read_excel(path, sheet, "Expected Result", 0)
        step_test_blocks_names = read_excel(path, sheet, "Test Block Name", 0)
        step_test_block_params_n_vals = read_excel(path, sheet, "Test Block Parameters", 0)
        # Test metadata and requirement links are not needed for this experiment,
        # so placeholders and empty lists are passed instead.
        return EvaluationTestScenario('tid', 'title', 'description', step_ids,
                                      step_descriptions, step_expected_results,
                                      step_test_blocks_names,
                                      step_test_block_params_n_vals, [], [])

    def run_experiment(self, k=20):
        """Query the recommender for every test step under each similarity method."""
        method_blocks_R = {}
        all_test_steps = [step for scenario in self.test_scenarios
                          for step in scenario.steps]
        for method in ["avg", "sta", "tf-idf", "jac", "lsi"]:
            test_blocks_R = [self.rec.find_top_blocks(test_step.description, k, method,
                                                      text_weight=0.9, param_weight=0.1)
                             for test_step in all_test_steps]
            method_blocks_R[method] = test_blocks_R
        test_steps = [step.id for scenario in self.test_scenarios
                      for step in scenario.steps]
        # Map each ground-truth block name to its index in the recommender's block list.
        block_id = self.rec.data.test_blocks_names.index
        test_blocks_GT = [block_id(block[0].name)
                          for scenario in self.test_scenarios
                          for block in scenario.blocks_GT]
        return test_steps, test_blocks_GT, method_blocks_R

    def visualize_experiment(self, test_steps, test_blocks_GT, method_blocks_R):
        for k in (1, 5, 10):
            print(f"Experiment 2a, k = {k}")
            experiment_1_2_a(k, test_steps, test_blocks_GT, method_blocks_R)
        print("Experiment 2b")
        experiment_1_2_b(test_steps, test_blocks_GT, method_blocks_R)
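# Usage sketch for ExperimentII (illustrative only; the constructor arguments
# are assumptions, since this file does not show how Recommender is
# instantiated elsewhere in the project):
#
#     experiment = ExperimentII("../data/advanced/", model)
#     experiment.conduct_experiment(k=20)
#
# conduct_experiment() loads the atomic scenarios, queries the recommender for
# every step description under all five similarity methods, and renders the
# results via experiment_1_2_a / experiment_1_2_b.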
class ExperimentTestCoverage:
    """Measures, per test scenario, how many test steps receive their correct
    test block and how many linked requirements are traced by the recommender,
    compared against the ground truth (GT) from the scenario spreadsheets."""

    def __init__(self, path, model):
        self.rec = Recommender(path, model)
        self.test_scenarios = []

    def conduct_experiment(self):
        self.prepare_experiment()
        (automated_test_steps_GT, traced_requirements_GT,
         automated_test_steps_S, traced_requirements_S) = self.run_experiment()
        self.visualize_experiment(automated_test_steps_GT, traced_requirements_GT,
                                  automated_test_steps_S, traced_requirements_S)

    def prepare_experiment(self):
        """Load every atomic evaluation test scenario from the experiment directory."""
        from data_container import ModelDataReader
        get_files = ModelDataReader.get_files
        test_scenarios_dir_path = ("../data/advanced/evaluation_experiments/"
                                   "implemented_test_cases/experiments_test_scenarios/atomic/")
        test_scenarios_files = get_files(test_scenarios_dir_path, ".xlsx")
        for path in test_scenarios_files:
            test_scenario = self.load_evaluation_test_scenario(path)
            self.test_scenarios.append(test_scenario)

    def load_evaluation_test_scenario(self, path):
        from data_container import ModelDataReader
        read_excel = ModelDataReader.read_excel
        tid = read_excel(path, "Test_Info", "Test ID", 0)
        title = read_excel(path, "Test_Info", "Test Title", 0)
        description = read_excel(path, "Test_Info", "Test Description", 0)
        sheet = "Steps_n_Blocks_Simple"
        step_ids = read_excel(path, sheet, "Step ID", 0)
        step_descriptions = read_excel(path, sheet, "Step Description", 0)
        step_expected_results = read_excel(path, sheet, "Expected Result", 0)
        step_test_blocks_names = read_excel(path, sheet, "Test Block Name", 0)
        step_test_block_params_n_vals = read_excel(path, sheet, "Test Block Parameters", 0)
        req_ids = read_excel(path, "Reqs", "Linked Req ID", 0)
        req_ids = [int(rid) for rid in req_ids]
        req_names = read_excel(path, "Reqs", "Linked Req Description", 0)
        return EvaluationTestScenario(tid, title, description, step_ids,
                                      step_descriptions, step_expected_results,
                                      step_test_blocks_names,
                                      step_test_block_params_n_vals,
                                      req_ids, req_names)

    def run_experiment(self):
        automated_test_steps_GT = []
        traced_requirements_GT = []
        automated_test_steps_S = []
        traced_requirements_S = []
        block_id = self.rec.data.test_blocks_names.index
        for test_scenario in self.test_scenarios:
            print(test_scenario.title)
            # Requirements traceability: count how many ground-truth requirements
            # appear among the top-15 recommended requirements.
            reqs_GT = [req.id for req in test_scenario.reqs_GT]
            reqs_R = self.rec.recommend_reqs_by_id(test_scenario, N=15, method='avg')
            print("reqs_GT", reqs_GT)
            print("reqs_R", reqs_R)
            reqs_count = sum(1 for req_GT in reqs_GT if req_GT in reqs_R)
            print("matched reqs:", reqs_count)
            # Step automation: count how many steps have their ground-truth block
            # among the top-15 recommended blocks.
            blocks_count = 0
            for i, test_step in enumerate(test_scenario.steps):
                blocks_R = self.rec.find_top_blocks(test_step.description, N=15,
                                                    method='avg', text_weight=0.9,
                                                    param_weight=0.1)
                block_GT = block_id(test_scenario.blocks_GT[i][0].name)
                if block_GT in blocks_R:
                    blocks_count += 1
            automated_test_steps_GT.append(len(test_scenario.steps))
            traced_requirements_GT.append(len(reqs_GT))
            automated_test_steps_S.append(blocks_count)
            traced_requirements_S.append(reqs_count)
        return (automated_test_steps_GT, traced_requirements_GT,
                automated_test_steps_S, traced_requirements_S)

    def visualize_experiment(self, automated_test_steps_GT, traced_requirements_GT,
                             automated_test_steps_S, traced_requirements_S):
        test_list = list(range(len(self.test_scenarios)))
        experiment_test_coverage(test_list, automated_test_steps_GT,
                                 traced_requirements_GT, automated_test_steps_S,
                                 traced_requirements_S)
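# Hypothetical entry point (a sketch only; DATA_PATH and MODEL below are
# placeholder assumptions, not values taken from this project):
if __name__ == "__main__":
    DATA_PATH = "../data/advanced/"  # assumed data root expected by Recommender
    MODEL = None                     # assumed pre-loaded model object or identifier

    # Experiment II: top-k block recommendation accuracy per similarity method.
    ExperimentII(DATA_PATH, MODEL).conduct_experiment(k=20)

    # Test coverage: per-scenario matched blocks and traced requirements.
    ExperimentTestCoverage(DATA_PATH, MODEL).conduct_experiment()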