def test_aig_file_vs_blame_degrees_table(self) -> None:
    """Tests the latex booktabs format for the blame vs. file degrees
    table."""
    vara_cfg()["paper_config"][
        "current_config"] = "test_diff_correlation_overview_table"
    initialize_projects()
    load_paper_config()

    # latex booktabs is default format
    table_str = AuthorBlameVsFileDegreesTable(
        TableConfig.from_kwargs(view=False),
        case_study=get_loaded_paper_config().get_case_studies("xz")[0]
    ).tabulate(TableFormat.LATEX_BOOKTABS, False)

    self.assertEqual(
        r"""\begin{tabular}{lrrrrr}
\toprule
{} & blame\_num\_commits & blame\_node\_degree & author\_diff & file\_num\_commits & file\_node\_degree \\
author & & & & & \\
\midrule
Alexey Tourbin & NaN & NaN & NaN & 1 & 2 \\
Ben Boeckel & NaN & NaN & NaN & 1 & 2 \\
Jim Meyering & NaN & NaN & NaN & 1 & 2 \\
Lasse Collin & 124.0 & 0.0 & 0.0 & 479 & 6 \\
\bottomrule
\end{tabular}
""", table_str)
def test_bb_run_slurm_and_container(
    self, mock_sbatch, mock_build_images
) -> None:
    runner = CliRunner()
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    # needed so we see the paper config
    load_paper_config()
    # needed so benchbuild sees the paper config
    save_config()

    bb_cfg()["slurm"]["template"] = str(
        Path(str(vara_cfg()["benchbuild_root"])) / "slurm_container.sh.inc"
    )
    save_bb_config()

    # Easiest way to configure slurm + container is 'vara-container'.
    # As a side-effect, this command is now even more tested :)
    prepare_result = runner.invoke(driver_container.main, ["prepare-slurm"])
    self.assertEqual(0, prepare_result.exit_code, prepare_result.exception)
    self.assertTrue(Path(str(bb_cfg()["slurm"]["template"])).exists())

    result = runner.invoke(
        driver_run.main, ["--slurm", "--container", "-E", "JustCompile"]
    )
    self.assertEqual(0, result.exit_code, result.exception)
    self.assertTrue((
        Path(str(vara_cfg()["benchbuild_root"])) / "JustCompile-slurm.sh"
    ).exists())
def test_file_based_interaction_graph(self) -> None:
    """Test whether file-based interaction graphs are created correctly."""
    vara_cfg()['paper_config']['current_config'] = "test_casestudy_status"
    load_paper_config()

    revision = newest_processed_revision_for_case_study(
        get_paper_config().get_case_studies("xz")[0], BlameReport
    )
    assert revision
    blame_interaction_graph = create_file_based_interaction_graph(
        "xz", revision
    )

    self.assertEqual(blame_interaction_graph.project_name, "xz")

    cig = blame_interaction_graph.commit_interaction_graph()
    self.assertEqual(482, len(cig.nodes))
    self.assertEqual(16518, len(cig.edges))

    aig = blame_interaction_graph.author_interaction_graph()
    self.assertEqual(4, len(aig.nodes))
    self.assertEqual(6, len(aig.edges))

    caig = blame_interaction_graph.commit_author_interaction_graph()
    self.assertEqual(486, len(caig.nodes))
    self.assertEqual(509, len(caig.edges))
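# A minimal helper sketch, not part of the test suite: the three derived
# graphs above are always inspected the same way, so their sizes could be
# collected in one place. Only accessors already exercised in this test are
# used; the helper name and the returned dict layout are our own.
def summarize_interaction_graphs(big):
    """Return (num_nodes, num_edges) for each graph derived from ``big``."""
    graphs = {
        "commit": big.commit_interaction_graph(),
        "author": big.author_interaction_graph(),
        "commit_author": big.commit_author_interaction_graph(),
    }
    return {
        name: (len(graph.nodes), len(graph.edges))
        for name, graph in graphs.items()
    }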
def test_artefacts_show(self) -> None:
    """Test whether `vara-art show` produces expected output."""
    # setup config
    vara_cfg()['paper_config']['current_config'] = "test_artefacts_driver"
    load_paper_config()

    expected = r"""Artefact 'Paper Config Overview':
  artefact_type: plot
  artefact_type_version: 2
  dry_run: false
  file_type: png
  name: Paper Config Overview
  output_dir: .
  plot_config: {}
  plot_generator: pc-overview-plot
  report_type: EmptyReport
  view: false
"""

    # vara-art show
    runner = CliRunner()
    result = runner.invoke(
        driver_artefacts.main, ["show", "Paper Config Overview"]
    )
    self.assertEqual(0, result.exit_code, result.exception)
    self.assertEqual(expected, result.stdout)
def test_store_artefact(self):
    """Test whether `vara-plot` can store artefacts."""
    # setup config
    vara_cfg()['paper_config']['current_config'] = "test_artefacts_driver"
    load_paper_config()
    save_config()

    artefacts_file_path = get_paper_config().path / "artefacts.yaml"
    artefacts_file_path.unlink()

    # vara-plot
    runner = CliRunner()
    result = runner.invoke(
        driver_plot.main, [
            "--save-artefact=PC Overview", "--plot-dir=foo",
            "pc-overview-plot", "--report-type=EmptyReport"
        ]
    )
    self.assertEqual(0, result.exit_code, result.exception)
    self.assertTrue(artefacts_file_path.exists())

    # load new artefact file
    load_paper_config()
    artefacts = list(get_paper_config().artefacts)
    self.assertEqual(1, len(artefacts))

    artefact = artefacts[0]
    self.assertIsInstance(artefact, PlotArtefact)
    self.assertEqual("PC Overview", artefact.name)
def test_vara_cs_cleanup_error(self):
    """Test `vara-cs cleanup` in error mode."""
    runner = CliRunner()
    vara_cfg()["paper_config"]["current_config"] = "test_cleanup_error"
    save_config()
    load_paper_config()
    importlib.reload(driver_casestudy)

    result = runner.invoke(
        driver_casestudy.main, ['cleanup', 'all', '--error']
    )
    self.assertEqual(0, result.exit_code, result.stdout)
    self.assertFalse(
        Path(
            vara_cfg()["result_dir"].value +
            "/brotli/CRE-CR-brotli-all-6c47009892_5d26c7ff-6d27-478f-bcd1"
            "-99e8e8e97f16_cerror.txt"
        ).exists()
    )
    self.assertFalse(
        Path(
            vara_cfg()["result_dir"].value +
            "/brotli/CRE-CR-brotli-all-aaa4424d9b_5d26c7ff-6d27-478f-bcd1-"
            "99e8e8e97f16_failed.txt"
        ).exists()
    )
    self.assertTrue(
        Path(
            vara_cfg()["result_dir"].value +
            "/brotli/CRE-CR-brotli-brotli-21ac39f7c8_34d4d1b5-7212-4244-"
            "9adc-b19bff599cf1_success.yaml"
        ).exists()
    )
def test_get_newest_result_files_for_case_study_fail(self) -> None:
    """Check that when we have two files, the newest one gets selected."""
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    load_paper_config()

    bad_file = ReportFilename(
        'CRE-CR-brotli-brotli-21ac39f7c8_'
        '34d4d1b5-7212-4244-9adc-b19bff599cf1_success.yaml'
    )

    now = datetime.now().timestamp()
    file_path = Path(
        str(vara_cfg()['result_dir'])
    ) / 'brotli' / bad_file.filename
    os.utime(file_path, (now, now))

    newest_res_files = MCS.get_newest_result_files_for_case_study(
        get_paper_config().get_case_studies('brotli')[0],
        Path(vara_cfg()['result_dir'].value), CR
    )

    # remove unnecessary files
    filtered_newest_res_files = list(
        filter(
            lambda res_file: res_file.commit_hash == bad_file.commit_hash,
            map(
                lambda res_file: ReportFilename(res_file), newest_res_files
            )
        )
    )

    self.assertFalse(filtered_newest_res_files[0].uuid.endswith('42'))
def test_vara_cs_gen_to_extend_new_stage(self):
    """Test the extend functionality of `vara-cs gen`."""
    runner = CliRunner()
    vara_cfg()["paper_config"]["current_config"] = "test_ext"
    save_config()
    load_paper_config()

    old_commit = 'ef364d3abc5647111c5424ea0d83a567e184a23b'
    new_commit = '6c6da57ae2aa962aabde6892442227063d87e88c'

    result = runner.invoke(
        driver_casestudy.main, [
            'gen', '-p', 'xz', '--new-stage', '--merge-stage', 'test',
            'select_specific', new_commit
        ]
    )
    self.assertEqual(0, result.exit_code, result.exception)

    case_study_path = Path(
        vara_cfg()["paper_config"]["folder"].value +
        "/test_ext/xz_0.case_study"
    )
    self.assertTrue(case_study_path.exists())

    case_study = load_case_study_from_file(case_study_path)
    self.assertTrue(FullCommitHash(old_commit) in case_study.revisions)
    self.assertTrue(FullCommitHash(new_commit) in case_study.revisions)
    self.assertEqual(2, case_study.num_stages)
    self.assertEqual('test', case_study.stages[1].name)
def test_newest_processed_revision_no_results(self) -> None:
    """Check that None is returned when no results are available."""
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    load_paper_config()

    newest_processed = MCS.newest_processed_revision_for_case_study(
        get_paper_config().get_case_studies('brotli')[0], CR
    )

    self.assertIsNone(newest_processed)
def test_get_revision_not_in_case_study(self) -> None:
    """Check if we correctly handle the lookup of a revision that is not in
    the case study."""
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    load_paper_config()

    self.assertRaises(
        ValueError, MCS.get_revision_status_for_case_study,
        get_paper_config().get_case_studies('brotli')[0],
        ShortCommitHash('0000000000'), CR
    )
def test_get_revisions_status_for_case_study_to_high_stage(self) -> None:
    """Check that we correctly handle lookups where the selected stage is
    larger than the biggest one in the case study."""
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    load_paper_config()

    self.assertListEqual(
        MCS.get_revisions_status_for_case_study(
            get_paper_config().get_case_studies('brotli')[0], CR, 9001
        ), []
    )
def test_get_newest_result_files_for_case_study_with_empty_res_dir(
    self
) -> None:
    """Check that we correctly handle the edge case where no result dir
    exists."""
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    load_paper_config()

    self.assertListEqual(
        MCS.get_newest_result_files_for_case_study(
            get_paper_config().get_case_studies('brotli')[0],
            Path(vara_cfg()['result_dir'].value), CR
        ), []
    )
def test_get_revisions_in_case_study(self) -> None:
    """Check if we correctly handle the lookup of a revision that is in a
    case study."""
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    load_paper_config()

    self.assertEqual(
        MCS.get_revision_status_for_case_study(
            get_paper_config().get_case_studies('brotli')[0],
            ShortCommitHash('21ac39f7c8'), CR
        ), FileStatusExtension.SUCCESS
    )
def test_newest_processed_revision(self) -> None:
    """Check whether the newest processed revision is correctly
    identified."""
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    load_paper_config()

    newest_processed = MCS.newest_processed_revision_for_case_study(
        get_paper_config().get_case_studies('brotli')[0], CR
    )

    self.assertEqual(
        FullCommitHash('21ac39f7c8ca61c855be0bc38900abe7b5a0f67f'),
        newest_processed
    )
def test_table_tex_output(self) -> None:
    """Check whether the table produces the correct tex output."""
    vara_cfg()["paper_config"][
        "current_config"] = "test_diff_correlation_overview_table"
    initialize_projects()
    load_paper_config()

    table = diff_correlation_overview_table.DiffCorrelationOverviewTable(
        TableConfig.from_kwargs(view=False)
    ).tabulate(TableFormat.LATEX_BOOKTABS, False)

    with open("tables/b_diff_correlation_overview.tex") as expected:
        self.assertEqual(table, expected.read())
def test_get_failed_revisions(self) -> None:
    """Check if we can correctly find all failed revisions of a case
    study."""
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    load_paper_config()

    failed_revs = MCS.failed_revisions_for_case_study(
        get_paper_config().get_case_studies('brotli')[0], CR
    )

    self.assertEqual(len(failed_revs), 1)
    self.assertTrue(
        FullCommitHash('aaa4424d9bdeb10f8af5cb4599a0fc2bbaac5553') in
        failed_revs
    )
def test_get_processed_revisions(self) -> None:
    """Check if we can correctly find all processed revisions of a case
    study."""
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    load_paper_config()

    process_revs = MCS.processed_revisions_for_case_study(
        get_paper_config().get_case_studies('brotli')[0], CR
    )

    self.assertEqual(len(process_revs), 1)
    self.assertTrue(
        FullCommitHash('21ac39f7c8ca61c855be0bc38900abe7b5a0f67f') in
        process_revs
    )
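# A hedged sketch of how the lookups above combine: split a case study's
# revisions into processed and failed sets with one call. The helper is our
# own; it reuses only the MCS functions exercised in the two tests above.
def triage_case_study(case_study, report_type):
    """Return (processed, failed) revisions for ``case_study``."""
    processed = MCS.processed_revisions_for_case_study(
        case_study, report_type
    )
    failed = MCS.failed_revisions_for_case_study(case_study, report_type)
    return processed, failed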
def test_vara_pc_list(self):
    """Test the vara-pc list subcommand."""
    runner = CliRunner()
    paper_configs = ["foo", "bar", "baz"]
    pc_path = Path(vara_cfg()["paper_config"]["folder"].value)
    for pc in paper_configs:
        (pc_path / pc).mkdir()
    vara_cfg()["paper_config"]["current_config"] = "foo"
    load_paper_config()

    result = runner.invoke(driver_paper_config.main, ["list"])
    self.assertEqual(
        "Found the following paper_configs:\nbar\nbaz\nfoo *\n",
        result.output
    )
def test_artefacts_list(self) -> None:
    """Test whether `vara-art list` produces expected output."""
    # setup config
    vara_cfg()['paper_config']['current_config'] = "test_artefacts_driver"
    load_paper_config()

    # vara-art list
    runner = CliRunner()
    result = runner.invoke(driver_artefacts.main, ["list"])
    self.assertEqual(0, result.exit_code, result.exception)
    self.assertEqual(
        "Paper Config Overview [plot]\nCorrelation Table [table]\n",
        result.stdout
    )
def test_bb_run_all(self) -> None:
    runner = CliRunner()
    vara_cfg()['paper_config']['current_config'] = "test_artefacts_driver"
    # needed so we see the paper config
    load_paper_config()
    # needed so benchbuild sees the paper config
    save_config()

    result = runner.invoke(driver_run.main, ["-p", "-E", "JustCompile"])
    self.assertEqual(0, result.exit_code, result.exception)

    match = self.__NUM_ACTIONS_PATTERN.search(result.stdout)
    if not match:
        self.fail("Could not parse benchbuild output")
    self.assertEqual("51", match.group(1))
def test_cli_option_converter(self):
    """Test whether CLI option conversion works correctly."""
    # setup config
    vara_cfg()['paper_config']['current_config'] = "test_artefacts_driver"
    load_paper_config()
    save_config()

    plot_generator = CaseStudyOverviewGenerator(
        PlotConfig.from_kwargs(view=False),
        report_type=EmptyReport,
        case_study=get_loaded_paper_config().get_case_studies("xz")[0]
    )
    artefact = PlotArtefact.from_generator(
        "CS Overview", plot_generator, CommonPlotOptions.from_kwargs()
    )
    artefact_dict = artefact.get_dict()
    self.assertEqual("xz_0", artefact_dict["case_study"])
    self.assertEqual("EmptyReport", artefact_dict["report_type"])
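# Usage sketch (ours, assuming PyYAML is available): the dict returned by
# ``artefact.get_dict()`` above is plain data, so it can be rendered as YAML
# directly. The suite itself persists artefacts via
# ``vara-plot --save-artefact`` (see test_store_artefact); this only
# illustrates what the converted CLI options look like on disk.
import yaml


def artefact_to_yaml(artefact) -> str:
    """Render an artefact's CLI-option dict as a YAML snippet."""
    return yaml.safe_dump(artefact.get_dict(), default_flow_style=False)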
def test_vara_cs_cleanup_regex(self):
    """Test `vara-cs cleanup` in regex mode."""
    runner = CliRunner()
    vara_cfg()["paper_config"]["current_config"] = "test_cleanup_regex"
    save_config()
    load_paper_config()
    importlib.reload(driver_casestudy)

    result = runner.invoke(
        driver_casestudy.main, ['cleanup', 'regex', '-f', '.*'], 'y'
    )
    self.assertEqual(0, result.exit_code, result.exception)
    self.assertFalse(
        Path(
            vara_cfg()["result_dir"].value +
            "/brotli/CRE-CR-brotli-all-6c47009892_5d26c7ff-6d27-478f-bcd1-"
            "99e8e8e97f16_cerror.txt"
        ).exists()
    )
    self.assertFalse(
        Path(
            vara_cfg()["result_dir"].value +
            "/brotli/CRE-CR-brotli-all-aaa4424d9b_5d26c7ff-6d27-478f-bcd1-"
            "99e8e8e97f16_failed.txt"
        ).exists()
    )
    self.assertFalse(
        Path(
            vara_cfg()["result_dir"].value +
            "/brotli/CRE-CR-brotli-brotli-21ac39f7c8_34d4d1b5-7212-4244-"
            "9adc-b19bff599cf1_success.yaml"
        ).exists()
    )
    self.assertFalse(
        Path(
            vara_cfg()["result_dir"].value +
            "/brotli/CRE-CR-brotli-brotli-21ac39f7c8_34d4d1b5-7212-4244-"
            "9adc-b19bff599142_success.yaml"
        ).exists()
    )
    self.assertTrue(
        Path(
            vara_cfg()["result_dir"].value +
            "/gravity/BVRE_NoOptTBAA-BVR_NoOpt_TBAA-gravity-gravity-"
            "b51227de55_8bc2ac4c-b6e3-43d1-aff9-c6b32126b155_success.txt"
        ).exists()
    )
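# Helper sketch (hypothetical, ours): the repeated existence checks in the
# two cleanup tests above could be factored into a single predicate. It
# relies only on ``vara_cfg()["result_dir"]``, which both tests already read
# the same way, e.g. ``self.assertTrue(result_file_exists("/gravity/..."))``.
def result_file_exists(relative_path: str) -> bool:
    """Check for a result file below the configured result directory."""
    return Path(vara_cfg()["result_dir"].value + relative_path).exists()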
def test_plot(self):
    """Test whether `vara-plot` generates a plot."""
    # setup config
    vara_cfg()['paper_config']['current_config'] = "test_artefacts_driver"
    load_paper_config()
    save_config()

    plot_base_dir = Path(str(vara_cfg()['plots']['plot_dir']))

    # vara-plot
    runner = CliRunner()
    result = runner.invoke(
        driver_plot.main,
        ["--plot-dir=foo", "pc-overview-plot", "--report-type=EmptyReport"]
    )
    self.assertEqual(0, result.exit_code, result.exception)
    self.assertTrue((
        plot_base_dir / "foo" / "paper_config_overview_plot.svg"
    ).exists())
def test_vara_pc_select(self):
    """Test the vara-pc select subcommand."""
    runner = CliRunner()
    paper_configs = ["foo", "bar", "baz"]
    pc_path = Path(vara_cfg()["paper_config"]["folder"].value)
    for pc in paper_configs:
        (pc_path / pc).mkdir()
    vara_cfg()["paper_config"]["current_config"] = "foo"
    load_paper_config()

    result = runner.invoke(driver_paper_config.main, ["select"], input="1")
    assert not result.exception
    self.assertEqual(
        "0. bar\n1. baz\n2. foo *\n"
        "Choose a number to select a paper config (default=0): ",
        result.output
    )
    self.assertEqual(
        "baz", vara_cfg()["paper_config"]["current_config"].value
    )
def test_one_case_study_latex_booktabs(self) -> None:
    """Tests the latex booktabs format for the cs overview table."""
    vara_cfg()["paper_config"]["current_config"] = "test_revision_lookup"
    initialize_projects()
    load_paper_config()

    # latex booktabs is default format
    table_str = CaseStudyMetricsTable(
        TableConfig.from_kwargs(view=False)
    ).tabulate(TableFormat.LATEX_BOOKTABS, False)

    self.assertEqual(
        r"""\begin{tabular}{llrrrl}
\toprule
{} & Domain & LOC & Commits & Authors & Revision \\
\midrule
\textbf{brotli} & Compression & 34639 & 848 & 40 & aaa4424d9b \\
\bottomrule
\end{tabular}
""", table_str)
def test_artefacts_generate(self) -> None:
    """Test whether `vara-art generate` generates all expected files."""
    # setup config
    vara_cfg()['paper_config']['current_config'] = "test_artefacts_driver"
    load_paper_config()
    artefacts = get_paper_config().get_all_artefacts()
    base_output_dir = Artefact.base_output_dir()

    # vara-art generate
    runner = CliRunner()
    result = runner.invoke(driver_artefacts.main, ["generate"])
    self.assertEqual(0, result.exit_code, result.exception)

    # check that overview files are present
    self.assertTrue((base_output_dir / "index.html").exists())
    self.assertTrue((base_output_dir / "plot_matrix.html").exists())
    # check that artefact files are present
    for artefact in artefacts:
        self.__check_artefact_files_present(artefact)
def test_get_author_data(self) -> None:
    """Check whether author data is retrieved correctly from the author
    interaction graph."""
    vara_cfg()['paper_config']['current_config'] = "test_casestudy_status"
    load_paper_config()

    revision = newest_processed_revision_for_case_study(
        get_paper_config().get_case_studies("xz")[0], BlameReport
    )
    assert revision
    blame_interaction_graph = create_blame_interaction_graph("xz", revision)

    self.assertEqual(blame_interaction_graph.project_name, "xz")

    aig = blame_interaction_graph.author_interaction_graph()
    author_data = get_author_data(aig, "Lasse Collin")
    self.assertEqual(author_data["node_attrs"]["author"], "Lasse Collin")
    self.assertEqual(author_data["neighbors"], set())
    self.assertEqual(0, len(author_data["in_attrs"]))
    self.assertEqual(0, len(author_data["out_attrs"]))
def test_multiple_case_studies_latex_booktabs(self) -> None:
    """Tests the latex booktabs format for the cs overview table."""
    vara_cfg()["paper_config"]["current_config"] = "test_artefacts_driver"
    initialize_projects()
    load_paper_config()

    # latex booktabs is default format
    table_str = CaseStudyMetricsTable(
        TableConfig.from_kwargs(view=False)
    ).tabulate(TableFormat.LATEX_BOOKTABS, False)

    self.assertEqual(
        r"""\begin{tabular}{llrrrl}
\toprule
{} & Domain & LOC & Commits & Authors & Revision \\
\midrule
\textbf{gravity} & Programming language & 22923 & 663 & 39 & 2c71dec8ad \\
\textbf{xz } & Compression & 37021 & 1143 & 16 & c5c7ceb08a \\
\bottomrule
\end{tabular}
""", table_str)
def test_vara_cs_status(self):
    """Test for vara-cs status."""
    runner = CliRunner()
    vara_cfg()["paper_config"]["current_config"] = "test_status"
    save_config()
    load_paper_config()

    result = runner.invoke(driver_casestudy.main, ['status', 'JustCompile'])
    self.assertEqual(0, result.exit_code, result.exception)
    self.assertEqual(
        "CS: xz_0: ( 0/5) processed [0/0/0/0/3/2]\n"
        "    c5c7ceb08a [Missing]\n"
        "    ef364d3abc [Missing]\n"
        "    2f0bc9cd40 [Missing]\n"
        "    7521bbdc83 [Blocked]\n"
        "    10437b5b56 [Blocked]\n\n"
        "---------------------------------------------"
        "-----------------------------------\n"
        "Total: ( 0/5) processed [0/0/0/0/3/2]\n", result.stdout
    )
def test_caig_metrics_table(self) -> None:
    """Tests the latex booktabs format for the caig metrics table."""
    vara_cfg()["paper_config"][
        "current_config"] = "test_diff_correlation_overview_table"
    initialize_projects()
    load_paper_config()

    # latex booktabs is default format
    table_str = CommitAuthorInteractionGraphMetricsTable(
        TableConfig.from_kwargs(view=False),
        case_study=get_loaded_paper_config().get_all_case_studies()
    ).tabulate(TableFormat.LATEX_BOOKTABS, False)

    self.assertEqual(
        r"""\begin{tabular}{lrrrrrrrrrrrrrr}
\toprule
{} & commits & authors & nodes & edges & \multicolumn{4}{c}{node degree} & \multicolumn{3}{c}{node out degree} & \multicolumn{3}{c}{node in degree} \\
{} & & & & & mean & median & min & max & median & min & max & median & min & max \\
\midrule
\textbf{xz} & 1143 & 28 & 125 & 92 & 1.47 & 1.0 & 0 & 92 & 1.0 & 0 & 1 & 0.0 & 0 & 92 \\
\bottomrule
\end{tabular}
""", table_str)