def test_aig_file_vs_blame_degrees_table(self) -> None:
        """
        Tests the latex booktabs format for the file vs.

        ci table.
        """
        vara_cfg()["paper_config"][
            "current_config"] = "test_diff_correlation_overview_table"
        initialize_projects()
        load_paper_config()

        # latex booktabs is default format
        table_str = AuthorBlameVsFileDegreesTable(
            TableConfig.from_kwargs(view=False),
            case_study=get_loaded_paper_config().get_case_studies(
                "xz")[0]).tabulate(TableFormat.LATEX_BOOKTABS, False)

        self.assertEqual(
            r"""\begin{tabular}{lrrrrr}
\toprule
{} &  blame\_num\_commits &  blame\_node\_degree &  author\_diff &  file\_num\_commits &  file\_node\_degree \\
author         &                    &                    &              &                   &                   \\
\midrule
Alexey Tourbin &                NaN &                NaN &          NaN &                 1 &                 2 \\
Ben Boeckel    &                NaN &                NaN &          NaN &                 1 &                 2 \\
Jim Meyering   &                NaN &                NaN &          NaN &                 1 &                 2 \\
Lasse Collin   &              124.0 &                0.0 &          0.0 &               479 &                 6 \\
\bottomrule
\end{tabular}
""", table_str)
Example #2
 def __init__(self):
     super().__init__()
     self.setAttribute(Qt.WA_DeleteOnClose)
     self.selected_commit = None
     self.setupUi(self)
     initialize_projects()
     self.proxy_model = CommitTableFilterModel()
     self.proxy_model.setFilterCaseSensitivity(Qt.CaseInsensitive)
     self.revision_list.setModel(self.proxy_model)
     self.selected_project = None
     self.revision_list_project = None
     self.update_project_list()
     self.project_list.clicked['QModelIndex'].connect(
         self.show_project_data)
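     # Fill the sampling-method dropdown with all registered normal
     # sampling methods.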
     self.sampling_method.addItems([
         x.name()
         for x in NormalSamplingMethod.normal_sampling_method_types()
     ])
     self.strategie_forms.setCurrentIndex(
         GenerationStrategie.SELECT_REVISION.value)
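     # Wire up the remaining UI signals to their handlers.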
     self.revision_list.clicked.connect(self.show_revision_data)
     self.select_specific.clicked.connect(self.revisions_of_project)
     self.sample.clicked.connect(self.sample_view)
     self.per_year.clicked.connect(self.revs_per_year_view)
     self.generate.clicked.connect(self.gen)
     self.project_search.textChanged.connect(self.update_project_list)
     self.revision_list.horizontalHeader().sortIndicatorChanged.connect(
         self.revision_list.sortByColumn)
     self.commit_search.textChanged.connect(
         self.proxy_model.setFilterFixedString)
     self.show()
Example #3
def main() -> None:
    """
    Main function for working with artefacts.

    `vara-art`
    """
    initialize_cli_tool()
    initialize_projects()
    initialize_reports()
    initialize_tables()
    initialize_plots()
    initialize_artefact_types()
Example #4
    def test_table_tex_output(self) -> None:
        """Check whether the table produces the correct tex output."""
        vara_cfg()["paper_config"][
            "current_config"] = "test_diff_correlation_overview_table"
        initialize_projects()
        load_paper_config()
        table = diff_correlation_overview_table.DiffCorrelationOverviewTable(
            TableConfig.from_kwargs(view=False)).tabulate(
                TableFormat.LATEX_BOOKTABS, False)

        with open("tables/b_diff_correlation_overview.tex") as expected:
            self.assertEqual(table, expected.read())
Example #5
def main(context: click.Context, **kwargs: tp.Any) -> None:
    """Entry point for the plot generation tool."""
    # store common options in context so they can be passed to subcommands
    common_options = CommonPlotOptions.from_kwargs(**kwargs)
    plot_config = PlotConfig.from_kwargs(**kwargs)
    context.ensure_object(dict)
    context.obj["common_options"] = common_options
    context.obj["plot_config"] = plot_config
    context.obj["save_artefact"] = kwargs["save_artefact"]

    initialize_cli_tool()
    initialize_projects()
    initialize_tables()
    initialize_plots()
Example #6
    def test_one_case_study_latex_booktabs(self) -> None:
        """"Tests the latex booktabs format for the cs overview table."""
        vara_cfg()["paper_config"]["current_config"] = "test_revision_lookup"
        initialize_projects()
        load_paper_config()

        # latex booktabs is default format
        table_str = CaseStudyMetricsTable(TableConfig.from_kwargs(
            view=False)).tabulate(TableFormat.LATEX_BOOKTABS, False)

        self.assertEqual(
            r"""\begin{tabular}{llrrrl}
\toprule
{} &       Domain &    LOC &  Commits &  Authors &    Revision \\
\midrule
\textbf{brotli} &  Compression &  34639 &      848 &       40 &  aaa4424d9b \\
\bottomrule
\end{tabular}
""", table_str)
    def test_multiple_case_studies_latex_booktabs(self) -> None:
        """"Tests the latex booktabs format for the cs overview table."""
        vara_cfg()["paper_config"]["current_config"] = "test_artefacts_driver"
        initialize_projects()
        load_paper_config()

        # latex booktabs is default format
        table_str = CaseStudyMetricsTable(TableConfig.from_kwargs(
            view=False)).tabulate(TableFormat.LATEX_BOOKTABS, False)

        self.assertEqual(
            r"""\begin{tabular}{llrrrl}
\toprule
{} &                Domain &    LOC &  Commits &  Authors &    Revision \\
\midrule
\textbf{gravity} &  Programming language &  22923 &      663 &       39 &  2c71dec8ad \\
\textbf{xz     } &           Compression &  37021 &     1143 &       16 &  c5c7ceb08a \\
\bottomrule
\end{tabular}
""", table_str)
    def setUp(cls) -> None:
        """Initialize projects and set up report paths, a case study, and a
        commit map."""

        initialize_projects()

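        # Pre-computed report result files from the test inputs directory,
        # used as test fixtures.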
        cls.br_paths_list = [
            TEST_INPUTS_DIR / Path(
                "results/xz/BRE-BR-xz-xz-2f0bc9cd40"
                "_9e238675-ee7c-4325-8e9f-8ccf6fd3f05c_success.yaml"
            ), TEST_INPUTS_DIR / Path(
                "results/xz/BRE-BR-xz-xz-c5c7ceb08a"
                "_77a6c5bc-e5c7-4532-8814-70dbcc6b5dda_success.yaml"
            ), TEST_INPUTS_DIR / Path(
                "results/xz/BRE-BR-xz-xz-ef364d3abc"
                "_feeeecb2-1826-49e5-a188-d4d883f06d00_success.yaml"
            ), TEST_INPUTS_DIR / Path(
                "results/TwoLibsOneProjectInteractionDiscreteLibsSingleProject/"
                "BRE-BR-TwoLibsOneProjectInteractionDiscreteLibsSingleProject-"
                "elementalist-5e8fe1616d_11ca651c-2d41-42bd-aa4e-8c37ba67b75f"
                "_success.yaml"
            ), TEST_INPUTS_DIR / Path(
                "results/TwoLibsOneProjectInteractionDiscreteLibsSingleProject/"
                "BRE-BR-TwoLibsOneProjectInteractionDiscreteLibsSingleProject-"
                "elementalist-e64923e69e_0b22c10c-4adb-4885-b3d2-416749b53aa8"
                "_success.yaml"
            )
        ]

        cls.case_study = load_case_study_from_file(
            TEST_INPUTS_DIR / Path(
                "paper_configs/test_blame_diff_metrics_database/"
                "TwoLibsOneProjectInteractionDiscreteLibsSingleProject_0."
                "case_study"
            )
        )

        cls.commit_map = get_commit_map(
            "TwoLibsOneProjectInteractionDiscreteLibsSingleProject"
        )
Example #9
    def test_caig_metrics_table(self) -> None:
        """Tests the latex booktabs format for the caig metrics table."""
        vara_cfg()["paper_config"][
            "current_config"] = "test_diff_correlation_overview_table"
        initialize_projects()
        load_paper_config()

        # latex booktabs is default format
        table_str = CommitAuthorInteractionGraphMetricsTable(
            TableConfig.from_kwargs(view=False),
            case_study=get_loaded_paper_config().get_all_case_studies(
            )).tabulate(TableFormat.LATEX_BOOKTABS, False)

        self.assertEqual(
            r"""\begin{tabular}{lrrrrrrrrrrrrrr}
\toprule
{} & commits & authors & nodes & edges & \multicolumn{4}{c}{node degree} & \multicolumn{3}{c}{node out degree} & \multicolumn{3}{c}{node in degree} \\
{} &        mean & median & min & max &          median & min & max &         median & min & max \\
\midrule
\textbf{xz} &    1143 &      28 &   125 &    92 &        1.47 &    1.0 &   0 &  92 &             1.0 &   0 &   1 &            0.0 &   0 &  92 \\
\bottomrule
\end{tabular}
""", table_str)
    def test_one_case_study_latex_booktabs(self) -> None:
        """Tests the latex booktabs format for the code centrality metrics
        table."""
        vara_cfg()["paper_config"][
            "current_config"] = "test_diff_correlation_overview_table"
        initialize_projects()
        load_paper_config()

        # latex booktabs is default format
        table_str = TopCentralCodeCommitsTable(
            TableConfig.from_kwargs(view=False),
            case_study=get_loaded_paper_config().get_case_studies("xz")[0],
            num_commits=10).tabulate(TableFormat.LATEX_BOOKTABS, False)

        self.assertEqual(
            r"""\begin{table}
\centering
\caption{Top 10 Central Code Commits}
\begin{tabular}{lr}
\toprule
                                  commit &  centrality \\
\midrule
ef68dd4a92976276304de2aedfbe34ae91a86abb &          28 \\
57597d42ca1740ad506437be168d800a50f1a0ad &          16 \\
ea00545beace5b950f709ec21e46878e0f448678 &          16 \\
7f0a4c50f4a374c40acf4b86848f301ad1e82d34 &          15 \\
c15c42abb3c8c6e77c778ef06c97a4a10b8b5d00 &          15 \\
fa3ab0df8ae7a8a1ad55b52266dc0fd387458671 &          10 \\
1d924e584b146136989f48c13fff2632896efb3d &           9 \\
d8b41eedce486d400f701b757b7b5e4e32276618 &           8 \\
1b0ac0c53c761263e91e34195cb21dfdcfeac0bd &           6 \\
e0ea6737b03e83ccaff4514d00e31bb926f8f0f3 &           6 \\
\bottomrule
\end{tabular}
\end{table}
""", table_str)
Example #11
def main(
    verbose: int,
    slurm: bool,
    submit: bool,
    container: bool,
    experiment: str,
    projects: tp.List[str],
    pretend: bool,
) -> None:
    """
    Run benchbuild experiments.

    Runs on all projects in the current paper config by default. You can
    restrict this to only certain projects or even revisions using BenchBuild-
    style project selectors: <project>[@<revision>]
    """
    # pylint: disable=too-many-branches
    initialize_cli_tool()
    initialize_projects()

    bb_command_args: tp.List[str] = ["--force-watch-unbuffered"]
    bb_extra_args: tp.List[str] = []

    if sys.stdout.isatty():
        bb_command_args.append("--force-tty")

    if verbose:
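        # Forward the requested verbosity to benchbuild as repeated -v flags.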
        bb_command_args.append("-" + ("v" * verbose))

    if pretend:
        click.echo("Running in pretend mode. No experiments will be executed.")
        # benchbuild only supports pretend in the normal run command
        slurm = False
        container = False

    if slurm:
        bb_command_args.append("slurm")

    if container:
        if slurm:
            if not __is_slurm_prepared():
                click.echo("It seems like benchbuild is not properly "
                           "configured for slurm + containers. "
                           "Please run 'vara-container prepare-slurm' first.")
                sys.exit(1)
            bb_extra_args = ["--", "container", "run"]
            if bb_cfg()["container"]["import"].value:
                bb_extra_args.append("--import")
        else:
            bb_command_args.append("container")

    if not slurm:
        bb_command_args.append("run")

    if pretend:
        bb_command_args.append("-p")

    if not projects:
        projects = list({
            cs.project_name
            for cs in get_paper_config().get_all_case_studies()
        })

    bb_args = list(
        itertools.chain(bb_command_args, ["-E", experiment], projects,
                        bb_extra_args))

    with local.cwd(vara_cfg()["benchbuild_root"].value):
        try:
            with benchbuild[bb_args].bgrun(stdout=PIPE,
                                           stderr=PIPE) as bb_proc:
                try:
                    _, stdout, _ = tee(bb_proc)
                except KeyboardInterrupt:
                    # wait for BB to complete when Ctrl-C is pressed
                    retcode, _, _ = tee(bb_proc)
                    sys.exit(retcode)
        except ProcessExecutionError:
            sys.exit(1)

    if slurm:
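        # Extract the path of the generated slurm script from benchbuild's
        # captured output so it can be submitted via sbatch (or echoed for
        # manual submission).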
        match = __SLURM_SCRIPT_PATTERN.search(stdout)
        if match:
            slurm_script = match.group(1)
            if submit:
                click.echo(
                    f"Submitting slurm script via sbatch: {slurm_script}")
                sbatch(slurm_script)
            else:
                click.echo(f"Run the following command to submit the slurm:\n"
                           f"sbatch {slurm_script}")
        else:
            click.echo("Could not find slurm script.")
            sys.exit(1)
Example #12
def main() -> None:
    """Allow easier management of case studies."""
    initialize_cli_tool()
    initialize_projects()
    initialize_reports()
Example #13
 def setUpClass(cls):
     initialize_projects()
Example #14
import varats.plot.plot  # isort:skip
import varats.table.table  # isort:skip
import varats.containers.containers  # isort:skip

tp.TYPE_CHECKING = False

# set the type checking flag so all types can be resolved in the docs
set_type_checking_flag = True

# -- Prevent import warnings -------------------------------------------------

benchbuild.utils.LOG.setLevel(logging.ERROR)

# -- Generate files ----------------------------------------------------------

from pathlib import Path

from varats.projects.discover_projects import initialize_projects
from varats.ts_utils.doc_util import (
    generate_project_overview_table_file,
    generate_projects_autoclass_files,
    generate_vara_install_requirements,
)

initialize_projects()

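# Write the generated documentation sources: the project overview table,
# per-project autoclass pages, and the VaRA install requirements.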
generate_project_overview_table_file(
    Path("vara-ts-api/ProjectOverviewTable.inc"))
generate_projects_autoclass_files(Path("vara-ts-api"))
generate_vara_install_requirements(Path("vara-ts"))
Example #15
 def setUp(cls) -> None:
     """Initialize all projects before running tests."""
     initialize_projects()