import typing as tp

import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import numpy.typing as npt
from sklearn import preprocessing

# PlotConfig is provided by the surrounding project and is assumed to be in
# scope for all examples below.


def _verifier_plot_multiple(
    default_fig_suptitle: str, plot_config: PlotConfig,
    final_plot_data: tp.List[tp.Tuple[str, tp.Dict[str, tp.Any]]]
) -> None:
    fig = plt.figure()
    main_axis = fig.subplots()
    main_axis.set_xlim(0, 1)
    main_axis.grid(linestyle='--')
    main_axis.set_xlabel('Revisions normalized')
    main_axis.set_ylabel('Success rate in %')
    main_axis.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
    fig.subplots_adjust(top=0.95, hspace=0.05, right=0.95, left=0.07)
    mean_over_all_project_successes = 0

    for plot_data in final_plot_data:
        mean_over_all_project_successes += (
            plot_data[1]["average_success_ratio"] / len(final_plot_data)
        )

        # Assign a unique int to each revision to prepare the data for the
        # normalization on the x-axis
        revisions_as_numbers: npt.NDArray[np.int_] = np.array([
            x + 1 for x, _ in enumerate(plot_data[1]["revisions"])
        ]).reshape(-1, 1)

        normalized_revisions = preprocessing.minmax_scale(
            revisions_as_numbers, (0, 1), axis=0, copy=False
        )
        main_axis.plot(
            normalized_revisions,
            plot_data[1]["success_ratio"],
            label=f"{plot_data[0]} "
            f"(\u2205 {plot_data[1]['average_success_ratio']}%)"
        )

    main_axis.title.set_text(plot_config.fig_title(default_fig_suptitle))

    plt.setp(
        main_axis.get_xticklabels(), rotation=30, horizontalalignment='right'
    )

    legend = main_axis.legend(
        title=f"{plot_config.legend_title('Success rate of projects')}"
        f"(\u2205 {round(mean_over_all_project_successes, 2)}%):",
        loc='upper left',
        prop={
            'size': plot_config.legend_size(),
            'family': 'monospace'
        }
    )
    legend.set_visible(plot_config.show_legend())

    plt.setp(
        legend.get_title(),
        fontsize=plot_config.legend_size(),
        family='monospace'
    )
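For reference, `final_plot_data` is a list of `(project_name, data)` tuples whose dicts must provide the keys accessed above. A minimal synthetic invocation sketch (the data values are invented for illustration; `PlotConfig(False)` follows the construction in Example #2 below):

synthetic_plot_data = [
    ("project_a", {
        "revisions": ["rev0", "rev1", "rev2", "rev3"],
        "success_ratio": [0.5, 0.75, 0.75, 1.0],
        "average_success_ratio": 75.0,
    }),
]
_verifier_plot_multiple("Verifier overview", PlotConfig(False),
                        synthetic_plot_data)
plt.show()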
Example #2
        def command_template(context: click.Context, **kwargs: tp.Any) -> None:
            # common arguments are read from the click context below; the
            # plot config is created fresh with view mode disabled
            plot_config: PlotConfig = PlotConfig(False)
            try:
                generator_instance = generator_cls(plot_config, **kwargs)
                plots = generator_instance.generate()
                plot = plots[0]
                if len(plots) > 1:

                    def set_plot(selected_plot: Plot) -> None:
                        nonlocal plot
                        plot = selected_plot

                    cli_list_choice(
                        "The given plot generator creates multiple plots;"
                        " please select one:", plots, lambda p: p.name,
                        set_plot)
                cmap = create_lazy_commit_map_loader(context.obj['project'],
                                                     None, 'HEAD', None)()
                extend_with_smooth_revs(context.obj['case_study'], cmap,
                                        context.obj['boundary_gradient'],
                                        context.obj['ignore_blocked'], plot,
                                        context.obj['merge_stage'])
                store_case_study(context.obj['case_study'],
                                 context.obj['path'])
            except PlotGeneratorFailed as ex:
                print(f"Failed to create plot generator {generator_cls.NAME}: "
                      f"{ex.message}")
Example #3
    def test_options_accessors(self):
        """Check that all plot config options have accessors."""
        config = PlotConfig.from_kwargs(view=False)

        for name, option in PlotConfig._option_decls.items():
            self.assertTrue(
                hasattr(PlotConfig, name),
                f"Plot config is missing an accessor for the option '{name}'")

            accessor_fn = getattr(config, name)
            signature = inspect.signature(accessor_fn)

            self.assertFalse(
                option.view_default
                and "view_default" not in signature.parameters,
                f"Plot config option {name} has view_default set but "
                f"accessor does not allow overriding. Either remove "
                f"view_default or use PCOGetterV for the accessor.")
            self.assertFalse(
                not option.view_default
                and "view_default" in signature.parameters,
                f"Plot config option {name} has no view_default set but "
                f"accessor allows overriding view_default. Either add a "
                f"view_default to the option or use PCOGetter for the "
                f"accessor.")
Example #4
    def test_get_dict(self):
        """Check that dict only contains options with set values."""
        config = PlotConfig.from_kwargs(view=False, label_size=1)
        config_dict = config.get_dict()
        self.assertIn("label_size", config_dict)
        self.assertEqual(1, config_dict["label_size"])

        self.assertNotIn("x_tick_size", config_dict)
Example #5
def _verifier_plot_single(
    default_fig_suptitle: str, plot_config: PlotConfig,
    final_plot_data: tp.Tuple[str, tp.Dict[str, tp.Any]]
) -> None:
    fig, main_axis = plt.subplots()
    fig.suptitle(default_fig_suptitle, fontsize=plot_config.font_size(8))
    main_axis.grid(linestyle='--')
    main_axis.set_xlabel('Revisions')
    main_axis.set_ylabel('Success/Failure rate in %')
    main_axis.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
    fig.subplots_adjust(top=0.95, hspace=0.05, right=0.95, left=0.07)

    main_axis.stackplot(
        final_plot_data[1]["revisions"],
        final_plot_data[1]["success_ratio"],
        final_plot_data[1]["failure_ratio"],
        labels=[
            f"successes(\u2205 {final_plot_data[1]['average_success_ratio']}%)",
            f"failures(\u2205 {final_plot_data[1]['average_failure_ratio']}%)"
        ],
        colors=[SUCCESS_COLOR, FAILED_COLOR],
        alpha=0.5
    )

    plt.setp(
        main_axis.get_xticklabels(), rotation=30, horizontalalignment='right'
    )

    legend = main_axis.legend(
        title=plot_config.legend_title("Annotation types:"),
        loc='upper left',
        prop={
            'size': plot_config.legend_size(),
            'family': 'monospace'
        }
    )
    legend.set_visible(plot_config.show_legend())

    plt.setp(
        legend.get_title(),
        fontsize=plot_config.legend_size(),
        family='monospace'
    )
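The single-project variant expects one `(project_name, data)` tuple whose dict additionally carries the failure series; per revision, `success_ratio` and `failure_ratio` stack to 100%. A synthetic sketch (values invented; assumes the imports from the first example and the project's color constants):

synthetic_single = ("project_a", {
    "revisions": ["rev0", "rev1", "rev2"],
    "success_ratio": [0.5, 0.75, 1.0],
    "failure_ratio": [0.5, 0.25, 0.0],
    "average_success_ratio": 75.0,
    "average_failure_ratio": 25.0,
})
_verifier_plot_single("Verifier overview", PlotConfig(False), synthetic_single)
plt.show()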
Example #6
def main(context: click.Context, **kwargs: tp.Any) -> None:
    """Entry point for the plot generation tool."""
    # store common options in context so they can be passed to subcommands
    common_options = CommonPlotOptions.from_kwargs(**kwargs)
    plot_config = PlotConfig.from_kwargs(**kwargs)
    context.ensure_object(dict)
    context.obj["common_options"] = common_options
    context.obj["plot_config"] = plot_config
    context.obj["save_artefact"] = kwargs["save_artefact"]

    initialize_cli_tool()
    initialize_projects()
    initialize_tables()
    initialize_plots()
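The `context.obj` entries stored here are read back by the subcommands (compare `command_template` in Example #2). A minimal sketch of this click group/subcommand pattern, with hypothetical command names:

import click

@click.group()
@click.pass_context
def cli(context: click.Context) -> None:
    # the group populates the shared context object ...
    context.ensure_object(dict)
    context.obj["project"] = "example_project"

@cli.command()
@click.pass_context
def show_project(context: click.Context) -> None:
    # ... and every subcommand can read from it
    click.echo(context.obj["project"])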
Example #7
    def test_cli_option_converter(self):
        """Test whether CLI option conversion works correctly."""
        # setup config
        vara_cfg()['paper_config']['current_config'] = "test_artefacts_driver"
        load_paper_config()
        save_config()

        plot_generator = CaseStudyOverviewGenerator(
            PlotConfig.from_kwargs(view=False),
            report_type=EmptyReport,
            case_study=get_loaded_paper_config().get_case_studies("xz")[0])
        artefact = PlotArtefact.from_generator("CS Overview", plot_generator,
                                               CommonPlotOptions.from_kwargs())
        artefact_dict = artefact.get_dict()
        self.assertEqual("xz_0", artefact_dict["case_study"])
        self.assertEqual("EmptyReport", artefact_dict["report_type"])
Example #8
    def test_view_default_override(self):
        """Test if passed view_default is used over default in view mode."""
        config = PlotConfig.from_kwargs(view=True)
        self.assertEqual(5, config.height(default=4, view_default=5))
Example #9
    def test_no_view_default_override(self):
        """Test if passed default is used over view_default in non-view
        mode."""
        config = PlotConfig.from_kwargs(view=False)
        self.assertEqual(4, config.height(default=4, view_default=5))
Example #10
    def test_view_get_option_value_set_no_override(self):
        """Test that a passed view_default does not override a set value in
        view mode."""
        config = PlotConfig.from_kwargs(view=True, height=42)
        self.assertEqual(42, config.height(view_default=5))
Example #11
    def test_view_get_option_value_not_set_override(self):
        """Test that a passed view_default overrides the global default in
        view mode."""
        config = PlotConfig.from_kwargs(view=True, show_legend=True)
        self.assertEqual(42, config.legend_size(view_default=42))
Example #12
    def test_view_get_option_value_set(self):
        """Test if a set value overrides the default value in view mode."""
        config = PlotConfig.from_kwargs(view=True, fig_title="Test", width=1)
        self.assertEqual("Test", config.fig_title())
        self.assertEqual(1, config.width())
        self.assertEqual(1000, config.height())
Example #13
    def test_view_get_option_value_not_set(self):
        """Test if the default value is returned in view mode."""
        config = PlotConfig.from_kwargs(view=True)
        self.assertEqual(10, config.font_size())
Example #14
    def test_get_option_value_set(self):
        """Test if a set value overrides the default value."""
        config = PlotConfig.from_kwargs(view=False, fig_title="Test")
        self.assertEqual("Test", config.fig_title())
        self.assertEqual(1500, config.width())
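Taken together, the tests above pin down the option-resolution order: an explicitly set value wins, then the mode-specific argument (view_default in view mode, default otherwise), then the option's global default. A simplified, hypothetical sketch of that precedence (not the project's actual implementation, in which the global default can itself differ between view and non-view mode):

import typing as tp

T = tp.TypeVar("T")

def resolve_option(
    set_value: tp.Optional[T], global_default: T, view: bool,
    default: tp.Optional[T] = None, view_default: tp.Optional[T] = None
) -> T:
    # a value explicitly set on the config always wins
    if set_value is not None:
        return set_value
    # otherwise the caller-supplied default for the current mode applies
    if view and view_default is not None:
        return view_default
    if not view and default is not None:
        return default
    # finally, fall back to the option's global default
    return global_default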
Example #15
import typing as tp

import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
import seaborn as sb
from matplotlib.patches import Patch

# PlotConfig and the color constants SUCCESS_COLOR, BLOCKED_COLOR, and
# FAILED_COLOR are provided by the surrounding project and assumed in scope.


def _plot_overview_graph(results: tp.Dict[str, tp.Any],
                         plot_config: PlotConfig) -> None:
    """
    Create a plot that shows an overview of all case-studies of a paper-config
    about how many revisions are successful per project/year.

    Args:
        results: the results data as generated by `_gen_overview_plot()`
    """
    num_years = len(results['year_range'])
    num_projects = len(results['project_names'])

    revs_successful: npt.NDArray[tp.Any] = np.asarray(
        results['revs_successful'])
    revs_blocked: npt.NDArray[tp.Any] = np.asarray(results['revs_blocked'])
    revs_total: npt.NDArray[tp.Any] = np.asarray(results['revs_total'])

    # We want to interpolate three values/colors in the heatmap.
    # As seaborn's heatmap does not allow this, we manually compute the colors
    # for all entries and create a discrete color map from these colors.
    # The entries of the heatmap are then simply the indices of the data
    # mapped to the range [0,1].

    # the +0.5 is needed to prevent floating point precision issues
    revs_success_ratio: npt.NDArray[np.float64] = np.asarray([
        i + 0.5 if t > 0 else np.nan
        for i, t in enumerate(revs_total.flatten())
    ])
    revs_success_ratio = revs_success_ratio / len(revs_success_ratio)
    revs_success_ratio = revs_success_ratio.reshape(num_projects, num_years)

    def to_color(n_success: float, n_blocked: float,
                 n_total: float) -> npt.NDArray[np.float64]:
        f_success = n_success / float(n_total)
        f_blocked = n_blocked / float(n_total)
        f_failed = 1.0 - f_success - f_blocked
        return np.asarray(f_success * SUCCESS_COLOR +
                          f_blocked * BLOCKED_COLOR + f_failed * FAILED_COLOR)

    colors = [
        to_color(n_success, n_blocked, n_total)
        for n_success, n_blocked, n_total in zip(
            revs_successful.flatten(), revs_blocked.flatten(),
            revs_total.flatten())
    ]

    labels: npt.NDArray[np.str_] = (np.asarray([
        f"{n_success:1.0f}/{n_blocked:1.0f}\n{n_total:1.0f}"
        for n_success, n_blocked, n_total in zip(
            revs_successful.flatten(), revs_blocked.flatten(),
            revs_total.flatten())
    ])).reshape(num_projects, num_years)

    # Note: See the following URL for this size calculation:
    # https://stackoverflow.com/questions/51144934/how-to-increase-the-cell-size-for-annotation-in-seaborn-heatmap

    fontsize_pt = 12

    # compute the matrix height in points and inches
    matrix_height_pt = fontsize_pt * num_projects * 40
    matrix_height_in = matrix_height_pt / plot_config.dpi()

    # compute the required figure height
    top_margin = 0.05
    bottom_margin = 0.10
    figure_height = matrix_height_in / (1 - top_margin - bottom_margin)

    # build the figure instance with the desired height
    plt.subplots(figsize=(18, figure_height),
                 gridspec_kw=dict(top=(1 - top_margin), bottom=bottom_margin))

    ax = sb.heatmap(revs_success_ratio,
                    annot=labels,
                    fmt='',
                    cmap=colors,
                    xticklabels=results['year_range'],
                    yticklabels=results['project_names'],
                    linewidths=.5,
                    vmin=0,
                    vmax=1,
                    cbar=False,
                    square=True)

    legend_entries = [
        Patch(facecolor=SUCCESS_COLOR),
        Patch(facecolor=BLOCKED_COLOR),
        Patch(facecolor=FAILED_COLOR),
    ]
    ax.legend(legend_entries,
              ['Success (top left)', 'Blocked (top right)', 'Failed/Missing'],
              loc='upper left',
              bbox_to_anchor=(1, 1))
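The color trick used above (see the comment block in the middle of the function) generalizes: since seaborn's heatmap accepts only a single colormap, one precomputed color per cell can be smuggled in by plotting the cell indices, scaled to [0, 1], against a ListedColormap. A standalone sketch of just that technique, with invented colors:

import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

# one precomputed RGB color per cell, in row-major order
cell_colors = np.array([[0.2, 0.7, 0.2], [0.8, 0.2, 0.2],
                        [0.9, 0.7, 0.1], [0.3, 0.3, 0.8]])
num_cells = len(cell_colors)

# the heatmap "data" are just the cell indices; the +0.5 shift keeps each
# scaled value strictly inside its bin of the discrete colormap
index_data = ((np.arange(num_cells) + 0.5) / num_cells).reshape(2, 2)

sb.heatmap(index_data, cmap=ListedColormap(cell_colors), vmin=0, vmax=1,
           cbar=False, square=True)
plt.show()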