    def publish_results(self, document: Document, docs_folder: str,
                        measurement_manager: MeasurementManager,
                        topology_parameters: List[str]):
        document.add(Heading(self.template_params.experiment_name))
        for title in measurement_manager.single_run_measurements[0].get_item_names():
            if title not in [
                    "initial training error", "initial testing error",
                    "untraining error", "retraining error", "testing error"
            ]:
                continue
            data = measurement_manager.get_values_from_all_runs(title)
            n_runs = len(data)
            if self.template_params.every_second_is_baseline:  # TODO MS
                labels = (n_runs // 2) * ["experiment", "baseline"]
            else:
                labels = n_runs * [""]
            plot_multiple_runs(list(range(len(data[0]))),
                               data,
                               ylabel="error",
                               xlabel="steps",
                               labels=labels,
                               title=title,
                               smoothing_window_size=21,
                               path=path.join(docs_folder, title),
                               doc=document)
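
A minimal sketch of the labelling rule above, run outside the template (values are illustrative): with every_second_is_baseline set, runs are assumed to alternate experiment/baseline, so the label list simply repeats that pair.

n_runs = 6
every_second_is_baseline = True  # mirrors self.template_params.every_second_is_baseline
if every_second_is_baseline:
    labels = (n_runs // 2) * ["experiment", "baseline"]
else:
    labels = n_runs * [""]
print(labels)  # ['experiment', 'baseline', 'experiment', 'baseline', 'experiment', 'baseline']
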
Example #2
    def publish_results(self, document: Document, docs_folder: str,
                        measurement_manager: MeasurementManager,
                        topology_parameters: List[str]):
        steps = measurement_manager.get_values_from_all_runs('current_step')
        labels = topology_parameters

        multi_config_rewards = measurement_manager.get_custom_data_from_all_runs(
            'average_rewards')

        for single_config_multi_run_rewards in multi_config_rewards:
            num_runs = len(single_config_multi_run_rewards[0])
            single_runs = [[] for _ in range(num_runs)]
            for timestep in single_config_multi_run_rewards:
                for i in range(num_runs):
                    single_runs[i].append(timestep[i])

            max_value = max(max(run) for run in single_runs)

            for i, timeseries in enumerate(single_runs):
                title = f"average_reward_run_{i}"
                plot_multiple_runs(steps,
                                   timeseries,
                                   ylabel=f"average_reward_run_{i}",
                                   xlabel="steps",
                                   ylim=[0, max_value],
                                   labels=labels,
                                   title=title,
                                   path=path.join(docs_folder, title),
                                   doc=document)

            means = []
            mins = []
            maxes = []
            for timestep in single_config_multi_run_rewards:
                means.append(np.mean(timestep))
                mins.append(np.min(timestep))
                maxes.append(np.max(timestep))

            title = "all_average_rewards"
            plot_multiple_runs(steps,
                               means,
                               y_lower=mins,
                               y_upper=maxes,
                               ylabel="average_reward",
                               xlabel="steps",
                               labels=labels,
                               title=title,
                               path=path.join(docs_folder, title),
                               doc=document)
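
The reshaping loop above converts timestep-major data into one series per run; a minimal numpy equivalent, assuming every timestep holds exactly one value per run:

import numpy as np

# rewards[t][i] = average reward of run i at timestep t (timestep-major, as above)
rewards = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]  # 3 timesteps, 2 runs

single_runs = np.array(rewards).T.tolist()  # run-major: one timeseries per run
max_value = np.max(rewards)

print(single_runs)  # [[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]
print(max_value)    # 0.6
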
Example #3
def different_plots(x_values, y_values, baseline, legend):
    """Tests potentially problematic plot setups."""
    plot_multiple_runs(x_values, y_values, labels=legend)
    plot_multiple_runs(x_values,
                       y_values,
                       labels=legend,
                       smoothing_window_size=3)
    plot_multiple_runs_with_baselines(x_values,
                                      y_values,
                                      baseline,
                                      labels=legend)
    plot_multiple_runs_with_baselines(x_values,
                                      y_values,
                                      baseline,
                                      labels=legend,
                                      smoothing_window_size=3)
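
A hypothetical call of different_plots with synthetic data; the shapes assumed here (shared x-axis, one series per run, one baseline per run) follow the other examples on this page:

x_values = list(range(10))
y_values = [[0.1 * i for i in range(10)],         # run 1
            [1.0 - 0.05 * i for i in range(10)]]  # run 2
baseline = [[0.5] * 10, [0.4] * 10]               # one baseline series per run
legend = ['run 1', 'run 2']

different_plots(x_values, y_values, baseline, legend)
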
Example #4
    def _plot_agreement(prefix: str, expert_id: int, run_id: int,
                        testing_phase_ids: List[int],
                        agreements: List[List[int]], doc, docs_folder):
        """Plot cluster agreements for one expert in a layer"""

        title = prefix + f'- E{expert_id}-Run{run_id} - Clustering agreements'
        plot_multiple_runs(
            testing_phase_ids,
            agreements,
            title=title,
            ylabel='agreement',
            xlabel='testing phases',
            disable_ascii_labels=True,
            hide_legend=True,
            ylim=[ClusterAgreementMeasurement.NO_VALUE - 0.1, 1.1],
            path=path.join(docs_folder, title),
            doc=doc)
Example #5
    def _publish_results(self):
        """Plot and save the results."""

        doc = Document()
        date = get_stamp()

        labels = ExperimentTemplateBase.parameters_to_string(
            self._topology_parameters_list)

        title = 'Mutual Information labels vs ' + self._experiment_name
        self.plot_save(title, self._mutual_info, self._baseline_mutual_info,
                       'Norm. mutual information', labels, date,
                       self._docs_folder, doc)

        title = 'Weak classifier accuracy labels vs ' + self._experiment_name
        self.plot_save(title, self._classifier_accuracy,
                       self._baseline_classifier_accuracy,
                       'Classifier accuracy', labels, date, self._docs_folder,
                       doc)  #, smoothing_size=3)

        title = 'average delta'
        f = plot_multiple_runs(
            self._different_steps[0],  # here the X axes are identical
            self._average_delta,
            title=title,
            ylabel='log(delta)',
            xlabel='steps',
            labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'average boosting duration'
        f = plot_multiple_runs(self._different_steps[0],
                               self._average_boosting_dur,
                               title=title,
                               ylabel='duration',
                               xlabel='steps',
                               labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        doc.write_file(
            path.join(self._docs_folder,
                      to_safe_name(self._complete_name() + date + ".html")))

        print('done')
Example #6
    def get_data(self) -> plt:
        plt.close('all')

        data = self._matrix.cpu().numpy()
        n_steps = data.shape[1]

        x_values = np.arange(0, n_steps)
        figure = plot_multiple_runs(x_values, data)

        return figure
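
The same matrix-to-figure flow with a synthetic stand-in for self._matrix; a sketch assuming each row of the matrix becomes one plotted series:

import numpy as np
import torch

matrix = torch.rand(3, 100)       # hypothetical stand-in for self._matrix
data = matrix.cpu().numpy()       # 3 series, 100 recorded steps
n_steps = data.shape[1]
x_values = np.arange(0, n_steps)  # one x value per recorded step
figure = plot_multiple_runs(x_values, data)
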
Example #7
    def publish_one_run(self, single_run_measurements: SingleRunMeasurements,
                        document: Document, docs_folder: str,
                        topology_parameters: List[str]):

        steps = single_run_measurements.get_items('current_step')
        accuracy_1 = single_run_measurements.get_items(ACCURACY_1)
        accuracy_per_flock_1 = single_run_measurements.get_items(
            ACCURACY_PER_FLOCK_1)
        accuracy_2 = single_run_measurements.get_items(ACCURACY_2)
        accuracy_per_flock_2 = single_run_measurements.get_items(
            ACCURACY_PER_FLOCK_2)

        labels = topology_parameters

        multiple_flocks_alpha = 0.05

        title = 'accuracy_per_flock_1'
        f = plot_multiple_runs([steps], [accuracy_per_flock_1],
                               title=title,
                               ylabel='accuracy_per_flock_1',
                               xlabel='steps',
                               labels=labels,
                               hide_legend=True,
                               other_params=[{
                                   'alpha': multiple_flocks_alpha
                               }])
        add_fig_to_doc(f, os.path.join(docs_folder, title), document)

        title = 'accuracy_per_flock_2'
        f = plot_multiple_runs([steps], [accuracy_per_flock_2],
                               title=title,
                               ylabel='accuracy_per_flock_2',
                               xlabel='steps',
                               labels=labels,
                               hide_legend=True,
                               other_params=[{
                                   'alpha': multiple_flocks_alpha
                               }])
        add_fig_to_doc(f, os.path.join(docs_folder, title), document)

        a1 = np.array(accuracy_1)
        a2 = np.array(accuracy_2)
Example #8
    def plot_save(self, title, series, ylabel, labels, date, docs_folder, doc, smoothing_window_size=None):
        f = plot_multiple_runs(
            self._steps,
            series,
            title=title,
            xlabel='Simulation step',
            ylabel=ylabel,
            labels=labels,
            smoothing_window_size=smoothing_window_size)
        add_fig_to_doc(f, path.join(docs_folder, to_safe_name(title)), doc)
        plt.close(f)
Example #9
    def get_data(self) -> plt:
        plt.close('all')

        self._buffer = np.append(self._buffer,
                                 self._matrix.cpu().numpy(),
                                 axis=1)

        n_steps = self._buffer.shape[1]

        x_values = np.arange(0, n_steps)
        figure = plot_multiple_runs(x_values, self._buffer)

        return figure
Example #10
    def publish_results(self, labels, date, docs_folder, doc):

        prefix = 'L' + str(self._layer_id) + '--'

        title = prefix + self._available() + 'Mutual Information labels vs ' + self._experiment_name
        Task0OnlineLearningTemplate.plot_save(title, self._steps,
                                              self._mutual_info,
                                              self._baseline_mutual_info,
                                              'Norm. mutual information',
                                              labels, date, docs_folder, doc)

        title = prefix + self._available() + 'Weak classifier accuracy labels vs ' + self._experiment_name
        Task0OnlineLearningTemplate.plot_save(
            title, self._steps, self._classifier_accuracy,
            self._baseline_classifier_accuracy, 'Classifier accuracy', labels,
            date, docs_folder, doc)  # , smoothing_size=3)

        # TODO why is this different from the two above?
        title = prefix + 'average delta'
        f = plot_multiple_runs(self._different_steps,
                               self._average_delta,
                               title=title,
                               ylabel='log(delta)',
                               xlabel='steps',
                               labels=labels)
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = prefix + 'average boosting duration'
        f = plot_multiple_runs(self._different_steps,
                               self._average_boosting_dur,
                               title=title,
                               ylabel='duration',
                               xlabel='steps',
                               labels=labels)
        add_fig_to_doc(f, path.join(docs_folder, title), doc)
Example #11
def plot_testing_accuracy(steps: np.ndarray, d1: np.ndarray, d2: np.ndarray,
                          window_size: int, name: str, trim_size: int,
                          labels: List[str], document: Document,
                          docs_folder: str):
    a1 = compute_moving_average(d1, window_size)
    a2 = compute_moving_average(d2, window_size)
    title = f'{name}, window size: {window_size}'

    f = plot_multiple_runs(
        np.expand_dims(steps[trim_size:-trim_size - 1], axis=0),
        np.stack((np.expand_dims(a1[trim_size:-trim_size - 1], axis=0),
                  np.expand_dims(a2[trim_size:-trim_size - 1], axis=0)),
                 axis=-1),
        title=title,
        ylabel='accuracy',
        xlabel='steps',
        labels=labels,
        other_params=[{
            'color': None
        }])
    add_fig_to_doc(f, os.path.join(docs_folder, title), document)
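
A quick shape check of the expand_dims/stack calls above with synthetic arrays; the trimming drops trim_size points at the front and trim_size + 1 at the back before the two curves are stacked along the last axis:

import numpy as np

steps = np.arange(100)
a1, a2 = np.random.rand(100), np.random.rand(100)  # two smoothed accuracy curves
trim_size = 10

x = np.expand_dims(steps[trim_size:-trim_size - 1], axis=0)
y = np.stack((np.expand_dims(a1[trim_size:-trim_size - 1], axis=0),
              np.expand_dims(a2[trim_size:-trim_size - 1], axis=0)),
             axis=-1)
print(x.shape, y.shape)  # (1, 79) (1, 79, 2)
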
Example #12
    def _plot_agreement(self,
                        prefix: str,
                        expert_id: int,
                        run_id: int,
                        testing_phase_ids: List[int],
                        agreements: List[List[int]],
                        docs_folder,
                        doc):
        """Plot cluster agreements for one expert in a layer"""
        run_params = ExperimentTemplateBase.parameters_to_string([self._topology_parameters_list[run_id]])

        title = prefix + f'- E{expert_id}-Run{run_id} - Clustering agreements'
        f = plot_multiple_runs(
            testing_phase_ids,
            agreements,
            title=title,
            ylabel='agreement',
            # xlabel=f'params: {run_params}',
            xlabel='testing phases',
            disable_ascii_labels=True,
            hide_legend=True,
            ylim=[ClusterAgreementMeasurement.NO_VALUE - 0.1, 1.1]
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)
Example #13
    def _publish_results(self):
        """Plot and save the results."""

        doc = Document()
        date = get_stamp()

        labels = ExperimentTemplateBase.parameters_to_string(
            self._topology_parameters_list)

        for manager in self._layer_measurement_managers:
            manager.publish_results(labels=labels,
                                    date=date,
                                    docs_folder=self._docs_folder,
                                    doc=doc)

        # plot the running MSE
        title = 'Mean Square Error of TA classification'
        f = plot_multiple_runs_with_baselines(self._steps,
                                              self._predicted_labels_mse,
                                              self._baseline_labels_mse,
                                              title=title,
                                              ylabel='mse',
                                              xlabel='steps',
                                              labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'TA classification accuracy'
        f = plot_multiple_runs_with_baselines(
            self._steps,
            self._classification_accuracy,
            self._random_classification_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='steps',
            labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'TA classification accuracy - (SE-metric)'
        f = plot_multiple_runs_with_baselines(
            self._steps,
            self._se_classification_accuracy,
            self._se_random_classification_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='steps',
            labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'is_learning'
        f = plot_multiple_runs(
            self._steps,
            self._is_learning_info,
            title=title,
            ylabel='is learning',
            xlabel='steps',
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        doc.write_file(
            path.join(self._docs_folder,
                      to_safe_name(self._complete_name() + date + ".html")))

        print('done')
Example #14
    def _publish_results(self):
        doc = Document()
        doc.add(self._get_heading('_not set'))

        params_str = pprint.pformat(
            Task0TrainTestTemplateRelearning.pprint_parameters_to_string(self._topology_parameters_list), 1, 160
        )

        doc.add(f"<p><b>Parameters:</b><br /><pre>learning rate: {self._learning_rate},\n" + params_str + "</pre></p>")

        labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)

        testing_phases = list(range(0, len(self._clustering_agreement_list[0][0])))

        for i, run_label in enumerate(self._run_labels):
            title = 'Clustering agreement ' + run_label
            f = plot_multiple_runs(
                testing_phases,
                self._clustering_agreement_list[i],
                title=title,
                ylabel='agreement',
                xlabel='testing phases',
                disable_ascii_labels=True,
                hide_legend=True,
                ylim=[ClusterAgreementMeasurement.NO_VALUE - 0.1, 1.1]
            )
            add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Model classification accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phases,
            self._model_accuracy,
            self._baseline_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase',
            ylim=[-0.1, 1.1],
            labels=self._run_labels
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)
        title = 'Model SE classification accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phases,
            self._model_se_accuracy,
            self._baseline_se_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing_phase',
            ylim=[-0.1, 1.1],
            labels=self._run_labels
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Weak classifier accuracy trained on SP outputs to labels'
        f = plot_multiple_runs_with_baselines(
            testing_phases,
            self._weak_class_accuracy,
            self._base_weak_class_accuracy,
            title=title,
            ylabel='Accuracy (1 ~ 100%)',
            xlabel='steps',
            labels=labels,
            ylim=[-0.1, 1.1],
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Average Deltas Train'
        f = plot_multiple_runs(
            [self._sp_evaluation_period * i for i in range(0, len(self._average_delta_train[0]))],
            self._average_delta_train,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=self._run_labels,
            disable_ascii_labels=True,
            use_scatter=False
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Average Deltas Test'
        f = plot_multiple_runs(
            [self._sp_evaluation_period * i for i in range(0, len(self._average_delta_test[0]))],
            self._average_delta_test,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=self._run_labels,
            disable_ascii_labels=True,
            use_scatter=False
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Average boosting duration'
        f = plot_multiple_runs(
            [self._sp_evaluation_period * i for i in range(0, len(self._average_boosting_dur[0]))],
            self._average_boosting_dur,
            title=title,
            ylabel='duration',
            xlabel='steps',
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        doc.write_file(path.join(self._docs_folder, "main.html"))
        print('done')
Example #15
    def publish_results_for_layer(document: Document, docs_folder: str,
                                  measurement_manager: MeasurementManager,
                                  topology_parameters: List[str],
                                  layer_id: int, num_layers: int,
                                  show_conv_agreements: bool,
                                  is_train_test_classifier_computed: bool):
        """Publish results for each layer separately

        This uses the data measured and computed for each run by the Task0TaAnalysisLayerComponent and aggregated
        and stored in the measurement_manager.
        """

        logger.info(f'publishing results for layer {layer_id}...')

        num_boosted_clusters = measurement_manager.get_values_from_all_runs(
            f'num_boosted_getter_{layer_id}')
        average_boosting_dur = measurement_manager.get_values_from_all_runs(
            f'average_boosting_dur_{layer_id}')
        average_deltas = measurement_manager.get_values_from_all_runs(
            f'average_delta_{layer_id}')

        base_weak_class_accuracy = measurement_manager.get_custom_data_from_all_runs(
            f'base_weak_class_accuracy_{layer_id}')
        clustering_agreements = measurement_manager.get_custom_data_from_all_runs(
            f'clustering_agreements_{layer_id}')

        average_steps_deltas = measurement_manager.get_items_from_all_runs(
            f'average_delta_{layer_id}')
        sp_evaluation_steps = [key for key, value in average_steps_deltas[0]]

        labels = topology_parameters

        document.add(f"<br><br><br><b>Results for layer {layer_id}</b><br>")
        prefix = 'L' + str(layer_id) + '--'

        testing_phase_ids = list(range(0, len(
            clustering_agreements[0][0])))  # x-axis values

        if is_train_test_classifier_computed:
            weak_class_accuracy_train = measurement_manager.get_custom_data_from_all_runs(
                f'weak_class_accuracy_train_{layer_id}')
            weak_class_accuracy_test = measurement_manager.get_custom_data_from_all_runs(
                f'weak_class_accuracy_test_{layer_id}')

            title = prefix + ' Weak classifier accuracy (trained on train, tested on train data)'
            plot_multiple_runs_with_baselines(testing_phase_ids,
                                              weak_class_accuracy_train,
                                              base_weak_class_accuracy,
                                              title=title,
                                              ylabel='Accuracy (1 ~ 100%)',
                                              xlabel='steps',
                                              labels=labels,
                                              ylim=[-0.1, 1.1],
                                              hide_legend=True,
                                              path=path.join(
                                                  docs_folder, title),
                                              doc=document)

            title = prefix + ' Weak classifier accuracy (trained on train, tested on test data)'
            plot_multiple_runs_with_baselines(testing_phase_ids,
                                              weak_class_accuracy_test,
                                              base_weak_class_accuracy,
                                              title=title,
                                              ylabel='Accuracy (1 ~ 100%)',
                                              xlabel='steps',
                                              labels=labels,
                                              ylim=[-0.1, 1.1],
                                              hide_legend=True,
                                              path=path.join(
                                                  docs_folder, title),
                                              doc=document)

        weak_class_accuracy = measurement_manager.get_custom_data_from_all_runs(
            f'weak_class_accuracy_{layer_id}')
        title = prefix + ' Weak classifier accuracy (trained on test, tested on test)'
        plot_multiple_runs_with_baselines(testing_phase_ids,
                                          weak_class_accuracy,
                                          base_weak_class_accuracy,
                                          title=title,
                                          ylabel='Accuracy (1 ~ 100%)',
                                          xlabel='steps',
                                          labels=labels,
                                          ylim=[-0.1, 1.1],
                                          hide_legend=True,
                                          path=path.join(docs_folder, title),
                                          doc=document)

        title = prefix + '- Average boosting duration'
        plot_multiple_runs(sp_evaluation_steps,
                           average_boosting_dur,
                           title=title,
                           ylabel='duration',
                           xlabel='steps',
                           labels=labels,
                           hide_legend=True,
                           path=path.join(docs_folder, title),
                           doc=document)

        title = prefix + '- Num boosted clusters'
        plot_multiple_runs(sp_evaluation_steps,
                           num_boosted_clusters,
                           title=title,
                           ylabel='Num boosted clusters / total clusters',
                           xlabel='steps',
                           labels=labels,
                           ylim=[-0.1, 1.1],
                           hide_legend=True,
                           path=path.join(docs_folder, title),
                           doc=document)

        title = prefix + '- Average_deltas'
        plot_multiple_runs(
            sp_evaluation_steps,
            average_deltas,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=labels,
            disable_ascii_labels=True,
            hide_legend=True,
            # use_scatter=True,
            path=path.join(docs_folder, title),
            doc=document)

        # if this is not the top layer, show conv agreements only if required
        if show_conv_agreements or layer_id == (num_layers - 1):
            agreements = clustering_agreements

            for run_id, run_agreements in enumerate(agreements):
                for expert_id, expert_agreements in enumerate(run_agreements):
                    Task0TaAnalysisTemplate._plot_agreement(
                        prefix, expert_id, run_id, testing_phase_ids,
                        expert_agreements, document, docs_folder)

        logger.info('done')
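
A sketch of the (step, value) pair layout assumed by get_items_from_all_runs above: the x-axis comes from the keys of the first run and the plotted series from the values (the numbers here are made up):

average_steps_deltas = [
    [(0, 0.9), (100, 0.5), (200, 0.3)],  # run 0
    [(0, 0.8), (100, 0.6), (200, 0.2)],  # run 1
]

sp_evaluation_steps = [key for key, value in average_steps_deltas[0]]
values = [[value for key, value in run] for run in average_steps_deltas]
print(sp_evaluation_steps)  # [0, 100, 200]
print(values)               # [[0.9, 0.5, 0.3], [0.8, 0.6, 0.2]]
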
Example #16
    def publish_results(self, document: Document, docs_folder: str,
                        measurement_manager: MeasurementManager,
                        topology_parameters: List[str]):
        """An alternative to the _publish_results method, this is called from _publish_results now

        Draw and add your plots to the document here.
        """
        steps = measurement_manager.get_values_from_all_runs('current_step')
        plotted_training_phase_id = measurement_manager.get_values_from_all_runs(
            'training_phase_id')
        plotted_testing_phase_id = measurement_manager.get_values_from_all_runs(
            'testing_phase_id')

        labels = topology_parameters

        title = 'training_phase_id'
        f = plot_multiple_runs(steps,
                               plotted_training_phase_id,
                               title=title,
                               ylabel='training_phase_id',
                               xlabel='steps',
                               labels=labels)
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        title = 'testing_phase_id'
        f = plot_multiple_runs(steps,
                               plotted_testing_phase_id,
                               title=title,
                               ylabel='testing_phase_id',
                               xlabel='steps',
                               labels=labels)
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        max_x = max(steps[0])
        average_delta_train_0 = measurement_manager.get_items_from_all_runs(
            'average_delta0_train')
        test_sp_steps = [key for key, value in average_delta_train_0[0]]

        title = 'average_delta_train_layer0'
        f = plot_multiple_runs(test_sp_steps,
                               [[value for key, value in sequence]
                                for sequence in average_delta_train_0],
                               title=title,
                               ylabel='average_deltas',
                               xlabel='steps',
                               labels=labels,
                               xlim=[0, max_x],
                               disable_ascii_labels=True,
                               hide_legend=True
                               # use_scatter=True
                               )
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        average_delta_train_1 = measurement_manager.get_items_from_all_runs(
            'average_delta1_train')
        title = 'average_delta_train_layer1'
        f = plot_multiple_runs(test_sp_steps,
                               [[value for key, value in sequence]
                                for sequence in average_delta_train_1],
                               title=title,
                               ylabel='average_deltas',
                               xlabel='steps',
                               labels=labels,
                               xlim=[0, max_x],
                               disable_ascii_labels=True,
                               hide_legend=True
                               # use_scatter=True
                               )
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        predicted_labels_mse = measurement_manager.get_custom_data_from_all_runs(
            'predicted_labels_mse')
        testing_phases_x = list(range(0, len(predicted_labels_mse[0])))

        model_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'model_accuracy')
        baseline_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'baseline_accuracy')
        # plot the classification accuracy
        title = 'Label reconstruction accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(testing_phases_x,
                                              model_accuracy,
                                              baseline_accuracy,
                                              title=title,
                                              ylabel='accuracy (1 ~ 100%)',
                                              xlabel='testing phase ID',
                                              ylim=[-0.1, 1.1],
                                              labels=labels,
                                              hide_legend=True)
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        model_se_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'model_se_accuracy')
        baseline_se_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'baseline_se_accuracy')

        # plot the classification accuracy
        title = 'Label reconstruction SE accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(testing_phases_x,
                                              model_se_accuracy,
                                              baseline_se_accuracy,
                                              title=title,
                                              ylabel='accuracy (1 ~ 100%)',
                                              xlabel='testing_phase ID',
                                              ylim=[-0.1, 1.1],
                                              labels=labels,
                                              hide_legend=True)
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        baseline_labels_mse = measurement_manager.get_custom_data_from_all_runs(
            'baseline_labels_mse')

        # plot the MSE
        title = 'Mean Square Error of label reconstruction'
        f = plot_multiple_runs_with_baselines(
            testing_phases_x,
            predicted_labels_mse,
            baseline_labels_mse,
            title=title,
            ylabel='MSE',
            xlabel='testing phase ID',
            labels=labels,
            hide_legend=True,
            ylim=[-0.1, 0.2]  # just for better resolution
        )
        add_fig_to_doc(f, path.join(docs_folder, title), document)
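
Several docstrings above note that publish_results "is called from _publish_results now"; a minimal sketch of that assumed template-method wiring (the base-class internals here are hypothetical):

class ExperimentTemplateBase:
    def _publish_results(self):
        # Hypothetical driver: build the document, delegate the plotting to
        # the subclass hook, then write the HTML file out.
        document = Document()
        self.publish_results(document, self._docs_folder,
                             self._measurement_manager,
                             self._topology_parameters)
        document.write_file(path.join(self._docs_folder, "main.html"))
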
Example #17
    def _publish_results_to_doc(self, doc: Document, date: str, docs_folder: path):
        """Adds my results to the results produced by the base class"""
        super()._publish_results_to_doc(doc, date, docs_folder)

        doc.add(f"<br><br><br><b>Common results</b><br>")

        labels = ExperimentTemplateBase.extract_params_for_legend(self._topology_parameters_list)

        title = 'training_phase_id'
        f = plot_multiple_runs(
            self._steps,
            self._plotted_training_phase_id,
            title=title,
            ylabel='training_phase_id',
            xlabel='steps',
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = 'testing_phase_id'
        f = plot_multiple_runs(
            self._steps,
            self._plotted_testing_phase_id,
            title=title,
            ylabel='testing_phase_id',
            xlabel='steps',
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        if self._plot_is_learning:
            # plot the classification accuracy
            title = 'Is Learning'
            f = plot_multiple_runs(
                self._steps,
                self._is_learning_info,
                title=title,
                ylabel='learning=True?',
                xlabel='steps',
                ylim=[-0.1, 1.1],
                labels=labels
            )
            add_fig_to_doc(f, path.join(docs_folder, title), doc)

        testing_phase_ids = list(range(0, len(self._mutual_info[0])))  # x-axis values

        title = 'Top-layer L' + str(self._top_layer_id()) + ' Mutual information of SP output with labels'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._mutual_info,
            self._base_mutual_info,
            title=title,
            ylabel='Normalized mutual information',
            xlabel='steps',
            labels=labels,
            ylim=[-0.1, 1.1],
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the classification accuracy
        title = 'Label reconstruction accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._model_accuracy,
            self._baseline_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the classification accuracy
        title = 'Label reconstruction SE accuracy'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._model_se_accuracy,
            self._baseline_se_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing_phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the MSE
        title = 'Mean Square Error of label reconstruction'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._predicted_labels_mse,
            self._baseline_labels_mse,
            title=title,
            ylabel='MSE',
            xlabel='testing phase ID',
            labels=labels,
            hide_legend=True,
            ylim=[-0.1, 0.2]  # just for better resolution
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        for manager in reversed(self._layer_measurement_managers):
            manager.publish_results(labels=labels, date=date, docs_folder=docs_folder, doc=doc)
Example #18
    def publish_results(self, document: Document, docs_folder: str,
                        measurement_manager: MeasurementManager,
                        topology_parameters: List[str]):
        """An alternative to the _publish_results method, this is called from _publish_results now

        Draw and add your plots to the document here.
        """

        steps = measurement_manager.get_values_from_all_runs('current_step')
        accuracy_1 = measurement_manager.get_values_from_all_runs(ACCURACY_1)
        accuracy_per_flock_1 = measurement_manager.get_values_from_all_runs(
            ACCURACY_PER_FLOCK_1)
        accuracy_2 = measurement_manager.get_values_from_all_runs(ACCURACY_2)
        accuracy_per_flock_2 = measurement_manager.get_values_from_all_runs(
            ACCURACY_PER_FLOCK_2)

        multiple_flocks_alpha = 0.05
        # measurement_manager.single_run_measurements[0].get_custom_data('custom')
        labels = topology_parameters

        a1 = np.array(accuracy_1)
        a2 = np.array(accuracy_2)

        title = 'accuracy_1_2'
        f = plot_multiple_runs(steps,
                               np.stack((a1, a2), axis=-1),
                               title=title,
                               ylabel='accuracy_1_2',
                               xlabel='steps',
                               labels=labels,
                               other_params=[{
                                   'color': None
                               }])
        add_fig_to_doc(f, os.path.join(docs_folder, title), document)

        experiment_params = measurement_manager.single_run_measurements[
            0].model_parameters['params']['experiment_params']
        experiment_params.component.publish_results(document, docs_folder,
                                                    measurement_manager,
                                                    topology_parameters)

        title = 'accuracy_per_flock_1'
        f = plot_multiple_runs(steps,
                               accuracy_per_flock_1,
                               title=title,
                               ylabel='accuracy_per_flock_1',
                               xlabel='steps',
                               labels=labels,
                               hide_legend=True,
                               other_params=[{
                                   'alpha': multiple_flocks_alpha
                               }])
        add_fig_to_doc(f, os.path.join(docs_folder, title), document)

        # title = 'accuracy_2'
        # f = plot_multiple_runs(
        #     steps,
        #     accuracy_2,
        #     title=title,
        #     ylabel='accuracy_2',
        #     xlabel='steps',
        #     labels=labels
        # )
        # add_fig_to_doc(f, os.path.join(docs_folder, title), document)

        title = 'accuracy_per_flock_2'
        f = plot_multiple_runs(steps,
                               accuracy_per_flock_2,
                               title=title,
                               ylabel='accuracy_per_flock_2',
                               xlabel='steps',
                               labels=labels,
                               hide_legend=True,
                               other_params=[{
                                   'alpha': multiple_flocks_alpha
                               }])
        add_fig_to_doc(f, os.path.join(docs_folder, title), document)

        # Add table with MSE single-step values
        # prediction_mse_values = [f'{v:.5f}' for v in prediction_mse[0]]
        # document.add_table(['step', 'prediction_mse'], list(zip(steps[0], prediction_mse_values)),
        #                    attribs={'style': 'font-size:0.8em;'})

        # for single_run_measurements in measurement_manager.single_run_measurements:
        #     self.publish_one_run(single_run_measurements, document, docs_folder, topology_parameters)

        doc_path = os.path.join(docs_folder,
                                to_safe_name(self.experiment_name + ".html"))
        # Strip the Windows extended-length path prefix so the file:// link resolves.
        if doc_path.startswith("\\\\?\\"):
            doc_path = doc_path[len("\\\\?\\"):]

        # Note not logging to UI now
        logger.info(
            f'Results published <a href="file:///{doc_path}">{doc_path}</a>')
        logger.info(f'Results published {doc_path}')
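
A self-contained look at the prefix handling above: some Windows APIs return extended-length paths starting with \\?\, which file:// hyperlinks cannot resolve, hence the strip (the path itself is a made-up example):

doc_path = "\\\\?\\C:\\results\\experiment.html"  # hypothetical long-path form
if doc_path.startswith("\\\\?\\"):
    doc_path = doc_path[len("\\\\?\\"):]
print(doc_path)  # C:\results\experiment.html
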
Example #19
    def _publish_results(self):
        """Plot and optionally save the results."""

        mse = np.array(self._mse)
        mse_testing = np.array(self._mse_testing)

        memory_used = torch.tensor(self._memory_used, dtype=torch.float32)
        window_size = 201
        memory_used = (memory_used.view(-1, 1).expand(mse_testing.shape) /
                       (1024**2))
        error_memory_ratio = torch.tensor(mse_testing,
                                          dtype=torch.float32) * memory_used
        accuracy_memory_ratio = []
        for run_acc, run_mem in zip(self._weak_classifier_results,
                                    memory_used):
            accuracy_memory_ratio_run = []
            for acc, mem in zip(run_acc, run_mem):
                accuracy_memory_ratio_run.append((1 - acc) * mem)

            accuracy_memory_ratio.append(accuracy_memory_ratio_run)

        accuracy_memory_ratio = torch.tensor(accuracy_memory_ratio)

        doc = Document()

        figs = []
        xlabel = "steps"
        ylabel = "mean reconstruction error"
        title = "Influence of hyperparameters on reconstruction error (training)"
        figsize = (18, 12)
        date = get_stamp()

        fig = plot_multiple_runs(
            x_values=np.arange(len(mse[0])),
            y_values=mse,
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel=xlabel,
            ylabel=ylabel,
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        # smoothed variant of the same plot
        fig = plot_multiple_runs(
            x_values=np.arange(len(mse[0])),
            y_values=mse,
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            smoothing_window_size=window_size,
            xlabel=xlabel,
            ylabel=ylabel,
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(
            fig, path.join(self._docs_folder,
                           to_safe_name(title) + "_smooth"), doc)

        title = "Influence of hyperparameters on reconstruction error (testing)"
        fig = plot_multiple_runs(
            x_values=np.array(self._training_steps_before_testing[0]),
            y_values=mse_testing,
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel=xlabel,
            ylabel=ylabel,
            title=title,
            figsize=figsize,
        )
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Weak classifier accuracy"
        fig = plot_multiple_runs(
            x_values=np.array(self._training_steps_before_testing[0]),
            y_values=np.array(self._weak_classifier_results),
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel="steps",
            ylabel="accuracy",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Memory * reconstruction tradeoff (testing)"
        fig = plot_multiple_runs(
            x_values=np.array(self._training_steps_before_testing[0]),
            y_values=error_memory_ratio.numpy(),
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel=xlabel,
            ylabel="Mean reconstruction error times required megabytes of memory",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Memory * error (= 1 - acc) tradeoff"
        fig = plot_multiple_runs(
            x_values=np.array(self._training_steps_before_testing[0]),
            y_values=accuracy_memory_ratio.numpy(),
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel=xlabel,
            ylabel="Classification error (1 - accuracy) times required megabytes of memory",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Entropy and mean reconstruction error"
        fig = plot_multiple_runs(
            x_values=np.array(self._code_entropy),
            y_values=mse_testing,
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel="entropy",
            ylabel="mean reconstruction error",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Mean reconstruction error and classifier accuracy"
        fig = plot_multiple_runs(
            x_values=mse_testing,
            y_values=np.array(self._weak_classifier_results),
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel="mean reconstruction error",
            ylabel="accuracy",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Entropy and classifier accuracy"
        fig = plot_multiple_runs(
            x_values=np.array(self._code_entropy),
            y_values=np.array(self._weak_classifier_results),
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel="entropy",
            ylabel="accuracy",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        doc.write_file(
            path.join(self._docs_folder,
                      f"{self._topology_class.__name__}_" + date + ".html"))

        print(self._memory_used)
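
The two ratio computations above can also be written without the explicit Python loops; a sketch assuming the classifier results share the (runs, testing_phases) shape of memory_used:

import torch

mse_testing = torch.rand(4, 10)              # hypothetical: 4 runs, 10 phases
weak_classifier_results = torch.rand(4, 10)  # accuracies in [0, 1]
memory_used = torch.rand(4, 10) * 100        # megabytes per run and phase

error_memory_ratio = mse_testing * memory_used
accuracy_memory_ratio = (1 - weak_classifier_results) * memory_used
print(error_memory_ratio.shape, accuracy_memory_ratio.shape)
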
Example #20
    def publish_results(self, document: Document, docs_folder: str,
                        measurement_manager: MeasurementManager,
                        topology_parameters: List[str]):
        """An alternative to the _publish_results method, this is called from _publish_results now

        Draw and add your plots to the document here.
        """
        steps = measurement_manager.get_values_from_all_runs('current_step')
        plotted_training_phase_id = measurement_manager.get_values_from_all_runs(
            'training_phase_id')
        plotted_testing_phase_id = measurement_manager.get_values_from_all_runs(
            'testing_phase_id')
        plotted_is_learning = measurement_manager.get_values_from_all_runs(
            'is_learning')

        labels = topology_parameters
        document.add(
            "<br><br><br><b>Common results from the TopExpert</b><br>")

        title = 'training_phase_id'
        plot_multiple_runs(steps,
                           plotted_training_phase_id,
                           title=title,
                           ylabel='training_phase_id',
                           xlabel='steps',
                           labels=labels,
                           path=path.join(docs_folder, title),
                           doc=document)

        title = 'testing_phase_id'
        plot_multiple_runs(steps,
                           plotted_testing_phase_id,
                           title=title,
                           ylabel='testing_phase_id',
                           xlabel='steps',
                           labels=labels,
                           path=path.join(docs_folder, title),
                           doc=document)

        title = 'is_learning'
        plot_multiple_runs(steps,
                           plotted_is_learning,
                           title=title,
                           ylabel='is_learning',
                           xlabel='steps',
                           labels=labels,
                           path=path.join(docs_folder, title),
                           doc=document)

        predicted_labels_mse = measurement_manager.get_custom_data_from_all_runs(
            'predicted_labels_mse')
        testing_phases_x = list(range(0, len(predicted_labels_mse[0])))

        model_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'model_accuracy')
        baseline_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'baseline_accuracy')
        # plot the classification accuracy
        title = 'Label reconstruction accuracy (step-wise)'
        plot_multiple_runs_with_baselines(testing_phases_x,
                                          model_accuracy,
                                          baseline_accuracy,
                                          title=title,
                                          ylabel='accuracy (1 ~ 100%)',
                                          xlabel='testing phase ID',
                                          ylim=[-0.1, 1.1],
                                          labels=labels,
                                          hide_legend=True,
                                          path=path.join(docs_folder, title),
                                          doc=document)

        model_se_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'model_se_accuracy')
        baseline_se_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'baseline_se_accuracy')

        # plot the classification accuracy
        title = 'Label reconstruction SE accuracy (object-wise)'
        plot_multiple_runs_with_baselines(testing_phases_x,
                                          model_se_accuracy,
                                          baseline_se_accuracy,
                                          title=title,
                                          ylabel='accuracy (1 ~ 100%)',
                                          xlabel='testing_phase ID',
                                          ylim=[-0.1, 1.1],
                                          labels=labels,
                                          hide_legend=True,
                                          path=path.join(docs_folder, title),
                                          doc=document)

        baseline_labels_mse = measurement_manager.get_custom_data_from_all_runs(
            'baseline_labels_mse')

        # plot the MSE
        title = 'Mean Square Error of label reconstruction'
        plot_multiple_runs_with_baselines(
            testing_phases_x,
            predicted_labels_mse,
            baseline_labels_mse,
            title=title,
            ylabel='MSE',
            xlabel='testing phase ID',
            labels=labels,
            hide_legend=True,
            ylim=[-0.1, 0.2],  # just for better resolution
            path=path.join(docs_folder, title),
            doc=document)

        for layer_id in reversed(range(self._experiment_params.num_layers)):
            self.publish_results_for_layer(
                document, docs_folder, measurement_manager,
                topology_parameters, layer_id,
                self._experiment_params.num_layers,
                self._experiment_params.show_conv_agreements,
                self._experiment_params.is_train_test_classifier_computed)
Example #21
    def _publish_results(self):

        doc = Document()

        xlabel = "steps"
        ylabel = "number of SP forward executions"
        title_fe_dt = "smoothed derivation of SP forward executions and TP forward executions (SP O QD)"
        figsize = (18, 12)
        date = get_stamp()

        layer_names = ['L0', 'L1', 'L2']
        nr_layers = len(layer_names)
        labels = list(map(lambda x: x + " forward execution", layer_names)) + \
                 list(map(lambda x: x + " qualitative difference", layer_names))

        colors = ['b', 'orange', 'g', 'r', 'p']
        color_params = [{'c': color} for color in colors[:nr_layers]]
        color_ls_params = [{
            'c': color,
            'ls': '--'
        } for color in colors[:nr_layers]]
        other_params = color_params + color_ls_params

        params_description = ExperimentTemplateBase.parameters_to_string(
            self._topology_parameters_list)
        for run in range(len(self.sp_executions)):
            sp_execution_dt = list(
                map(compute_derivations, self.sp_executions[run]))
            sp_output_dt = self.sp_output_stability[run]

            title = title_fe_dt + f" run {run} "
            fig = plot_multiple_runs(x_values=self.training_steps,
                                     y_values=sp_execution_dt + sp_output_dt,
                                     ylim=[0, 1],
                                     labels=labels,
                                     smoothing_window_size=501,
                                     xlabel=xlabel,
                                     ylabel=ylabel,
                                     title=title + params_description[run],
                                     figsize=figsize,
                                     hide_legend=False,
                                     other_params=other_params)
            add_fig_to_doc(fig,
                           path.join(self._docs_folder, to_safe_name(title)),
                           doc)

        title = "classification accuracy from reconstructed labels"
        fig = plot_multiple_runs(x_values=list(range(
            self._num_testing_phases)),
                                 y_values=self.classification_accuracy,
                                 ylim=[0, 1],
                                 labels=params_description,
                                 smoothing_window_size=None,
                                 xlabel="accuracy",
                                 ylabel="phases",
                                 title=title,
                                 figsize=figsize,
                                 hide_legend=False)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "SE classification accuracy from reconstructed labels"
        fig = plot_multiple_runs(x_values=list(range(
            self._num_testing_phases)),
                                 y_values=self.classification_accuracy_se,
                                 ylim=[0, 1],
                                 labels=params_description,
                                 smoothing_window_size=None,
                                 xlabel="SE accuracy",
                                 ylabel="phases",
                                 title=title,
                                 figsize=figsize,
                                 hide_legend=False)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        doc.write_file(path.join(self._docs_folder, f"main.html"))
Example #22
    def _publish_results_to_doc(self, doc: Document, date: str, docs_folder: os.path):
        """An alternative to the _publish_results method, this is called from _publish_results now

        Draw and add your topologies to the document here.
        """

        labels = ExperimentTemplateBase.extract_params_for_legend(self._topology_parameters_list)

        title = 'training_phase_id'
        f = plot_multiple_runs(
            self._steps,
            self._plotted_training_phase_id,
            title=title,
            ylabel='training_phase_id',
            xlabel='steps',
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = 'testing_phase_id'
        f = plot_multiple_runs(
            self._steps,
            self._plotted_testing_phase_id,
            title=title,
            ylabel='testing_phase_id',
            xlabel='steps',
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the classification accuracy
        title = 'Is Learning'
        f = plot_multiple_runs(
            self._steps,
            self._is_learning_info,
            title=title,
            ylabel='learning=True?',
            xlabel='steps',
            ylim=[-0.1, 1.1],
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        max_x = max(self._steps[0])

        title = 'average_delta_train_layer0'
        f = plot_multiple_runs(
            self._test_sp_steps[0],
            self._average_delta_train_0,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=labels,
            xlim=[0, max_x],
            disable_ascii_labels=True,
            hide_legend=True
            # use_scatter=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = 'average_delta_train_layer1'
        f = plot_multiple_runs(
            self._test_sp_steps[0],
            self._average_delta_train_1,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=labels,
            xlim=[0, max_x],
            disable_ascii_labels=True,
            hide_legend=True
            # use_scatter=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        testing_phases_x = list(range(0, len(self._predicted_labels_mse[0])))

        # plot the classification accuracy
        title = 'Label reconstruction accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phases_x,
            self._model_accuracy,
            self._baseline_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the classification accuracy
        title = 'Label reconstruction SE accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phases_x,
            self._model_se_accuracy,
            self._baseline_se_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing_phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the MSE
        title = 'Mean Square Error of label reconstruction'
        f = plot_multiple_runs_with_baselines(
            testing_phases_x,
            self._predicted_labels_mse,
            self._baseline_labels_mse,
            title=title,
            ylabel='MSE',
            xlabel='testing phase ID',
            labels=labels,
            hide_legend=True,
            ylim=[-0.1, 0.2]  # just for better resolution
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)
Example #23
    def publish_results(self, labels, date, docs_folder, doc):

        doc.add(f"<br><br><br><b>Results for layer {self._layer_id}</b><br>")

        prefix = 'L' + str(self._layer_id) + '--'

        testing_phase_ids = list(range(0, len(self._clustering_agreements[0][0])))  # x-axis values

        title = prefix + ' Weak classifier accuracy trained on SP outputs to labels'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._weak_class_accuracy,
            self._base_weak_class_accuracy,
            title=title,
            ylabel='Accuracy (1 ~ 100%)',
            xlabel='steps',
            labels=labels,
            ylim=[-0.1, 1.1],
            hide_legend=True
        )

        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = prefix + '- Average boosting duration'
        f = plot_multiple_runs(
            self._sp_evaluation_steps[0],
            self._average_boosting_dur,
            title=title,
            ylabel='duration',
            xlabel='steps',
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = prefix + '- Num boosted clusters'
        f = plot_multiple_runs(
            self._sp_evaluation_steps[0],
            self._num_boosted_clusters,
            title=title,
            ylabel='Num boosted clusters / total clusters',
            xlabel='steps',
            labels=labels,
            ylim=[-0.1, 1.1],
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = prefix + '- Average_deltas'
        f = plot_multiple_runs(
            self._sp_evaluation_steps[0],
            self._average_deltas,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=labels,
            disable_ascii_labels=True,
            hide_legend=True
            # use_scatter=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # if this is not the top layer, show conv agreements only if required
        if self._show_conv_agreements or self._layer_id == (self._num_layers - 1):
            agreements = self._clustering_agreements

            for run_id, run_agreements in enumerate(agreements):
                for expert_id, expert_agreements in enumerate(run_agreements):
                    self._plot_agreement(
                        prefix,
                        expert_id,
                        run_id,
                        testing_phase_ids,
                        expert_agreements,
                        docs_folder,
                        doc
                    )