Пример #1
0
def different_plots(x_values, y_values, baseline, legend):
    """Tests potentially problematic plot setups."""
    # Each plotting helper is exercised twice: once with the default
    # smoothing and once with an explicit (small) smoothing window.
    plot_multiple_runs(x_values, y_values, labels=legend)
    plot_multiple_runs(x_values, y_values, labels=legend, smoothing_window_size=3)
    plot_multiple_runs_with_baselines(x_values, y_values, baseline, labels=legend)
    plot_multiple_runs_with_baselines(x_values, y_values, baseline, labels=legend,
                                      smoothing_window_size=3)
Пример #2
0
    def plot_save(self, title, series, series_baselines, ylabel, labels,
                  date, docs_folder, doc, smoothing_size: int = None):
        """Baselines are in dotted grey, the model outputs are colored.

        Plots the series (with baselines) against the stored steps of the
        first run and appends the rendered figure to the document.
        """
        # NOTE(review): `date` is accepted but not used here — presumably kept
        # for interface compatibility with other publishers; confirm.
        figure = plot_multiple_runs_with_baselines(self._steps[0],
                                                   series,
                                                   series_baselines,
                                                   title=title,
                                                   xlabel='Simulation step',
                                                   ylabel=ylabel,
                                                   ylim=[0, 1.1],
                                                   smoothing_window_size=smoothing_size,
                                                   labels=labels)
        add_fig_to_doc(figure, path.join(docs_folder, title), doc)
Пример #3
0
    def publish_results_for_layer(document: Document, docs_folder: str,
                                  measurement_manager: MeasurementManager,
                                  topology_parameters: List[str],
                                  layer_id: int, num_layers: int,
                                  show_conv_agreements: bool,
                                  is_train_test_classifier_computed: bool):
        """Publish results for each layer separately

        This uses the data measured and computed for each run by the Task0TaAnalysisLayerComponent and aggregated
        and stored in the measurement_manager.

        Args:
            document: document to which the rendered plots are added
            docs_folder: folder where the figure files are written
            measurement_manager: source of the per-run measured/aggregated values
            topology_parameters: one description per run, used as legend labels
            layer_id: id of the layer whose results are being published
            num_layers: total number of layers (used to identify the top layer)
            show_conv_agreements: if False, clustering agreements are drawn only for the top layer
            is_train_test_classifier_computed: if True, also plot the train/test weak-classifier accuracies
        """

        logger.info(f'publishing results for layer {layer_id}...')

        # Per-step SP statistics for this layer, one series per run.
        num_boosted_clusters = measurement_manager.get_values_from_all_runs(
            f'num_boosted_getter_{layer_id}')
        average_boosting_dur = measurement_manager.get_values_from_all_runs(
            f'average_boosting_dur_{layer_id}')
        average_deltas = measurement_manager.get_values_from_all_runs(
            f'average_delta_{layer_id}')

        # Custom (per-testing-phase) data computed by the layer component.
        base_weak_class_accuracy = measurement_manager.get_custom_data_from_all_runs(
            f'base_weak_class_accuracy_{layer_id}')
        clustering_agreements = measurement_manager.get_custom_data_from_all_runs(
            f'clustering_agreements_{layer_id}')

        # Items are (step, value) pairs; the steps of the first run provide
        # the common x-axis for the SP-evaluation plots below.
        average_steps_deltas = measurement_manager.get_items_from_all_runs(
            f'average_delta_{layer_id}')
        sp_evaluation_steps = [key for key, value in average_steps_deltas[0]]

        labels = topology_parameters

        document.add(f"<br><br><br><b>Results for layer {layer_id}</b><br>")
        # All plot titles of this layer share this prefix, e.g. 'L0--'.
        prefix = 'L' + str(layer_id) + '--'

        testing_phase_ids = list(range(0, len(
            clustering_agreements[0][0])))  # x-axis values

        if is_train_test_classifier_computed:
            weak_class_accuracy_train = measurement_manager.get_custom_data_from_all_runs(
                f'weak_class_accuracy_train_{layer_id}')
            weak_class_accuracy_test = measurement_manager.get_custom_data_from_all_runs(
                f'weak_class_accuracy_test_{layer_id}')

            title = prefix + ' Weak classifier accuracy (trained on train, tested on train data)'
            plot_multiple_runs_with_baselines(testing_phase_ids,
                                              weak_class_accuracy_train,
                                              base_weak_class_accuracy,
                                              title=title,
                                              ylabel='Accuracy (1 ~ 100%)',
                                              xlabel='steps',
                                              labels=labels,
                                              ylim=[-0.1, 1.1],
                                              hide_legend=True,
                                              path=path.join(
                                                  docs_folder, title),
                                              doc=document)

            title = prefix + ' Weak classifier accuracy (trained on train, tested on test data)'
            plot_multiple_runs_with_baselines(testing_phase_ids,
                                              weak_class_accuracy_test,
                                              base_weak_class_accuracy,
                                              title=title,
                                              ylabel='Accuracy (1 ~ 100%)',
                                              xlabel='steps',
                                              labels=labels,
                                              ylim=[-0.1, 1.1],
                                              hide_legend=True,
                                              path=path.join(
                                                  docs_folder, title),
                                              doc=document)

        # Always plotted: the weak classifier trained and tested on test data.
        weak_class_accuracy = measurement_manager.get_custom_data_from_all_runs(
            f'weak_class_accuracy_{layer_id}')
        title = prefix + ' Weak classifier accuracy (trained on test, tested on test)'
        plot_multiple_runs_with_baselines(testing_phase_ids,
                                          weak_class_accuracy,
                                          base_weak_class_accuracy,
                                          title=title,
                                          ylabel='Accuracy (1 ~ 100%)',
                                          xlabel='steps',
                                          labels=labels,
                                          ylim=[-0.1, 1.1],
                                          hide_legend=True,
                                          path=path.join(docs_folder, title),
                                          doc=document)

        title = prefix + '- Average boosting duration'
        plot_multiple_runs(sp_evaluation_steps,
                           average_boosting_dur,
                           title=title,
                           ylabel='duration',
                           xlabel='steps',
                           labels=labels,
                           hide_legend=True,
                           path=path.join(docs_folder, title),
                           doc=document)

        title = prefix + '- Num boosted clusters'
        plot_multiple_runs(sp_evaluation_steps,
                           num_boosted_clusters,
                           title=title,
                           ylabel='Num boosted clusters / total clusters',
                           xlabel='steps',
                           labels=labels,
                           ylim=[-0.1, 1.1],
                           hide_legend=True,
                           path=path.join(docs_folder, title),
                           doc=document)

        title = prefix + '- Average_deltas'
        plot_multiple_runs(
            sp_evaluation_steps,
            average_deltas,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=labels,
            disable_ascii_labels=True,
            hide_legend=True,
            # use_scatter=True,
            path=path.join(docs_folder, title),
            doc=document)

        # if this is not the top layer, show conv agreements only if required
        if show_conv_agreements or layer_id == (num_layers - 1):
            agreements = clustering_agreements

            # One agreement plot per (run, expert) pair.
            for run_id, run_agreements in enumerate(agreements):
                for expert_id, expert_agreements in enumerate(run_agreements):
                    Task0TaAnalysisTemplate._plot_agreement(
                        prefix, expert_id, run_id, testing_phase_ids,
                        expert_agreements, document, docs_folder)

        logger.info('done')
Пример #4
0
    def publish_results(self, document: Document, docs_folder: str,
                        measurement_manager: MeasurementManager,
                        topology_parameters: List[str]):
        """An alternative to the _publish_results method, this is called from _publish_results now

        Draw and add your plots to the document here.

        Args:
            document: document to which the rendered plots are added
            docs_folder: folder where the figure files are written
            measurement_manager: source of the per-run measured values
            topology_parameters: one description per run, used as legend labels
        """
        # Common per-step values collected during the runs.
        steps = measurement_manager.get_values_from_all_runs('current_step')
        plotted_training_phase_id = measurement_manager.get_values_from_all_runs(
            'training_phase_id')
        plotted_testing_phase_id = measurement_manager.get_values_from_all_runs(
            'testing_phase_id')
        plotted_is_learning = measurement_manager.get_values_from_all_runs(
            'is_learning')

        labels = topology_parameters
        document.add(
            f"<br><br><br><b>Common results from the TopExpert</b><br>")

        title = 'training_phase_id'
        plot_multiple_runs(steps,
                           plotted_training_phase_id,
                           title=title,
                           ylabel='training_phase_id',
                           xlabel='steps',
                           labels=labels,
                           path=path.join(docs_folder, title),
                           doc=document)

        title = 'testing_phase_id'
        plot_multiple_runs(steps,
                           plotted_testing_phase_id,
                           title=title,
                           ylabel='testing_phase_id',
                           xlabel='steps',
                           labels=labels,
                           path=path.join(docs_folder, title),
                           doc=document)

        title = 'is_learning'
        plot_multiple_runs(steps,
                           plotted_is_learning,
                           title=title,
                           ylabel='is_learning',
                           xlabel='steps',
                           labels=labels,
                           path=path.join(docs_folder, title),
                           doc=document)

        # Per-testing-phase metrics; the x-axis is the testing phase index.
        predicted_labels_mse = measurement_manager.get_custom_data_from_all_runs(
            'predicted_labels_mse')
        testing_phases_x = list(range(0, len(predicted_labels_mse[0])))

        model_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'model_accuracy')
        baseline_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'baseline_accuracy')
        # plot the classification accuracy
        title = 'Label reconstruction accuracy (step-wise)'
        plot_multiple_runs_with_baselines(testing_phases_x,
                                          model_accuracy,
                                          baseline_accuracy,
                                          title=title,
                                          ylabel='accuracy (1 ~ 100%)',
                                          xlabel='testing phase ID',
                                          ylim=[-0.1, 1.1],
                                          labels=labels,
                                          hide_legend=True,
                                          path=path.join(docs_folder, title),
                                          doc=document)

        model_se_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'model_se_accuracy')
        baseline_se_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'baseline_se_accuracy')

        # plot the classification accuracy
        title = 'Label reconstruction SE accuracy (object-wise)'
        plot_multiple_runs_with_baselines(testing_phases_x,
                                          model_se_accuracy,
                                          baseline_se_accuracy,
                                          title=title,
                                          ylabel='accuracy (1 ~ 100%)',
                                          xlabel='testing_phase ID',
                                          ylim=[-0.1, 1.1],
                                          labels=labels,
                                          hide_legend=True,
                                          path=path.join(docs_folder, title),
                                          doc=document)

        baseline_labels_mse = measurement_manager.get_custom_data_from_all_runs(
            'baseline_labels_mse')

        # plot the MSE
        title = 'Mean Square Error of label reconstruction'
        plot_multiple_runs_with_baselines(
            testing_phases_x,
            predicted_labels_mse,
            baseline_labels_mse,
            title=title,
            ylabel='MSE',
            xlabel='testing phase ID',
            labels=labels,
            hide_legend=True,
            ylim=[-0.1, 0.2],  # just for better resolution
            path=path.join(docs_folder, title),
            doc=document)

        # Per-layer results, published from the top layer down.
        for layer_id in reversed(range(self._experiment_params.num_layers)):
            self.publish_results_for_layer(
                document, docs_folder, measurement_manager,
                topology_parameters, layer_id,
                self._experiment_params.num_layers,
                self._experiment_params.show_conv_agreements,
                self._experiment_params.is_train_test_classifier_computed)
Пример #5
0
    def _publish_results(self):
        """Plot and save the results.

        Delegates the per-layer results to the layer measurement managers,
        then plots the common MSE/accuracy/is_learning series and writes
        everything into a single HTML document.
        """

        doc = Document()
        date = get_stamp()

        # Legend labels derived from the per-run topology parameters.
        labels = ExperimentTemplateBase.parameters_to_string(
            self._topology_parameters_list)

        for manager in self._layer_measurement_managers:
            manager.publish_results(labels=labels,
                                    date=date,
                                    docs_folder=self._docs_folder,
                                    doc=doc)

        # plot the running MSE
        title = 'Mean Square Error of TA classification'
        f = plot_multiple_runs_with_baselines(self._steps,
                                              self._predicted_labels_mse,
                                              self._baseline_labels_mse,
                                              title=title,
                                              ylabel='mse',
                                              xlabel='steps',
                                              labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'TA classification accuracy'
        f = plot_multiple_runs_with_baselines(
            self._steps,
            self._classification_accuracy,
            self._random_classification_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='steps',
            labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'TA classification accuracy - (SE-metric)'
        f = plot_multiple_runs_with_baselines(
            self._steps,
            self._se_classification_accuracy,
            self._se_random_classification_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='steps',
            labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'is_learning'
        f = plot_multiple_runs(
            self._steps,
            self._is_learning_info,
            title=title,
            ylabel='is learning',
            xlabel='steps',
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        # File name includes the experiment name and the timestamp.
        doc.write_file(
            path.join(self._docs_folder,
                      to_safe_name(self._complete_name() + date + ".html")))

        print('done')
    def _publish_results(self):
        """Plot and save the results of the relearning experiment.

        Writes the parameters header, the per-run clustering agreements, the
        accuracy/delta/boosting plots, and saves everything to main.html.
        """
        doc = Document()
        doc.add(self._get_heading('_not set'))

        # Pretty-printed run parameters shown at the top of the document.
        params_str = pprint.pformat(
            Task0TrainTestTemplateRelearning.pprint_parameters_to_string(self._topology_parameters_list), 1, 160
        )

        doc.add(f"<p><b>Parameters:</b><br /><pre>learning rate: {self._learning_rate},\n" + params_str + "</pre></p>")

        labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)

        # x-axis: one point per testing phase (taken from the first run).
        testing_phases = list(range(0, len(self._clustering_agreement_list[0][0])))

        # One clustering-agreement plot per run.
        for i, run_label in enumerate(self._run_labels):
            title = 'Clustering agreement ' + run_label
            f = plot_multiple_runs(
                testing_phases,
                self._clustering_agreement_list[i],
                title=title,
                ylabel='agreement',
                xlabel='testing training_phases',
                disable_ascii_labels=True,
                hide_legend=True,
                ylim=[ClusterAgreementMeasurement.NO_VALUE - 0.1, 1.1]
            )
            add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Model classification accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phases,
            self._model_accuracy,
            self._baseline_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase',
            ylim=[-0.1, 1.1],
            labels=self._run_labels
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)
        title = 'Model SE classification accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phases,
            self._model_se_accuracy,
            self._baseline_se_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing_phase',
            ylim=[-0.1, 1.1],
            labels=self._run_labels
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Weak classifier accuracy trained on SP outputs to labels'
        f = plot_multiple_runs_with_baselines(
            testing_phases,
            self._weak_class_accuracy,
            self._base_weak_class_accuracy,
            title=title,
            ylabel='Accuracy (1 ~ 100%)',
            xlabel='steps',
            labels=labels,
            ylim=[-0.1, 1.1],
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        # SP deltas are sampled every _sp_evaluation_period steps, so the
        # x-axis is reconstructed from that period.
        title = 'Average Deltas Train'
        f = plot_multiple_runs(
            [self._sp_evaluation_period * i for i in range(0, len(self._average_delta_train[0]))],
            self._average_delta_train,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=self._run_labels,
            disable_ascii_labels=True,
            use_scatter=False
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Average Deltas Test'
        f = plot_multiple_runs(
            [self._sp_evaluation_period * i for i in range(0, len(self._average_delta_test[0]))],
            self._average_delta_test,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=self._run_labels,
            disable_ascii_labels=True,
            use_scatter=False
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Average boosting duration'
        f = plot_multiple_runs(
            [self._sp_evaluation_period * i for i in range(0, len(self._average_boosting_dur[0]))],
            self._average_boosting_dur,
            title=title,
            ylabel='duration',
            xlabel='steps',
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        doc.write_file(path.join(self._docs_folder, "main.html"))
        print('done')
    def publish_results(self, labels, date, docs_folder, doc):
        """Publish the plots of this layer into the given document.

        Args:
            labels: legend labels, one per run
            date: timestamp string (currently unused here — presumably kept
                for interface compatibility with other publishers; verify)
            docs_folder: folder where the figure files are written
            doc: document to which the rendered plots are added
        """

        doc.add(f"<br><br><br><b>Results for layer {self._layer_id}</b><br>")

        # All plot titles of this layer share this prefix, e.g. 'L0--'.
        prefix = 'L' + str(self._layer_id) + '--'

        testing_phase_ids = list(range(0, len(self._clustering_agreements[0][0])))  # x-axis values

        title = prefix + ' Weak classifier accuracy trained on SP outputs to labels'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._weak_class_accuracy,
            self._base_weak_class_accuracy,
            title=title,
            ylabel='Accuracy (1 ~ 100%)',
            xlabel='steps',
            labels=labels,
            ylim=[-0.1, 1.1],
            hide_legend=True
        )

        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = prefix + '- Average boosting duration'
        f = plot_multiple_runs(
            self._sp_evaluation_steps[0],
            self._average_boosting_dur,
            title=title,
            ylabel='duration',
            xlabel='steps',
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = prefix + '- Num boosted clusters'
        f = plot_multiple_runs(
            self._sp_evaluation_steps[0],
            self._num_boosted_clusters,
            title=title,
            ylabel='Num boosted clusters / total clusters',
            xlabel='steps',
            labels=labels,
            ylim=[-0.1, 1.1],
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = prefix + '- Average_deltas'
        f = plot_multiple_runs(
            self._sp_evaluation_steps[0],
            self._average_deltas,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=labels,
            disable_ascii_labels=True,
            hide_legend=True
            # use_scatter=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # if this is not the top layer, show conv agreements only if required
        if self._show_conv_agreements or self._layer_id == (self._num_layers - 1):
            agreements = self._clustering_agreements

            # One agreement plot per (run, expert) pair.
            for run_id, run_agreements in enumerate(agreements):
                for expert_id, expert_agreements in enumerate(run_agreements):
                    self._plot_agreement(
                        prefix,
                        expert_id,
                        run_id,
                        testing_phase_ids,
                        expert_agreements,
                        docs_folder,
                        doc
                    )
    def _publish_results_to_doc(self, doc: Document, date: str, docs_folder: path):
        """Adds my results to the results produced by the base class

        Plots the common run statistics (phase ids, learning flag), the
        top-layer mutual information and the label-reconstruction metrics,
        then delegates the per-layer results to the layer measurement managers.

        Args:
            doc: document to which the rendered plots are added
            date: timestamp string forwarded to the per-layer publishers
            docs_folder: folder where the figure files are written
        """
        super()._publish_results_to_doc(doc, date, docs_folder)

        doc.add(f"<br><br><br><b>Common results</b><br>")

        # Legend labels derived from the per-run topology parameters.
        labels = ExperimentTemplateBase.extract_params_for_legend(self._topology_parameters_list)

        title = 'training_phase_id'
        f = plot_multiple_runs(
            self._steps,
            self._plotted_training_phase_id,
            title=title,
            ylabel='training_phase_id',
            xlabel='steps',
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = 'testing_phase_id'
        f = plot_multiple_runs(
            self._steps,
            self._plotted_testing_phase_id,
            title=title,
            ylabel='testing_phase_id',
            xlabel='steps',
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        if self._plot_is_learning:
            # plot the classification accuracy
            title = 'Is Learning'
            f = plot_multiple_runs(
                self._steps,
                self._is_learning_info,
                title=title,
                ylabel='learning=True?',
                xlabel='steps',
                ylim=[-0.1, 1.1],
                labels=labels
            )
            add_fig_to_doc(f, path.join(docs_folder, title), doc)

        testing_phase_ids = list(range(0, len(self._mutual_info[0])))  # x-axis values

        title = 'Top-layer L' + str(self._top_layer_id()) + ' Mutual information of SP output with labels'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._mutual_info,
            self._base_mutual_info,
            title=title,
            ylabel='Normalized mutual information',
            xlabel='steps',
            labels=labels,
            ylim=[-0.1, 1.1],
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the classification accuracy
        title = 'Label reconstruction accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._model_accuracy,
            self._baseline_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the classification accuracy
        title = 'Label reconstruction SE accuracy'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._model_se_accuracy,
            self._baseline_se_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing_phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the MSE
        title = 'Mean Square Error of label reconstruction'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._predicted_labels_mse,
            self._baseline_labels_mse,
            title=title,
            ylabel='MSE',
            xlabel='testing phase ID',
            labels=labels,
            hide_legend=True,
            ylim=[-0.1, 0.2]  # just for better resolution
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        for manager in reversed(self._layer_measurement_managers):
            # Fix: pass the legend labels list, not the title string of the
            # last plot, to the per-layer publishers (matches the sibling
            # publishers which call publish_results(labels=labels, ...)).
            manager.publish_results(labels=labels, date=date, docs_folder=docs_folder, doc=doc)
    def publish_results(self, document: Document, docs_folder: str,
                        measurement_manager: MeasurementManager,
                        topology_parameters: List[str]):
        """An alternative to the _publish_results method, this is called from _publish_results now

        Draw and add your plots to the document here.

        Args:
            document: document to which the rendered plots are added
            docs_folder: folder where the figure files are written
            measurement_manager: source of the per-run measured values
            topology_parameters: one description per run, used as legend labels
        """
        # Common per-step values collected during the runs.
        steps = measurement_manager.get_values_from_all_runs('current_step')
        plotted_training_phase_id = measurement_manager.get_values_from_all_runs(
            'training_phase_id')
        plotted_testing_phase_id = measurement_manager.get_values_from_all_runs(
            'testing_phase_id')

        labels = topology_parameters

        title = 'training_phase_id'
        f = plot_multiple_runs(steps,
                               plotted_training_phase_id,
                               title=title,
                               ylabel='training_phase_id',
                               xlabel='steps',
                               labels=labels)
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        title = 'testing_phase_id'
        f = plot_multiple_runs(steps,
                               plotted_testing_phase_id,
                               title=title,
                               ylabel='testing_phase_id',
                               xlabel='steps',
                               labels=labels)
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        # Deltas are stored as (step, value) items; the steps of the first
        # run provide the common x-axis, clipped to the longest run.
        max_x = max(steps[0])
        average_delta_train_0 = measurement_manager.get_items_from_all_runs(
            'average_delta0_train')
        test_sp_steps = [key for key, value in average_delta_train_0[0]]

        title = 'average_delta_train_layer0'
        f = plot_multiple_runs(test_sp_steps,
                               [[value for key, value in sequence]
                                for sequence in average_delta_train_0],
                               title=title,
                               ylabel='average_deltas',
                               xlabel='steps',
                               labels=labels,
                               xlim=[0, max_x],
                               disable_ascii_labels=True,
                               hide_legend=True
                               # use_scatter=True
                               )
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        average_delta_train_1 = measurement_manager.get_items_from_all_runs(
            'average_delta1_train')
        title = 'average_delta_train_layer1'
        f = plot_multiple_runs(test_sp_steps,
                               [[value for key, value in sequence]
                                for sequence in average_delta_train_1],
                               title=title,
                               ylabel='average_deltas',
                               xlabel='steps',
                               labels=labels,
                               xlim=[0, max_x],
                               disable_ascii_labels=True,
                               hide_legend=True
                               # use_scatter=True
                               )
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        # Per-testing-phase metrics; the x-axis is the testing phase index.
        predicted_labels_mse = measurement_manager.get_custom_data_from_all_runs(
            'predicted_labels_mse')
        testing_phases_x = list(range(0, len(predicted_labels_mse[0])))

        model_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'model_accuracy')
        baseline_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'baseline_accuracy')
        # plot the classification accuracy
        title = 'Label reconstruction accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(testing_phases_x,
                                              model_accuracy,
                                              baseline_accuracy,
                                              title=title,
                                              ylabel='accuracy (1 ~ 100%)',
                                              xlabel='testing phase ID',
                                              ylim=[-0.1, 1.1],
                                              labels=labels,
                                              hide_legend=True)
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        model_se_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'model_se_accuracy')
        baseline_se_accuracy = measurement_manager.get_custom_data_from_all_runs(
            'baseline_se_accuracy')

        # plot the classification accuracy
        title = 'Label reconstruction SE accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(testing_phases_x,
                                              model_se_accuracy,
                                              baseline_se_accuracy,
                                              title=title,
                                              ylabel='accuracy (1 ~ 100%)',
                                              xlabel='testing_phase ID',
                                              ylim=[-0.1, 1.1],
                                              labels=labels,
                                              hide_legend=True)
        add_fig_to_doc(f, path.join(docs_folder, title), document)

        baseline_labels_mse = measurement_manager.get_custom_data_from_all_runs(
            'baseline_labels_mse')

        # plot the MSE
        title = 'Mean Square Error of label reconstruction'
        f = plot_multiple_runs_with_baselines(
            testing_phases_x,
            predicted_labels_mse,
            baseline_labels_mse,
            title=title,
            ylabel='MSE',
            xlabel='testing phase ID',
            labels=labels,
            hide_legend=True,
            ylim=[-0.1, 0.2]  # just for better resolution
        )
        add_fig_to_doc(f, path.join(docs_folder, title), document)
# Пример #10 (Example #10)
# 0
    def _publish_results_to_doc(self, doc: Document, date: str, docs_folder: str):
        """An alternative to the _publish_results method, this is called from _publish_results now.

        Draw and add your topologies to the document here.

        Args:
            doc: document the rendered figures are appended to
            date: date string identifying the run (not used in this method,
                kept for interface compatibility with the caller)
            docs_folder: folder into which the figure files are written
        """

        labels = ExperimentTemplateBase.extract_params_for_legend(self._topology_parameters_list)

        def publish(figure, figure_title):
            """Save the figure under figure_title in docs_folder and embed it in the doc."""
            add_fig_to_doc(figure, path.join(docs_folder, figure_title), doc)

        # Phase-ID traces over simulation steps (sanity check of the
        # training/testing phase alternation across runs).
        title = 'training_phase_id'
        publish(plot_multiple_runs(
            self._steps,
            self._plotted_training_phase_id,
            title=title,
            ylabel='training_phase_id',
            xlabel='steps',
            labels=labels
        ), title)

        title = 'testing_phase_id'
        publish(plot_multiple_runs(
            self._steps,
            self._plotted_testing_phase_id,
            title=title,
            ylabel='testing_phase_id',
            xlabel='steps',
            labels=labels
        ), title)

        # Binary indicator of whether learning was enabled at each step
        # (comment fixed: this is not the classification accuracy).
        title = 'Is Learning'
        publish(plot_multiple_runs(
            self._steps,
            self._is_learning_info,
            title=title,
            ylabel='learning=True?',
            xlabel='steps',
            ylim=[-0.1, 1.1],
            labels=labels
        ), title)

        # Use the same x-range as the step-wise plots above so the delta plots align.
        max_x = max(self._steps[0])

        title = 'average_delta_train_layer0'
        publish(plot_multiple_runs(
            self._test_sp_steps[0],
            self._average_delta_train_0,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=labels,
            xlim=[0, max_x],
            disable_ascii_labels=True,
            hide_legend=True
            # use_scatter=True
        ), title)

        title = 'average_delta_train_layer1'
        publish(plot_multiple_runs(
            self._test_sp_steps[0],
            self._average_delta_train_1,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=labels,
            xlim=[0, max_x],
            disable_ascii_labels=True,
            hide_legend=True
            # use_scatter=True
        ), title)

        # One x-point per testing phase for the phase-wise statistics below.
        testing_phases_x = list(range(0, len(self._predicted_labels_mse[0])))

        # Classification accuracy of the model vs. its baseline.
        title = 'Label reconstruction accuracy (step-wise)'
        publish(plot_multiple_runs_with_baselines(
            testing_phases_x,
            self._model_accuracy,
            self._baseline_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        ), title)

        # SE-classifier accuracy of the model vs. its baseline.
        title = 'Label reconstruction SE accuracy (step-wise)'
        publish(plot_multiple_runs_with_baselines(
            testing_phases_x,
            self._model_se_accuracy,
            self._baseline_se_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            # fixed: was 'testing_phase ID', inconsistent with the sibling plots
            xlabel='testing phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        ), title)

        # MSE of the label reconstruction vs. its baseline.
        title = 'Mean Square Error of label reconstruction'
        publish(plot_multiple_runs_with_baselines(
            testing_phases_x,
            self._predicted_labels_mse,
            self._baseline_labels_mse,
            title=title,
            ylabel='MSE',
            xlabel='testing phase ID',
            labels=labels,
            hide_legend=True,
            ylim=[-0.1, 0.2]  # just for better resolution
        ), title)