def test_params_to_string():
    """We should be able to parse everything into reasonable string"""

    params, _, _ = make_test_params()

    # modify the default params with some more interesting configuration
    num_layers = 3
    conv_classes = [SpConvLayer, ConvLayer, SpConvLayer]

    for run_par in params:
        run_par['conv_layers_params'] = run_par['conv_layers_params'].change(
            num_conv_layers=num_layers, conv_classes=conv_classes)

    cp = params[0]['conv_layers_params']
    assert cp.conv_classes == conv_classes
    assert cp.num_conv_layers == num_layers

    unpacked = [
        ExperimentTemplateBase._unpack_params(run_par) for run_par in params
    ]

    conv_layers_str = ExperimentTemplateBase._param_value_to_string(
        unpacked[0]['conv_conv_classes'])

    assert conv_layers_str == '[SpConvLayer, ConvLayer, SpConvLayer]'

    # the complete parsed string should be produced without errors
    # (other formatting like the one above could be tested here)
    result = ExperimentTemplateBase.parameters_to_string(unpacked)
    assert result is not None
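
# Note: the test above pins down the formatting contract for lists of classes.
# A minimal sketch of such a conversion (hypothetical helper; the real
# ExperimentTemplateBase._param_value_to_string may be implemented differently):
def param_value_to_string_sketch(value):
    """Render classes as their bare names and lists element-wise."""
    if isinstance(value, list):
        return '[' + ', '.join(param_value_to_string_sketch(v) for v in value) + ']'
    if isinstance(value, type):
        return value.__name__
    return str(value)

# e.g. param_value_to_string_sketch([SpConvLayer, ConvLayer, SpConvLayer])
# yields '[SpConvLayer, ConvLayer, SpConvLayer]'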
Example No. 2
def multiple_runs_class_filter_example(args, debug: bool = False):
    name = "example_multiple_runs"

    cp = MultipleLayersParams()
    cp.compute_reconstruction = True
    cp.conv_classes = [ConvLayer, ConvLayer]
    cp.num_conv_layers = 2
    cp.rf_stride = (8, 8)
    cp.rf_size = (8, 8)

    tp = MultipleLayersParams()
    tp.n_cluster_centers = 250

    # class filter changed
    params = [{
        'class_filter': [1, 2]
    }, {
        'class_filter': [1, 2, 3]
    }, {
        'class_filter': [1]
    }]

    common_params = {
        'conv_layers_params': cp,
        'top_layer_params': tp,
        'image_size': SeDatasetSize.SIZE_64,
        'noise_amp': 0.0,
        'model_seed': None,
        'baseline_seed': None
    }

    # merge the params and common params
    p = ExperimentTemplateBase.add_common_params(params, common_params)

    run_measurement(name, p, args, debug=debug)
def make_test_params():

    cp = MultipleLayersParams()
    cp.compute_reconstruction = True
    cp.conv_classes = ConvLayer
    cp.sp_buffer_size = 6000
    cp.sp_batch_size = 4000
    cp.learning_rate = 0.02
    cp.cluster_boost_threshold = 1000
    cp.max_encountered_seqs = 10000
    cp.max_frequent_seqs = 1000
    cp.seq_length = 4
    cp.seq_lookahead = 1
    cp.num_conv_layers = 1

    cp.n_cluster_centers = 400
    cp.rf_size = (8, 8)
    cp.rf_stride = (8, 8)

    tp = MultipleLayersParams()
    tp.n_cluster_centers = 250
    tp.sp_buffer_size = 4000
    tp.sp_batch_size = 500
    tp.learning_rate = 0.2
    tp.cluster_boost_threshold = 1000
    tp.compute_reconstruction = True

    common_params = {
        'conv_layers_params': cp,
        'top_layer_params': tp,
        'image_size': SeDatasetSize.SIZE_64,
        'class_filter': [1, 2, 3],
        'noise_amp': 0.0,
        'model_seed': None,
        'baseline_seed': None
    }

    changing_params = [{
        'conv_layers_params': cp.change(learning_rate=0.1)
    }, {
        'conv_layers_params': cp.change(learning_rate=0.2),
        'top_layer_params': tp.change(learning_rate=0.1)
    }, {
        'conv_layers_params': cp.change(learning_rate=0.3)
    }]

    params = ExperimentTemplateBase.add_common_params(changing_params,
                                                      common_params)

    return params, common_params, changing_params
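
# Note: make_test_params reuses a single `cp` instance with three different
# learning rates, which relies on `change()` returning a modified copy and
# leaving the original untouched. A minimal sketch of that contract
# (hypothetical; the real MultipleLayersParams.change may differ):
import copy

def change_sketch(params_obj, **overrides):
    """Return a copy of params_obj with the given fields overridden."""
    new_obj = copy.deepcopy(params_obj)
    for name, value in overrides.items():
        setattr(new_obj, name, value)
    return new_obj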
def test_extract_constructor_params():
    """We should be able to extract the constructor params of the class and their value

    Parse the params into the str: Any dictionary,
    merge it with the params for each run and then start doing extraction of things that are duplicate
    """

    params, _, _ = make_test_params()

    constructor_params = ExperimentTemplateBase._get_default_topology_params(
        MockFluffyTopology)

    # test we've extracted all the params and with correct values
    mock_instance = MockFluffyTopology()
    assert len(constructor_params) == 2
    assert constructor_params[
        'constructor_param'] == mock_instance.constructor_param
    assert constructor_params['noise_amp'] == mock_instance.noise_amp

    complete_params = ExperimentTemplateBase.add_common_params(
        params, constructor_params)

    assert len(params) == len(complete_params)
    assert len(params[0]) + 1 == len(
        complete_params[0])  # one non-covered param is in the constructor

    # go through the complete params, for each param:
    #   if it is in the manually specified params, the value should be preserved
    #   if it was not specified, the value should be the one extracted from the constructor
    for complete_run_param, run_param in zip(complete_params, params):
        for key, value in complete_run_param.items():
            if key in run_param:
                assert complete_run_param[key] == run_param[key]
            else:
                assert key == 'constructor_param'
                assert value == mock_instance.constructor_param
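
# Note: a minimal sketch of how constructor defaults can be read off a
# topology class via introspection (an assumption; the real
# ExperimentTemplateBase._get_default_topology_params may work differently):
import inspect

def get_default_topology_params_sketch(topology_class):
    """Map each constructor keyword argument to its default value."""
    signature = inspect.signature(topology_class.__init__)
    return {name: parameter.default
            for name, parameter in signature.parameters.items()
            if name != 'self' and parameter.default is not inspect.Parameter.empty}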
Example No. 5
    def _publish_results(self):
        """Plot and save the results."""

        doc = Document()
        date = get_stamp()

        labels = ExperimentTemplateBase.parameters_to_string(
            self._topology_parameters_list)

        title = 'Mutual Information labels vs ' + self._experiment_name
        self.plot_save(title, self._mutual_info, self._baseline_mutual_info,
                       'Norm. mutual information', labels, date,
                       self._docs_folder, doc)

        title = 'Weak classifier accuracy labels vs ' + self._experiment_name
        self.plot_save(title, self._classifier_accuracy,
                       self._baseline_classifier_accuracy,
                       'Classifier accuracy', labels, date, self._docs_folder,
                       doc)  #, smoothing_size=3)

        title = 'average delta'
        f = plot_multiple_runs(
            self._different_steps[0],  # here the X axes are identical
            self._average_delta,
            title=title,
            ylabel='log(delta)',
            xlabel='steps',
            labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'average boosting duration'
        f = plot_multiple_runs(self._different_steps[0],
                               self._average_boosting_dur,
                               title=title,
                               ylabel='duration',
                               xlabel='steps',
                               labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        doc.write_file(
            path.join(self._docs_folder,
                      to_safe_name(self._complete_name() + date + ".html")))

        print('done')
def test_split_to_unique_and_changing_params():
    """We should be able to split unpacked params to the keys that have changing values and the others"""

    params, common_params, changing_params = make_test_params()

    # does not necessarily have to be here, but just to test everything:
    complete_params = ExperimentTemplateBase.add_common_params(
        params,
        ExperimentTemplateBase._get_default_topology_params(
            MockFluffyTopology))

    # add this common_param to the corresponding dict for testing
    mock_instance = MockFluffyTopology()
    common_params['constructor_param'] = mock_instance.constructor_param

    unpacked_all = [
        ExperimentTemplateBase._unpack_params(run_par)
        for run_par in complete_params
    ]
    unpacked_common = ExperimentTemplateBase._unpack_params(common_params)
    unpacked_changing = [
        ExperimentTemplateBase._unpack_params(run_par)
        for run_par in changing_params
    ]

    del unpacked_common['conv_learning_rate']  # these things are changing
    del unpacked_common['top__learning_rate']

    # this should filter out params that change between runs (different in at least one run)
    constant_params_filtered = ExperimentTemplateBase._find_constant_parameters(
        unpacked_all)
    assert constant_params_filtered == unpacked_common

    # this should remove the constant params from the complete list
    changing_params_filtered = ExperimentTemplateBase._remove_params(
        unpacked_all, constant_params_filtered)

    assert len(changing_params_filtered) == 3
    for cp, complete_par in zip(changing_params_filtered, unpacked_all):
        assert len(cp) == 2
        assert cp['conv_learning_rate'] == complete_par['conv_learning_rate']
        assert cp['top__learning_rate'] == complete_par['top__learning_rate']

    print("done")
def test_unpack_params():
    """Test the params are correctly unpacked to the dictionary {str: value}, with the data classes removed"""

    params, _, _ = make_test_params()

    unpacked_run_params = [
        ExperimentTemplateBase._unpack_params(run_pars) for run_pars in params
    ]

    separator = '_'
    conv_prefix = 'conv_layers_params'[0:4] + separator
    top_prefix = 'top_layer_params'[0:4] + separator

    for run_params in unpacked_run_params:
        assert run_params[conv_prefix + 'num_conv_layers'] == 1
        assert run_params[top_prefix + 'n_cluster_centers'] == 250

    assert unpacked_run_params[0][conv_prefix + 'learning_rate'] == 0.1
    assert unpacked_run_params[1][conv_prefix + 'learning_rate'] == 0.2
    assert unpacked_run_params[2][conv_prefix + 'learning_rate'] == 0.3
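
# Note: the prefixes above expose the flattening scheme: a nested params object
# is expanded field by field under the first four characters of its key plus a
# separator (hence the double underscore in 'top__learning_rate'). A minimal
# sketch, assuming the fields are plain attributes (the real
# ExperimentTemplateBase._unpack_params may differ):
def unpack_params_sketch(run_params, separator='_'):
    flat = {}
    for key, value in run_params.items():
        if isinstance(value, MultipleLayersParams):
            prefix = key[0:4] + separator
            for field, field_value in vars(value).items():
                flat[prefix + field] = field_value
        else:
            flat[key] = value
    return flat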
Example No. 8
def test_add_common_params():
    """Test the helper function that adds default parameters to all experiment runs"""

    cp = MultipleLayersParams()
    cp.compute_reconstruction = True
    cp.conv_classes = [ConvLayer, ConvLayer]
    cp.num_conv_layers = 2

    tp = MultipleLayersParams()
    tp.n_cluster_centers = 250

    # class filter changed
    params = [{
        'class_filter': [1, 2]
    }, {
        'class_filter': [1, 2, 3]
    }, {
        'class_filter': [1]
    }, {}]

    common_params = {
        'conv_layers_params': cp,
        'top_layer_params': tp,
        'image_size': SeDatasetSize.SIZE_64,
        'noise_amp': 0.12,
        'model_seed': None,
        'baseline_seed': None,
        'class_filter': [9]
    }

    p = ExperimentTemplateBase.add_common_params(params, common_params)

    assert len(p) == 4
    assert len(p[0]) == 7
    assert p[0]['class_filter'] == [1, 2]
    assert p[1]['class_filter'] == [1, 2, 3]
    assert p[2]['class_filter'] == [1]
    assert p[3]['class_filter'] == [9]

    for param in p:
        assert param['noise_amp'] == 0.12
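
# Note: the assertions above pin down the merge semantics: every run starts
# from the common defaults and run-specific keys win. A minimal sketch
# (hypothetical; the real ExperimentTemplateBase.add_common_params may differ):
def add_common_params_sketch(params, common_params):
    return [{**common_params, **run_params} for run_params in params]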
    def _publish_results(self):
        """Plot and optionally save the results."""
        doc = Document()
        date = get_stamp()

        labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)

        title = f'FPS vs. {self._experiment_name} (smoothing {self._smoothing_window_size})'
        self.plot_save(title, self._fps, 'FPS',
                       labels, date, self._docs_folder, doc, smoothing_window_size=self._smoothing_window_size)

        smoothing = len(self._fps[0]) // 100  # adaptive smoothing
        if smoothing % 2 == 0:
            smoothing += 1  # make odd
        title = 'FPS vs. ' + self._experiment_name + f' (smoothing {smoothing})'
        self.plot_save(title, self._fps, 'FPS', labels, date, self._docs_folder, doc,
                       smoothing_window_size=smoothing)

        title = f'max_memory_allocated() vs. {self._experiment_name} (smoothing {self._smoothing_window_size})'
        self.plot_save(title, self._max_mem, 'Max memory', labels, date, self._docs_folder, doc,
                       smoothing_window_size=self._smoothing_window_size)

        title = f'memory_allocated() vs. {self._experiment_name} (smoothing {self._smoothing_window_size})'
        self.plot_save(title, self._current_mem, 'Current memory', labels, date, self._docs_folder, doc,
                       smoothing_window_size=self._smoothing_window_size)

        title = f'max_cached() vs. {self._experiment_name} (smoothing {self._smoothing_window_size})'
        self.plot_save(title, self._max_cached, 'Max cached mem', labels, date, self._docs_folder, doc,
                       smoothing_window_size=self._smoothing_window_size)

        title = f'memory_cached() vs. {self._experiment_name} (smoothing {self._smoothing_window_size})'
        self.plot_save(title, self._current_cached, 'Current cached mem', labels, date, self._docs_folder, doc,
                       smoothing_window_size=self._smoothing_window_size)

        self._publish_aux_results(labels, date, self._docs_folder, doc)

        doc.write_file(path.join(self._docs_folder, f"{self._topology_class.__name__}_" + date + ".html"))
        print('done')
Example No. 10
def multiple_runs_lr_example(args, debug: bool = False):

    name = "example_multiple_runs"

    default_conv = MultipleLayersParams()
    default_conv.compute_reconstruction = True
    default_conv.conv_classes = [ConvLayer, SpConvLayer]
    default_conv.num_conv_layers = 2

    default_conv.rf_stride = (8, 8)
    default_conv.rf_size = (8, 8)

    tp = MultipleLayersParams()
    tp.n_cluster_centers = 250

    params = [{
        'conv_layers_params': default_conv.change(learning_rate=0.1)
    }, {
        'conv_layers_params': default_conv.change(learning_rate=0.2),
        'top_layer_params': tp.change(learning_rate=0.11)
    }, {
        'conv_layers_params': default_conv.change(learning_rate=0.3)
    }]

    common_params = {
        'top_layer_params': tp,
        'image_size': SeDatasetSize.SIZE_64,
        'noise_amp': 0.0,
        'model_seed': None,
        'baseline_seed': None
    }

    # merge the params and common params
    p = ExperimentTemplateBase.add_common_params(params, common_params)

    run_measurement(name, p, args, debug=debug)
    def _plot_agreement(self,
                        prefix: str,
                        expert_id: int,
                        run_id: int,
                        testing_phase_ids: List[int],
                        agreements: List[List[int]],
                        docs_folder,
                        doc):
        """Plot cluster agreements for one expert in a layer"""
        run_params = ExperimentTemplateBase.parameters_to_string([self._topology_parameters_list[run_id]])

        title = prefix + f'- E{expert_id}-Run{run_id} - Clustering agreements'
        f = plot_multiple_runs(
            testing_phase_ids,
            agreements,
            title=title,
            ylabel='agreement',
            # xlabel=f'params: {run_params}',
            xlabel='testing phases',
            disable_ascii_labels=True,
            hide_legend=True,
            ylim=[ClusterAgreementMeasurement.NO_VALUE - 0.1, 1.1]
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)
Example No. 12
    def _publish_results(self):
        """Plot and save the results."""

        doc = Document()
        date = get_stamp()

        labels = ExperimentTemplateBase.parameters_to_string(
            self._topology_parameters_list)

        for manager in self._layer_measurement_managers:
            manager.publish_results(labels=labels,
                                    date=date,
                                    docs_folder=self._docs_folder,
                                    doc=doc)

        # plot the running MSE
        title = 'Mean Square Error of TA classification'
        f = plot_multiple_runs_with_baselines(self._steps,
                                              self._predicted_labels_mse,
                                              self._baseline_labels_mse,
                                              title=title,
                                              ylabel='mse',
                                              xlabel='steps',
                                              labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'TA classification accuracy'
        f = plot_multiple_runs_with_baselines(
            self._steps,
            self._classification_accuracy,
            self._random_classification_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='steps',
            labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'TA classification accuracy - (SE-metric)'
        f = plot_multiple_runs_with_baselines(
            self._steps,
            self._se_classification_accuracy,
            self._se_random_classification_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='steps',
            labels=labels)
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'is_learning'
        f = plot_multiple_runs(
            self._steps,
            self._is_learning_info,
            title=title,
            ylabel='is learning',
            xlabel='steps',
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        doc.write_file(
            path.join(self._docs_folder,
                      to_safe_name(self._complete_name() + date + ".html")))

        print('done')
    def _publish_results(self):
        doc = Document()
        doc.add(self._get_heading('_not set'))

        params_str = pprint.pformat(
            Task0TrainTestTemplateRelearning.pprint_parameters_to_string(self._topology_parameters_list), 1, 160
        )

        doc.add(f"<p><b>Parameters:</b><br /><pre>learning rate: {self._learning_rate},\n" + params_str + "</pre></p>")

        labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)

        testing_phases = list(range(0, len(self._clustering_agreement_list[0][0])))

        for i, run_label in enumerate(self._run_labels):
            title = 'Clustering agreement ' + run_label
            f = plot_multiple_runs(
                testing_phases,
                self._clustering_agreement_list[i],
                title=title,
                ylabel='agreement',
                xlabel='testing phases',
                disable_ascii_labels=True,
                hide_legend=True,
                ylim=[ClusterAgreementMeasurement.NO_VALUE - 0.1, 1.1]
            )
            add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Model classification accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phases,
            self._model_accuracy,
            self._baseline_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase',
            ylim=[-0.1, 1.1],
            labels=self._run_labels
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)
        title = 'Model SE classification accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phases,
            self._model_se_accuracy,
            self._baseline_se_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase',
            ylim=[-0.1, 1.1],
            labels=self._run_labels
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Weak classifier accuracy trained on SP outputs to labels'
        f = plot_multiple_runs_with_baselines(
            testing_phases,
            self._weak_class_accuracy,
            self._base_weak_class_accuracy,
            title=title,
            ylabel='Accuracy (1 ~ 100%)',
            xlabel='steps',
            labels=labels,
            ylim=[-0.1, 1.1],
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Average Deltas Train'
        f = plot_multiple_runs(
            [self._sp_evaluation_period * i for i in range(0, len(self._average_delta_train[0]))],
            self._average_delta_train,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=self._run_labels,
            disable_ascii_labels=True,
            use_scatter=False
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Average Deltas Test'
        f = plot_multiple_runs(
            [self._sp_evaluation_period * i for i in range(0, len(self._average_delta_test[0]))],
            self._average_delta_test,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=self._run_labels,
            disable_ascii_labels=True,
            use_scatter=False
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        title = 'Average boosting duration'
        f = plot_multiple_runs(
            [self._sp_evaluation_period * i for i in range(0, len(self._average_boosting_dur[0]))],
            self._average_boosting_dur,
            title=title,
            ylabel='duration',
            xlabel='steps',
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(self._docs_folder, title), doc)

        doc.write_file(path.join(self._docs_folder, "main.html"))
        print('done')
    def _publish_results_to_doc(self, doc: Document, date: str, docs_folder: path):
        """Adds my results to the results produced by the base class"""
        super()._publish_results_to_doc(doc, date, docs_folder)

        doc.add(f"<br><br><br><b>Common results</b><br>")

        labels = ExperimentTemplateBase.extract_params_for_legend(self._topology_parameters_list)

        title = 'training_phase_id'
        f = plot_multiple_runs(
            self._steps,
            self._plotted_training_phase_id,
            title=title,
            ylabel='training_phase_id',
            xlabel='steps',
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = 'testing_phase_id'
        f = plot_multiple_runs(
            self._steps,
            self._plotted_testing_phase_id,
            title=title,
            ylabel='testing_phase_id',
            xlabel='steps',
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        if self._plot_is_learning:
            # plot the classification accuracy
            title = 'Is Learning'
            f = plot_multiple_runs(
                self._steps,
                self._is_learning_info,
                title=title,
                ylabel='learning=True?',
                xlabel='steps',
                ylim=[-0.1, 1.1],
                labels=labels
            )
            add_fig_to_doc(f, path.join(docs_folder, title), doc)

        testing_phase_ids = list(range(0, len(self._mutual_info[0])))  # x-axis values

        title = 'Top-layer L' + str(self._top_layer_id()) + ' Mutual information of SP output with labels'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._mutual_info,
            self._base_mutual_info,
            title=title,
            ylabel='Normalized mutual information',
            xlabel='steps',
            labels=labels,
            ylim=[-0.1, 1.1],
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the classification accuracy
        title = 'Label reconstruction accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._model_accuracy,
            self._baseline_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the classification accuracy
        title = 'Label reconstruction SE accuracy'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._model_se_accuracy,
            self._baseline_se_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the MSE
        title = 'Mean Square Error of label reconstruction'
        f = plot_multiple_runs_with_baselines(
            testing_phase_ids,
            self._predicted_labels_mse,
            self._baseline_labels_mse,
            title=title,
            ylabel='MSE',
            xlabel='testing phase ID',
            labels=labels,
            hide_legend=True,
            ylim=[-0.1, 0.2]  # just for better resolution
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        for manager in reversed(self._layer_measurement_managers):
            manager.publish_results(labels=labels, date=date, docs_folder=docs_folder, doc=doc)
Example No. 15
    def _publish_results(self):
        """Plot and optionally save the results."""

        mse = np.array(self._mse)
        mse_testing = np.array(self._mse_testing)

        memory_used = torch.tensor(self._memory_used, dtype=torch.float32)
        window_size = 201
        memory_used = (memory_used.view(-1, 1).expand(mse_testing.shape) /
                       (1024**2))
        error_memory_ratio = torch.tensor(mse_testing,
                                          dtype=torch.float32) * memory_used
        accuracy_memory_ratio = []
        for run_acc, run_mem in zip(self._weak_classifier_results,
                                    memory_used):
            accuracy_memory_ratio_run = []
            for acc, mem in zip(run_acc, run_mem):
                accuracy_memory_ratio_run.append((1 - acc) * mem)

            accuracy_memory_ratio.append(accuracy_memory_ratio_run)

        accuracy_memory_ratio = torch.tensor(accuracy_memory_ratio)

        doc = Document()

        figs = []
        xlabel = "steps"
        ylabel = "mean reconstruction error"
        title = "Influence of hyperparameters on reconstruction error (training)"
        figsize = (18, 12)
        date = get_stamp()

        fig = plot_multiple_runs(
            x_values=np.arange(len(mse[0])),
            y_values=mse,
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            smoothing_window_size=window_size,
            xlabel=xlabel,
            ylabel=ylabel,
            title=title,
            figsize=figsize,
            hide_legend=True)
        # the smoothed variant is saved with the "_smooth" suffix
        add_fig_to_doc(
            fig, path.join(self._docs_folder,
                           to_safe_name(title) + "_smooth"), doc)

        fig = plot_multiple_runs(
            x_values=np.arange(len(mse[0])),
            y_values=mse,
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel=xlabel,
            ylabel=ylabel,
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Influence of hyperparameters on reconstruction error (testing)"
        fig = plot_multiple_runs(
            x_values=np.array(self._training_steps_before_testing[0]),
            y_values=mse_testing,
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel=xlabel,
            ylabel=ylabel,
            title=title,
            figsize=figsize,
        )
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Weak classifier accuracy"
        fig = plot_multiple_runs(
            x_values=np.array(self._training_steps_before_testing[0]),
            y_values=np.array(self._weak_classifier_results),
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel="steps",
            ylabel="accuracy",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Memory * reconstruction tradeoff (testing)"
        fig = plot_multiple_runs(
            x_values=np.array(self._training_steps_before_testing[0]),
            y_values=error_memory_ratio.numpy(),
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel=xlabel,
            ylabel="Mean reconstruction error times required megabytes of memory",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Memory * error (= 1 - acc) tradeoff"
        fig = plot_multiple_runs(
            x_values=np.array(self._training_steps_before_testing[0]),
            y_values=accuracy_memory_ratio.numpy(),
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel=xlabel,
            ylabel="Mean reconstruction error times required megabytes of memory",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Entropy and mean reconstruction error"
        fig = plot_multiple_runs(
            x_values=np.array(self._code_entropy),
            y_values=mse_testing,
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel="entropy",
            ylabel="mean reconstruction error",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Mean reconstruction error and classifier accuracy"
        fig = plot_multiple_runs(
            x_values=mse_testing,
            y_values=np.array(self._weak_classifier_results),
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel="mean reconstruction error",
            ylabel="accuracy",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "Entropy and classifier accuracy"
        fig = plot_multiple_runs(
            x_values=np.array(self._code_entropy),
            y_values=np.array(self._weak_classifier_results),
            labels=ExperimentTemplateBase.parameters_to_string(
                self._topology_parameters_list),
            xlabel="entropy",
            ylabel="accuracy",
            title=title,
            figsize=figsize,
            hide_legend=True)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        doc.write_file(
            path.join(self._docs_folder,
                      f"{self._topology_class.__name__}_" + date + ".html"))

        print(self._memory_used)
Example No. 16
    def _publish_results(self):

        doc = Document()

        xlabel = "steps"
        ylabel = "number of SP forward executions"
        title_fe_dt = "smoothed derivation of SP forward executions and TP forward executions (SP O QD)"
        figsize = (18, 12)
        date = get_stamp()

        layer_names = ['L0', 'L1', 'L2']
        nr_layers = len(layer_names)
        labels = list(map(lambda x: x + " forward execution", layer_names)) + \
                 list(map(lambda x: x + " qualitative difference", layer_names))

        colors = ['b', 'orange', 'g', 'r', 'purple']  # 'purple' spelled out; 'p' is not a valid matplotlib color
        color_params = [{'c': color} for color in colors[:nr_layers]]
        color_ls_params = [{
            'c': color,
            'ls': '--'
        } for color in colors[:nr_layers]]
        other_params = color_params + color_ls_params

        params_description = ExperimentTemplateBase.parameters_to_string(
            self._topology_parameters_list)
        for run in range(len(self.sp_executions)):
            sp_execution_dt = list(
                map(compute_derivations, self.sp_executions[run]))
            sp_output_dt = self.sp_output_stability[run]

            title = title_fe_dt + f" run {run} "
            fig = plot_multiple_runs(x_values=self.training_steps,
                                     y_values=sp_execution_dt + sp_output_dt,
                                     ylim=[0, 1],
                                     labels=labels,
                                     smoothing_window_size=501,
                                     xlabel=xlabel,
                                     ylabel=ylabel,
                                     title=title + params_description[run],
                                     figsize=figsize,
                                     hide_legend=False,
                                     other_params=other_params)
            add_fig_to_doc(fig,
                           path.join(self._docs_folder, to_safe_name(title)),
                           doc)

        title = "classification accuracy from reconstructed labels"
        fig = plot_multiple_runs(x_values=list(range(
            self._num_testing_phases)),
                                 y_values=self.classification_accuracy,
                                 ylim=[0, 1],
                                 labels=params_description,
                                 smoothing_window_size=None,
                                 xlabel="accuracy",
                                 ylabel="phases",
                                 title=title,
                                 figsize=figsize,
                                 hide_legend=False)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        title = "SE classification accuracy from reconstructed labels"
        fig = plot_multiple_runs(x_values=list(range(
            self._num_testing_phases)),
                                 y_values=self.classification_accuracy_se,
                                 ylim=[0, 1],
                                 labels=params_description,
                                 smoothing_window_size=None,
                                 xlabel="SE accuracy",
                                 ylabel="phases",
                                 title=title,
                                 figsize=figsize,
                                 hide_legend=False)
        add_fig_to_doc(fig, path.join(self._docs_folder, to_safe_name(title)),
                       doc)

        doc.write_file(path.join(self._docs_folder, "main.html"))
Example No. 17
    def _publish_results_to_doc(self, doc: Document, date: str, docs_folder: os.path):
        """An alternative to the _publish_results method, this is called from _publish_results now

        Draw and add your topologies to the document here.
        """

        labels = ExperimentTemplateBase.extract_params_for_legend(self._topology_parameters_list)

        title = 'training_phase_id'
        f = plot_multiple_runs(
            self._steps,
            self._plotted_training_phase_id,
            title=title,
            ylabel='training_phase_id',
            xlabel='steps',
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = 'testing_phase_id'
        f = plot_multiple_runs(
            self._steps,
            self._plotted_testing_phase_id,
            title=title,
            ylabel='testing_phase_id',
            xlabel='steps',
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the classification accuracy
        title = 'Is Learning'
        f = plot_multiple_runs(
            self._steps,
            self._is_learning_info,
            title=title,
            ylabel='learning=True?',
            xlabel='steps',
            ylim=[-0.1, 1.1],
            labels=labels
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        max_x = max(self._steps[0])

        title = 'average_delta_train_layer0'
        f = plot_multiple_runs(
            self._test_sp_steps[0],
            self._average_delta_train_0,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=labels,
            xlim=[0, max_x],
            disable_ascii_labels=True,
            hide_legend=True
            # use_scatter=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        title = 'average_delta_train_layer1'
        f = plot_multiple_runs(
            self._test_sp_steps[0],
            self._average_delta_train_1,
            title=title,
            ylabel='average_deltas',
            xlabel='steps',
            labels=labels,
            xlim=[0, max_x],
            disable_ascii_labels=True,
            hide_legend=True
            # use_scatter=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        testing_phases_x = list(range(0, len(self._predicted_labels_mse[0])))

        # plot the classification accuracy
        title = 'Label reconstruction accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phases_x,
            self._model_accuracy,
            self._baseline_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the classification accuracy
        title = 'Label reconstruction SE accuracy (step-wise)'
        f = plot_multiple_runs_with_baselines(
            testing_phases_x,
            self._model_se_accuracy,
            self._baseline_se_accuracy,
            title=title,
            ylabel='accuracy (1 ~ 100%)',
            xlabel='testing phase ID',
            ylim=[-0.1, 1.1],
            labels=labels,
            hide_legend=True
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)

        # plot the MSE
        title = 'Mean Square Error of label reconstruction'
        f = plot_multiple_runs_with_baselines(
            testing_phases_x,
            self._predicted_labels_mse,
            self._baseline_labels_mse,
            title=title,
            ylabel='MSE',
            xlabel='testing phase ID',
            labels=labels,
            hide_legend=True,
            ylim=[-0.1, 0.2]  # just for better resolution
        )
        add_fig_to_doc(f, path.join(docs_folder, title), doc)