Example #1
    def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
        exp_cm = self.result()
        phase_name, _ = phase_and_task(strategy)
        stream = stream_type(strategy.experience)
        metric_name = '{}/{}_phase/{}_stream' \
            .format(str(self),
                    phase_name,
                    stream)
        plot_x_position = self.get_global_counter()

        if self._save_image:
            # Reorder rows and columns so classes appear in the desired
            # display order before the matrix is rendered as an image.
            class_order = self._get_display_class_order(exp_cm, strategy)

            cm_image = self._image_creator(
                exp_cm[class_order][:, class_order],
                class_order
            )
            metric_representation = MetricValue(
                self, metric_name, AlternativeValues(cm_image, exp_cm),
                plot_x_position)
        else:
            metric_representation = MetricValue(
                self, metric_name, exp_cm, plot_x_position)

        return [metric_representation]
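
The _image_creator used in Example #1 is just a callable that receives the
(reordered) confusion matrix and the class order and returns an image. Below
is a minimal self-contained sketch of a compatible creator; it assumes the
matrix arrives as a 2-D torch.Tensor, and the name simple_cm_image_creator is
hypothetical (Avalanche ships its own default creator).

    import io

    import matplotlib.pyplot as plt
    import torch
    from PIL import Image

    def simple_cm_image_creator(cm: torch.Tensor, display_labels):
        # Render the matrix as a heatmap and return it as a PIL image.
        fig, ax = plt.subplots()
        ax.imshow(cm.cpu().numpy(), interpolation='nearest',
                  cmap=plt.cm.Blues)
        ax.set_xticks(range(len(display_labels)))
        ax.set_yticks(range(len(display_labels)))
        ax.set_xticklabels(display_labels)
        ax.set_yticklabels(display_labels)
        ax.set_xlabel('Predicted label')
        ax.set_ylabel('True label')
        fig.tight_layout()
        buf = io.BytesIO()
        fig.savefig(buf, format='png')
        plt.close(fig)
        buf.seek(0)
        return Image.open(buf)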
Example #2
    def _package_result(self, strategy: 'BaseStrategy'):
        metric_value = self.result()

        metric_name = get_metric_name(self, strategy)
        plot_x_position = self.get_global_counter()

        return [MetricValue(self, metric_name, metric_value, plot_x_position)]
Example #3
    def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
        exp_cpu = self.result()

        metric_name = get_metric_name(self, strategy, add_experience=True)
        plot_x_position = self._next_x_position(metric_name)

        return [MetricValue(self, metric_name, exp_cpu, plot_x_position)]
Example #4
    def test_publish_metric(self):
        ep = EvaluationPlugin()
        mval = MetricValue(self, "metric", 1.0, 0)
        ep.publish_metric_value(mval)

        # check key exists
        assert len(ep.get_all_metrics()["metric"][1]) == 1
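
As the assertion suggests, get_all_metrics() maps each metric name to a pair
of lists (x positions, recorded values), so the [1] index selects the values.
Reading the value back, as a short sketch against the same objects:

    xs, vals = ep.get_all_metrics()["metric"]
    assert xs == [0] and vals == [1.0]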
Example #5
    def _package_result(self, strategy: 'PluggableStrategy') -> MetricResult:
        exp_time = self.result()

        metric_name = get_metric_name(self, strategy)
        plot_x_position = self._next_x_position(metric_name)

        return [MetricValue(self, metric_name, exp_time, plot_x_position)]
Example #6
    def _package_result(self,
                        strategy: "SupervisedTemplate") -> "MetricResult":
        self.steps.append(self.global_it_counter)
        task2label2count = self.labels_repartition.result()
        # Record the new per-label counts, back-filling labels seen for
        # the first time with zeros for the earlier steps.
        for task, label2count in task2label2count.items():
            for label, count in label2count.items():
                self.task2label2counts[task].setdefault(
                    label, [0] * (len(self.steps) - 2)
                ).extend((count, count))
        # Pad every label's series so all of them share the same length.
        for task, label2counts in self.task2label2counts.items():
            for label, counts in label2counts.items():
                counts.extend([0] * (len(self.steps) - len(counts)))
        return [
            MetricValue(
                self,
                name=f"Repartition"
                f"/{self._mode}_phase"
                f"/{stream_type(strategy.experience)}_stream"
                f"/Task_{task:03}",
                value=AlternativeValues(
                    self.image_creator(label2counts, self.steps),
                    label2counts,
                ) if self.image_creator is not None else label2counts,
                x_plot=strategy.clock.train_iterations,
            ) for task, label2counts in self.task2label2counts.items()
        ]
Example #7
    def _package_result(self, strategy: 'PluggableStrategy') -> MetricResult:
        ram_usage = self.result()

        metric_name = get_metric_name(self, strategy, add_experience=True)
        plot_x_position = self._next_x_position(metric_name)

        return [MetricValue(self, metric_name, ram_usage, plot_x_position)]
Example #8
    def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
        exp_time = self.result()

        metric_name = get_metric_name(self, strategy, add_experience=True)
        plot_x_position = self.get_global_counter()

        return [MetricValue(self, metric_name, exp_time, plot_x_position)]
Example #9
    def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
        metric_value = self.result()

        metric_name = get_metric_name(self, strategy)
        plot_x_position = self._next_x_position(metric_name)

        return [MetricValue(self, metric_name, metric_value, plot_x_position)]
Example #10
    def _package_result(self, strategy: 'PluggableStrategy') -> 'MetricResult':
        phase_name, _ = phase_and_task(strategy)
        experience_gpu = self.result()

        metric_name = 'GPU_usage/{}'.format(phase_name)
        plot_x_position = self._next_x_position(metric_name)

        return [MetricValue(self, metric_name, experience_gpu, plot_x_position)]
Example #11
    def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
        average_epoch_time = self.result()

        metric_name = get_metric_name(self, strategy)
        plot_x_position = self.get_global_counter()

        return [
            MetricValue(self, metric_name, average_epoch_time, plot_x_position)
        ]
Example #12
    def _package_result(self, strategy: "BaseStrategy") -> MetricResult:
        metric_value = self.result()

        phase_name, _ = phase_and_task(strategy)
        stream = stream_type(strategy.experience)
        metric_name = "{}/{}_phase/{}_stream".format(str(self), phase_name,
                                                     stream)
        plot_x_position = strategy.clock.train_iterations

        return [MetricValue(self, metric_name, metric_value, plot_x_position)]
Example #13
    def _package_result(self, strategy: "BaseStrategy") -> "MetricResult":
        label_cat2mean_score: Dict[LabelCat, float] = self.result()

        for label_cat, m in label_cat2mean_score.items():
            self.label_cat2step2mean[label_cat][self.global_it_counter] = m

        base_metric_name = get_metric_name(
            self, strategy, add_experience=False, add_task=False
        )

        rv = [
            MetricValue(
                self,
                name=base_metric_name + f"/{label_cat}_classes",
                value=m,
                x_plot=self.global_it_counter,
            )
            for label_cat, m in label_cat2mean_score.items()
        ]
        if "old" in label_cat2mean_score and "new" in label_cat2mean_score:
            rv.append(
                MetricValue(
                    self,
                    name=base_metric_name + "/new_old_diff",
                    value=label_cat2mean_score["new"]
                    - label_cat2mean_score["old"],
                    x_plot=self.global_it_counter,
                )
            )
        if self.image_creator is not None:
            rv.append(
                MetricValue(
                    self,
                    name=base_metric_name,
                    value=AlternativeValues(
                        self.image_creator(self.label_cat2step2mean),
                        self.label_cat2step2mean,
                    ),
                    x_plot=self.global_it_counter,
                )
            )

        return rv
Example #14
    def _package_result(self, strategy) -> "MetricResult":
        weights = self.result()
        metric_name = get_metric_name(
            self, strategy, add_experience=True, add_task=False
        )
        return [
            MetricValue(
                self, metric_name, weights, strategy.clock.train_iterations
            )
        ]
Example #15
    def _package_result(self, strategy: "BaseStrategy") -> MetricResult:

        shifting = self.result(k=self.eval_exp_id)
        metric_name = get_metric_name(self, strategy, add_experience=True)
        plot_x_position = self.get_global_counter()

        metric_values = [
            MetricValue(self, metric_name, shifting, plot_x_position)
        ]
        return metric_values
Example #16
    def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
        exp_cm = self.result()
        phase_name, _ = phase_and_task(strategy)
        stream = stream_type(strategy.experience)
        metric_name = '{}/{}_phase/{}_stream' \
            .format(str(self),
                    phase_name,
                    stream)
        plot_x_position = self._next_x_position(metric_name)

        if self._save_image:
            cm_image = self._image_creator(exp_cm)
            metric_representation = MetricValue(
                self, metric_name, AlternativeValues(cm_image, exp_cm),
                plot_x_position)
        else:
            metric_representation = MetricValue(
                self, metric_name, exp_cm, plot_x_position)

        return [metric_representation]
Example #17
    def _package_result(self, strategy: 'PluggableStrategy') \
            -> MetricResult:

        forgetting = self.result()
        metric_name = get_metric_name(self, strategy, add_experience=True)
        plot_x_position = self._next_x_position(metric_name)

        metric_values = [
            MetricValue(self, metric_name, forgetting, plot_x_position)
        ]
        return metric_values
Example #18
    def _package_result(self, strategy: 'PluggableStrategy') -> MetricResult:
        ram_usage = self.result()

        phase_name, _ = phase_and_task(strategy)
        stream = stream_type(strategy.experience)
        metric_name = '{}/{}_phase/{}_stream' \
            .format(str(self),
                    phase_name,
                    stream)
        plot_x_position = self._next_x_position(metric_name)

        return [MetricValue(self, metric_name, ram_usage, plot_x_position)]
Example #19
    def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
        exp_disk = self.result()

        phase_name, _ = phase_and_task(strategy)
        stream = stream_type(strategy.experience)
        metric_name = '{}/{}_phase/{}_stream' \
            .format(str(self),
                    phase_name,
                    stream)
        plot_x_position = self.get_global_counter()

        return [MetricValue(self, metric_name, exp_disk, plot_x_position)]
Example #20
    def _package_result(self, strategy: "BaseStrategy") -> MetricResult:
        # Only after the previous experience was trained on can we return the
        # forward transfer metric for this experience.
        result = self.result(k=self.eval_exp_id)
        if result is not None:
            metric_name = get_metric_name(self, strategy, add_experience=True)
            plot_x_position = strategy.clock.train_iterations

            metric_values = [
                MetricValue(self, metric_name, result, plot_x_position)
            ]
            return metric_values
Example #21
    def _package_result(self, strategy: 'BaseStrategy') -> \
            MetricResult:
        metric_value = self.result()
        plot_x_position = self.get_global_counter()
        results = []
        for k, v in metric_value.items():
            metric_name = get_metric_name(self,
                                          strategy,
                                          add_experience=False,
                                          add_task=k)
            results.append(MetricValue(self, metric_name, v, plot_x_position))

        return results
Example #22
    def _package_result(self, strategy: "SupervisedTemplate") -> MetricResult:
        # Check whether the evaluation experience was already encountered
        # at training time before the last training. If not, forgetting
        # should not be returned.
        forgetting = self.result(k=self.eval_exp_id)
        if forgetting is not None:
            metric_name = get_metric_name(self, strategy, add_experience=True)
            plot_x_position = strategy.clock.train_iterations

            metric_values = [
                MetricValue(self, metric_name, forgetting, plot_x_position)
            ]
            return metric_values
Example #23
    def _package_result(self, strategy: 'BaseStrategy') \
            -> MetricResult:
        # Check whether the evaluation experience was already encountered
        # at training time before the last training. If not, forgetting
        # should not be returned.
        forgetting = self.result(k=self.eval_exp_id)
        if forgetting is not None:
            metric_name = get_metric_name(self, strategy, add_experience=True)
            plot_x_position = self.get_global_counter()

            metric_values = [
                MetricValue(self, metric_name, forgetting, plot_x_position)
            ]
            return metric_values
Example #24
    def _make_grid_sample(self, strategy: "BaseStrategy") -> "MetricResult":
        self._load_sorted_images(strategy)

        return [
            MetricValue(
                self,
                name=get_metric_name(
                    self,
                    strategy,
                    add_experience=self.mode == "eval",
                    add_task=True,
                ),
                value=TensorImage(
                    make_grid(list(self.images),
                              normalize=False,
                              nrow=self.n_cols)),
                x_plot=strategy.clock.train_iterations,
            )
        ]
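
The grid construction in Example #24 can be reproduced standalone. The sketch
below assumes torchvision is installed; the random 3x32x32 batch is purely
illustrative.

    import torch
    from torchvision.utils import make_grid

    from avalanche.evaluation.metric_results import TensorImage

    images = [torch.rand(3, 32, 32) for _ in range(8)]  # stand-in batch
    grid = make_grid(images, normalize=False, nrow=4)   # one 3xHxW tensor
    image_value = TensorImage(grid)  # loggers render this wrapper as an image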
Example #25
    def _package_result(self, strategy):
        base_metric_name = get_metric_name(self,
                                           strategy,
                                           add_experience=True,
                                           add_task=False)
        plot_x_position = strategy.clock.train_iterations
        result_dict = self.result()

        if result_dict is None:
            return

        metric_values = []
        for iou, iou_dict in result_dict.items():
            for metric_key, metric_value in iou_dict.items():
                metric_name = base_metric_name + f"/{iou}/{metric_key}"
                metric_values.append(
                    MetricValue(self, metric_name, metric_value,
                                plot_x_position))

        return metric_values
Example #26
    def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
        outputs, targets = self.result()
        phase_name, _ = phase_and_task(strategy)
        stream = stream_type(strategy.experience)
        metric_name = '{}/{}_phase/{}_stream' \
            .format(str(self),
                    phase_name,
                    stream)
        plot_x_position = self.get_global_counter()

        # compute predicted classes
        preds = torch.argmax(outputs, dim=1).cpu().numpy()
        result = wandb.plot.confusion_matrix(preds=preds,
                                             y_true=targets.cpu().numpy(),
                                             class_names=self.class_names)

        metric_representation = MetricValue(
            self, metric_name, AlternativeValues(result),
            plot_x_position)

        return [metric_representation]
Example #27
    def _package_result(self, strategy) -> 'MetricResult':
        weights = self.result()
        metric_name = get_metric_name(self, strategy,
                                      add_experience=True, add_task=False)
        return [MetricValue(self, metric_name, weights,
                            self.get_global_counter())]
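
Across all of these examples the pattern is the same: a plugin metric computes
its value in result(), builds a name with get_metric_name() or a manual
'{}/{}_phase/{}_stream' template, picks an x position (a global counter or
strategy.clock.train_iterations), and wraps everything in a list of
MetricValue objects. The sketch below ties the pieces together; the EpochCount
metric is hypothetical and assumes a recent Avalanche version, but the base
class, helpers, and callback names are the ones used throughout the examples
above.

    from avalanche.evaluation import PluginMetric
    from avalanche.evaluation.metric_results import MetricResult, MetricValue
    from avalanche.evaluation.metric_utils import get_metric_name

    class EpochCount(PluginMetric[int]):
        """Counts the epochs run in the current training experience."""

        def __init__(self):
            super().__init__()
            self._count = 0

        def reset(self) -> None:
            self._count = 0

        def result(self) -> int:
            return self._count

        def before_training_exp(self, strategy) -> None:
            self.reset()

        def after_training_epoch(self, strategy) -> None:
            self._count += 1

        def after_training_exp(self, strategy) -> MetricResult:
            return self._package_result(strategy)

        def _package_result(self, strategy) -> MetricResult:
            # Same packaging pattern as the examples above: name the
            # metric, pick an x position, and wrap the value in a list.
            metric_name = get_metric_name(self, strategy)
            return [MetricValue(self, metric_name, self.result(),
                                strategy.clock.train_iterations)]

        def __str__(self):
            return "EpochCount"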