# Shared imports for the snippets below, following Avalanche's module layout.
# LabelCat and the strategy classes are annotation-only here; LabelCat is
# defined in the metric's own module.
from typing import Dict, TYPE_CHECKING

from torchvision.utils import make_grid

from avalanche.evaluation.metric_results import (
    AlternativeValues,
    MetricResult,
    MetricValue,
    TensorImage,
)
from avalanche.evaluation.metric_utils import get_metric_name

if TYPE_CHECKING:
    from avalanche.training.strategies import BaseStrategy


def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
    metric_value = self.result()
    metric_name = get_metric_name(self, strategy)
    plot_x_position = self.get_global_counter()
    return [MetricValue(self, metric_name, metric_value, plot_x_position)]

def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
    exp_cpu = self.result()
    metric_name = get_metric_name(self, strategy, add_experience=True)
    plot_x_position = self._next_x_position(metric_name)
    return [MetricValue(self, metric_name, exp_cpu, plot_x_position)]

def _package_result(self, strategy: 'PluggableStrategy') -> MetricResult:
    exp_time = self.result()
    metric_name = get_metric_name(self, strategy)
    plot_x_position = self._next_x_position(metric_name)
    return [MetricValue(self, metric_name, exp_time, plot_x_position)]

def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
    exp_time = self.result()
    metric_name = get_metric_name(self, strategy, add_experience=True)
    plot_x_position = self.get_global_counter()
    return [MetricValue(self, metric_name, exp_time, plot_x_position)]

def _package_result(self, strategy: 'PluggableStrategy') -> MetricResult:
    ram_usage = self.result()
    metric_name = get_metric_name(self, strategy, add_experience=True)
    plot_x_position = self._next_x_position(metric_name)
    return [MetricValue(self, metric_name, ram_usage, plot_x_position)]

def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
    metric_value = self.result()
    metric_name = get_metric_name(self, strategy)
    plot_x_position = self._next_x_position(metric_name)
    return [MetricValue(self, metric_name, metric_value, plot_x_position)]
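
# Two x-axis policies appear across these methods: a single counter shared by
# all metrics (get_global_counter) and an independent counter per metric name
# (_next_x_position). Below is a minimal sketch of the per-name policy,
# assuming a dict-based counter held on the metric object; it is illustrative
# only, not the library's actual implementation.
from collections import defaultdict


class _PerMetricCounterSketch:
    def __init__(self):
        # One monotonically increasing counter per fully qualified metric name.
        self._metric_x_counters = defaultdict(int)

    def _next_x_position(self, metric_name: str) -> int:
        # Return the current x position for this name, then advance it so the
        # next emission of the same metric lands one step to the right.
        x = self._metric_x_counters[metric_name]
        self._metric_x_counters[metric_name] += 1
        return x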

def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
    average_epoch_time = self.result()
    metric_name = get_metric_name(self, strategy)
    plot_x_position = self.get_global_counter()
    return [
        MetricValue(self, metric_name, average_epoch_time, plot_x_position)
    ]
def _package_result(self, strategy) -> "MetricResult": weights = self.result() metric_name = get_metric_name( self, strategy, add_experience=True, add_task=False ) return [ MetricValue( self, metric_name, weights, strategy.clock.train_iterations ) ]
def _package_result(self, strategy: "BaseStrategy") -> MetricResult: shifting = self.result(k=self.eval_exp_id) metric_name = get_metric_name(self, strategy, add_experience=True) plot_x_position = self.get_global_counter() metric_values = [ MetricValue(self, metric_name, shifting, plot_x_position) ] return metric_values

def _package_result(self, strategy: 'PluggableStrategy') -> MetricResult:
    forgetting = self.result()
    metric_name = get_metric_name(self, strategy, add_experience=True)
    plot_x_position = self._next_x_position(metric_name)
    metric_values = [
        MetricValue(self, metric_name, forgetting, plot_x_position)
    ]
    return metric_values
def _package_result(self, strategy: "BaseStrategy") -> MetricResult: # Only after the previous experience was trained on can we return the # forward transfer metric for this experience. result = self.result(k=self.eval_exp_id) if result is not None: metric_name = get_metric_name(self, strategy, add_experience=True) plot_x_position = strategy.clock.train_iterations metric_values = [ MetricValue(self, metric_name, result, plot_x_position) ] return metric_values

def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
    metric_value = self.result()
    plot_x_position = self.get_global_counter()
    results = []
    for k, v in metric_value.items():
        metric_name = get_metric_name(
            self, strategy, add_experience=False, add_task=k
        )
        results.append(MetricValue(self, metric_name, v, plot_x_position))
    return results
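
# For the dict-returning variant above, each task label turns into its own
# curve. A hedged illustration of the expansion, assuming result() returns a
# {task_label: value} mapping (the exact name scheme produced by
# get_metric_name may differ):
#
#     self.result()   ->  {0: 0.91, 1: 0.84}
#     emitted names   ->  ".../Task000" with value 0.91,
#                         ".../Task001" with value 0.84,
#                         both at the same plot_x_position.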
def _package_result(self, strategy: "SupervisedTemplate") -> MetricResult: # this checks if the evaluation experience has been # already encountered at training time # before the last training. # If not, forgetting should not be returned. forgetting = self.result(k=self.eval_exp_id) if forgetting is not None: metric_name = get_metric_name(self, strategy, add_experience=True) plot_x_position = strategy.clock.train_iterations metric_values = [ MetricValue(self, metric_name, forgetting, plot_x_position) ] return metric_values

def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
    # Check whether the evaluation experience had already been encountered
    # at training time before the last training step.
    # If not, forgetting should not be returned.
    forgetting = self.result(k=self.eval_exp_id)
    if forgetting is not None:
        metric_name = get_metric_name(self, strategy, add_experience=True)
        plot_x_position = self.get_global_counter()
        metric_values = [
            MetricValue(self, metric_name, forgetting, plot_x_position)
        ]
        return metric_values
def _make_grid_sample(self, strategy: "BaseStrategy") -> "MetricResult": self._load_sorted_images(strategy) return [ MetricValue( self, name=get_metric_name( self, strategy, add_experience=self.mode == "eval", add_task=True, ), value=TensorImage( make_grid(list(self.images), normalize=False, nrow=self.n_cols)), x_plot=strategy.clock.train_iterations, ) ]
def _package_result(self, strategy: "BaseStrategy") -> "MetricResult": label_cat2mean_score: Dict[LabelCat, float] = self.result() for label_cat, m in label_cat2mean_score.items(): self.label_cat2step2mean[label_cat][self.global_it_counter] = m base_metric_name = get_metric_name( self, strategy, add_experience=False, add_task=False ) rv = [ MetricValue( self, name=base_metric_name + f"/{label_cat}_classes", value=m, x_plot=self.global_it_counter, ) for label_cat, m in label_cat2mean_score.items() ] if "old" in label_cat2mean_score and "new" in label_cat2mean_score: rv.append( MetricValue( self, name=base_metric_name + f"/new_old_diff", value=label_cat2mean_score["new"] - label_cat2mean_score["old"], x_plot=self.global_it_counter, ) ) if self.image_creator is not None: rv.append( MetricValue( self, name=base_metric_name, value=AlternativeValues( self.image_creator(self.label_cat2step2mean), self.label_cat2step2mean, ), x_plot=self.global_it_counter, ) ) return rv

def _package_result(self, strategy) -> MetricResult:
    base_metric_name = get_metric_name(
        self, strategy, add_experience=True, add_task=False
    )
    plot_x_position = strategy.clock.train_iterations

    result_dict = self.result()
    if result_dict is None:
        return None

    metric_values = []
    for iou, iou_dict in result_dict.items():
        for metric_key, metric_value in iou_dict.items():
            metric_name = base_metric_name + f"/{iou}/{metric_key}"
            metric_values.append(
                MetricValue(self, metric_name, metric_value, plot_x_position)
            )
    return metric_values

def _package_result(self, strategy) -> 'MetricResult':
    weights = self.result()
    metric_name = get_metric_name(
        self, strategy, add_experience=True, add_task=False
    )
    return [
        MetricValue(self, metric_name, weights, self.get_global_counter())
    ]
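
# To place the variants above in context: a minimal, self-contained sketch of
# a plugin metric that drives this packaging pattern from a training callback.
# ExampleLossMetric and its after_training_exp hook are illustrative
# stand-ins, not the actual classes these _package_result methods belong to.
class ExampleLossMetric:
    def __init__(self):
        self._loss_sum = 0.0
        self._count = 0

    def update(self, loss: float, batch_size: int) -> None:
        # Accumulate a weighted running sum, as the real metrics do through
        # their internal Mean/Accuracy helpers.
        self._loss_sum += loss * batch_size
        self._count += batch_size

    def result(self) -> float:
        return self._loss_sum / self._count if self._count else 0.0

    def reset(self) -> None:
        self._loss_sum, self._count = 0.0, 0

    def _package_result(self, strategy) -> 'MetricResult':
        # Same three-step pattern as every variant above: aggregated value,
        # fully qualified name, x-axis position.
        value = self.result()
        metric_name = get_metric_name(self, strategy, add_experience=True)
        plot_x_position = strategy.clock.train_iterations
        return [MetricValue(self, metric_name, value, plot_x_position)]

    def after_training_exp(self, strategy) -> 'MetricResult':
        # Emit once per training experience, then reset for the next one.
        packaged = self._package_result(strategy)
        self.reset()
        return packaged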