def __init__(self,
             coefficient: Optional[float] = None,
             name: str = "Mixup",
             options: Optional["MixupTask.Options"] = None):
    super().__init__(coefficient=coefficient, name=name, options=options)
    self.options: MixupTask.Options
    logger = get_logger(__file__)
    # Exponential moving average versions of the encoder and output head.
    self.mean_encoder: nn.Module = deepcopy(AuxiliaryTask.encoder)
    self.mean_classifier: nn.Module = deepcopy(AuxiliaryTask.classifier)
    # Bookkeeping used to schedule the consistency loss and the EMA updates.
    self.previous_task: Optional[Task] = None
    self.epoch_in_task: Optional[int] = 0
    self.epoch_length: Optional[int] = 0
    self.update_number: Optional[int] = 0
    # Consistency loss between the (softmaxed) student and mean-teacher logits.
    self.consistency_criterion = softmax_mse_loss
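
# For context, a minimal sketch of the exponential-moving-average update that a
# mean-teacher setup like the one above typically performs after each optimizer
# step. The name `update_ema` and the `alpha`/`global_step` parameters are
# illustrative assumptions, not part of this class.
import torch
from torch import nn

@torch.no_grad()
def update_ema(ema_module: nn.Module, module: nn.Module, alpha: float, global_step: int) -> None:
    """Move `ema_module`'s weights towards `module`'s weights."""
    # Ramp the decay up from 0 towards `alpha` during early training, as done
    # in the Mean Teacher paper (Tarvainen & Valpola, 2017).
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_module.parameters(), module.parameters()):
        ema_param.mul_(alpha).add_(param, alpha=1 - alpha)

# Hypothetical usage after each training step, reusing attributes from above:
# update_ema(self.mean_encoder, AuxiliaryTask.encoder, alpha=0.999, global_step=self.update_number)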
from typing import Dict, List, Union, ClassVar
from pathlib import Path

import wandb
import matplotlib.pyplot as plt
from simple_parsing import field, list_field, mutable_field

from sequoia.common.loss import Loss
from sequoia.common.metrics import ClassificationMetrics, Metrics, RegressionMetrics
from sequoia.settings.assumptions.incremental import IncrementalSetting
from sequoia.utils.logging_utils import get_logger
from sequoia.utils.plotting import PlotSectionLabel, autolabel
from sequoia.utils.utils import mean

from .. import Results

logger = get_logger(__file__)


class ClassIncrementalResults(IncrementalSetting.Results):
    """Results for a ClassIncrementalSetting.

    The main objective in this setting is the average test accuracy over all
    tasks.

    The plots to generate are:
    - Accuracy per task
    - Average test accuracy over the course of testing
    - Confusion matrix at the end of testing

    All of these will be created from the list of test metrics
    (classification metrics, for now).
    """
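
# A hypothetical sketch (not the actual implementation) of how the main
# objective described in the docstring could be computed: the average of the
# per-task test accuracies. `task_metrics`, holding one ClassificationMetrics
# object per task, is an assumed argument, not part of the original file.
def average_test_accuracy(task_metrics: List[ClassificationMetrics]) -> float:
    # `accuracy` is the fraction of correctly classified test examples for one
    # task; the objective averages it across all tasks.
    return mean([metrics.accuracy for metrics in task_metrics])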