def __init__(self, slda_model, criterion, input_size, num_classes,
             output_layer_name=None, shrinkage_param=1e-4,
             streaming_update_sigma=True, train_epochs: int = 1,
             train_mb_size: int = 1, eval_mb_size: int = 1, device='cpu',
             plugins: Optional[Sequence['StrategyPlugin']] = None,
             evaluator=default_logger, eval_every=-1):
    """Init function for the SLDA model.

    :param slda_model: a PyTorch model
    :param criterion: loss function
    :param output_layer_name: if not None, wrap the model to retrieve
        only the `output_layer_name` output. If None, the strategy
        assumes that the model already produces a valid output.
        You can use the `FeatureExtractorBackbone` class to create your
        own SLDA-compatible model.
    :param input_size: feature dimension
    :param num_classes: number of total classes in the stream
    :param train_mb_size: batch size for the feature extractor during
        training. Fit will be called on a single pattern at a time.
    :param eval_mb_size: batch size for inference
    :param shrinkage_param: value of the shrinkage parameter
    :param streaming_update_sigma: True if the covariance matrix Sigma
        is plastic, i.e. updated during streaming; False to keep it
        fixed after the base initialization.
    :param plugins: list of StrategyPlugins
    :param evaluator: Evaluation Plugin instance
    :param eval_every: run eval every `eval_every` epochs.
        See `BaseStrategy` for details.
    """
    if plugins is None:
        plugins = []

    slda_model = slda_model.eval()
    if output_layer_name is not None:
        slda_model = FeatureExtractorBackbone(
            slda_model.to(device), output_layer_name).eval()

    # SLDA updates its weights in closed form, so no optimizer is
    # needed: `None` is passed as the optimizer argument.
    super(StreamingLDA, self).__init__(
        slda_model, None, criterion, train_mb_size, train_epochs,
        eval_mb_size, device=device, plugins=plugins,
        evaluator=evaluator, eval_every=eval_every)

    # SLDA parameters
    self.input_size = input_size
    self.shrinkage_param = shrinkage_param
    self.streaming_update_sigma = streaming_update_sigma

    # setup weights for SLDA: per-class means (muK), per-class counts
    # (cK), the shared covariance (Sigma) and its cached precision
    # matrix (Lambda), recomputed lazily via prev_num_updates
    self.muK = torch.zeros((num_classes, input_size)).to(self.device)
    self.cK = torch.zeros(num_classes).to(self.device)
    self.Sigma = torch.ones((input_size, input_size)).to(self.device)
    self.num_updates = 0
    self.Lambda = torch.zeros_like(self.Sigma).to(self.device)
    self.prev_num_updates = -1
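# A minimal usage sketch (an assumption, not part of the source above): how
# one might instantiate the strategy with a torchvision ResNet-18 backbone.
# The layer name "avgpool", the 512-dim feature size, and the 10-class
# stream are illustrative placeholders, not values fixed by the strategy.
def _example_slda_setup():
    from torch.nn import CrossEntropyLoss
    from torchvision.models import resnet18

    model = resnet18()  # any PyTorch model works as the backbone
    strategy = StreamingLDA(
        slda_model=model,
        criterion=CrossEntropyLoss(),
        input_size=512,               # dimension of the "avgpool" features
        num_classes=10,               # total classes expected in the stream
        output_layer_name="avgpool",  # wrapped via FeatureExtractorBackbone
        streaming_update_sigma=True,  # keep Sigma plastic during streaming
        device="cpu",
    )
    return strategy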
class FeatureBasedExemplarsSelectionStrategy(ExemplarsSelectionStrategy, ABC):
    """Base class to select exemplars from their features"""

    def __init__(self, model: Module, layer_name: str):
        self.feature_extractor = FeatureExtractorBackbone(model, layer_name)

    @torch.no_grad()
    def make_sorted_indices(
            self, strategy: "SupervisedTemplate", data: AvalancheDataset
    ) -> List[int]:
        # Extract features batch by batch, then delegate the actual
        # ranking to the subclass implementation.
        self.feature_extractor.eval()
        features = cat([
            self.feature_extractor(x.to(strategy.device))
            for x, *_ in DataLoader(data, batch_size=strategy.eval_mb_size)
        ])
        return self.make_sorted_indices_from_features(features)

    @abstractmethod
    def make_sorted_indices_from_features(
            self, features: Tensor) -> List[int]:
        """Sort the dataset indices based on the extracted features."""
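# A minimal sketch (an assumption, not code from the library) of a concrete
# subclass: exemplars whose features lie closest to the mean feature vector
# are ranked first. The class name `ClosestToMeanSelectionStrategy` is
# hypothetical; it only illustrates how `make_sorted_indices_from_features`
# is meant to be implemented by subclasses.
class ClosestToMeanSelectionStrategy(FeatureBasedExemplarsSelectionStrategy):
    """Rank exemplars by their L2 distance to the mean feature vector."""

    def make_sorted_indices_from_features(
            self, features: Tensor) -> List[int]:
        center = features.mean(dim=0)                # mean over all exemplars
        distances = (features - center).norm(dim=1)  # L2 distance per sample
        return distances.argsort().tolist()          # closest-to-mean first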