Example #1
    def _extract_features_and_mean(self, dataloader):
        features = []

        with torch.no_grad():
            for images, _ in dataloader:
                images = images.to(Config.DEVICE)
                # Extract and L2-normalize the features of each batch.
                features.append(
                    l2_normalize(self.features_extractor(images), dim=1))

            features = torch.cat(features)
            mean = features.mean(dim=0)

        # Return the per-image features and the L2-normalized mean feature.
        return features, l2_normalize(mean)
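These examples call l2_normalize without showing its definition. A minimal sketch of what it presumably does, assuming it simply rescales a tensor to unit L2 norm along a given dimension (the default dim and the eps guard are assumptions, not taken from the examples):

    import torch

    def l2_normalize(x, dim=-1, eps=1e-12):
        # Rescale x to unit L2 norm along `dim`; eps guards against division
        # by zero. Hypothetical helper, inferred from the call sites above.
        return x / x.norm(p=2, dim=dim, keepdim=True).clamp(min=eps)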
Example #2
    def forward(self, x):
        """Compute an L2-normalized style embedding for the input batch."""
        x = self.conv_layers(x)
        # Global average pooling over the spatial dimensions.
        x = torch.mean(x, dim=[2, 3])
        x = self.lin_layers(x)
        x = l2_normalize(x)
        return x
Example #3
    def _nearest_prototype(self, centers, features):
        pred_labels = []

        for feature in features:
            # Assign each normalized feature to the closest class center
            # (squared Euclidean distance).
            norm_feature = l2_normalize(feature)
            distances = torch.pow(centers - norm_feature, 2).sum(-1)
            pred_labels.append(distances.argmin().item())

        return torch.tensor(pred_labels)
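The per-feature loop above can also be written as a single broadcast operation; a sketch, assuming features has shape (N, D) and centers has shape (C, D) (the method name is hypothetical):

    def _nearest_prototype_vectorized(self, centers, features):
        # (N, 1, D) - (C, D) broadcasts to (N, C, D); summing over the last
        # axis yields an (N, C) matrix of squared distances.
        norm_features = l2_normalize(features, dim=1)
        distances = torch.pow(norm_features.unsqueeze(1) - centers, 2).sum(-1)
        return distances.argmin(dim=1)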
Example #4
    def classify(self, batch_images):
        assert self.exemplars_means is not None
        assert self.exemplars_means.shape[0] == self.n_classes

        # Switch the feature extractor to evaluation mode.
        self.features_extractor.train(False)

        features = self.features_extractor(batch_images)
        features = l2_normalize(features, dim=1)

        if self.require_numpy:
            # Some classifier models expect numpy arrays instead of tensors.
            X = []
            for feature in features:
                X.append(feature.squeeze().cpu().detach().numpy())
            features = X

        preds = self.classifier_model.predict(features)
        return torch.tensor(preds)
Example #5
    def after_task(self, train_loader, targets):
        super().after_task(train_loader, targets)

        if self.require_train:
            if type(self.classifier_model) is CosineSimilarityClassifier:
                self.classifier_model.fit(self.exemplars_means)
            else:
                X, y = [], []
                for class_idx in range(len(self.exemplars)):
                    for exemplar in self.exemplars[class_idx]:
                        # The features extractor expects a 4-dimensional tensor (a batch),
                        # so we add an extra dimension with unsqueeze(0).
                        exemplar = exemplar.to(Config.DEVICE).unsqueeze(0)
                        # The returned feature is batched, so we remove that extra dimension with squeeze().
                        feature = self.features_extractor(exemplar).squeeze()
                        X.append(l2_normalize(feature).cpu().detach().numpy())
                    y.extend([class_idx] * len(self.exemplars[class_idx]))

                self.classifier_model.fit(X, y)
Example #6
    def _get_closest_feature(self, center, features):
        # Return the index of the normalized feature closest to the given center.
        normalized_features = l2_normalize(features, dim=1)
        distances = torch.pow(center - normalized_features, 2).sum(-1)
        return distances.argmin().item()
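Taken together, these methods suggest a nearest-class-mean style classifier over L2-normalized features. A hypothetical usage sketch (the names model, test_loader, and class_means are assumptions, not shown in the examples):

    # Hypothetical glue code built on the methods shown above.
    features, _ = model._extract_features_and_mean(test_loader)
    pred_labels = model._nearest_prototype(class_means, features)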