Example #1
0
    def herding_construct_exemplar_set(self, indexes, images, label, m):
        """Select m exemplars for class `label` via iCaRL herding.

        Args:
            indexes: dataset-wide indices corresponding to `images`.
            images: images of the current class.
            label: class label shared by all `images`.
            m: number of exemplars to select.

        Side effects: appends the ordered list of selected indices to
        self.exemplar_sets and feeds the selected un-normalized features
        to self.generator.
        """
        exemplar_set = ExemplarSet(images, [label] * len(images),
                                   utils.get_train_eval_transforms()[1])
        loader = utils.get_eval_loader(exemplar_set, self.batch_size)

        self.net.eval()
        flatten_features = []
        not_normalized_features = []
        with torch.no_grad():
            # Loop variable renamed so it no longer shadows the `images`
            # parameter (the old code clobbered it).
            for batch, _ in loader:
                batch = batch.to(self.device)
                features = self._extract_features(batch, normalize=False)
                not_normalized_features.append(features.detach())
                # L2-normalize each feature row before herding.
                features = features / features.norm(dim=1).unsqueeze(1)
                flatten_features.append(features)

            # Un-normalized features are kept to feed the generator below.
            not_normalized_features = torch.cat(
                not_normalized_features).cpu().numpy()

            # Class mean of the normalized features, re-normalized (iCaRL).
            flatten_features = torch.cat(flatten_features).cpu().numpy()
            class_mean = np.mean(flatten_features, axis=0)
            class_mean = class_mean / np.linalg.norm(class_mean)
            flatten_features = torch.from_numpy(flatten_features).to(
                self.device)

        # BUGFIX: exemplars were accumulated in a set(), which destroys the
        # herding selection order before list(exemplars) was stored; keep an
        # ordered list plus a set for O(1) duplicate checks.
        exemplars = []          # selected indices, in herding order
        chosen = set()          # duplicate guard
        exemplar_feature = []   # features of already-selected exemplars
        feature_to_generalize = []
        for k in range(m):
            # Sum of already-selected features (scalar 0 broadcasts at k==0).
            S = 0 if k == 0 else torch.stack(exemplar_feature).sum(0)
            mu_p = ((flatten_features + S) / (k + 1)).cpu().numpy()
            # NOTE(review): np.linalg.norm on a 2-D array is the Frobenius
            # norm (one global scale), not per-row normalization — kept
            # as-is to preserve behavior, but confirm this was intended.
            mu_p = mu_p / np.linalg.norm(mu_p)
            distances = np.sqrt(np.sum((class_mean - mu_p)**2, axis=1))
            # Take the nearest candidate that was not already selected.
            for i in np.argsort(distances):
                if indexes[i] not in chosen:
                    chosen.add(indexes[i])
                    exemplars.append(indexes[i])
                    exemplar_feature.append(flatten_features[i])
                    feature_to_generalize.append(not_normalized_features[i])
                    break

        assert len(exemplars) == m
        self.exemplar_sets.append(exemplars)
        self.generator.add_data(feature_to_generalize,
                                [label] * len(feature_to_generalize))
Example #2
0
    def _nme(self, images):
        """Classify `images` with the nearest-mean-of-exemplars rule (iCaRL).

        Recomputes the per-class exemplar means when self.compute_means is
        set (they are cached in self.exemplar_means afterwards), then
        assigns each image to the class whose mean is closest in feature
        space.

        Returns:
            Tensor of predicted class indices, shape (batch_size,).
        """
        if self.compute_means:
            exemplar_means = []
            for exemplar_class_idx in self.exemplar_sets:
                imgs, labs = self.dataset.get_items_of(exemplar_class_idx)
                exemplars = ExemplarSet(imgs, labs,
                                        utils.get_train_eval_transforms()[1])
                loader = utils.get_eval_loader(exemplars, self.batch_size)

                flatten_features = []
                with torch.no_grad():
                    for imgs, _ in loader:
                        imgs = imgs.to(self.device)
                        flatten_features.append(self._extract_features(imgs))

                    flatten_features = torch.cat(
                        flatten_features).cpu().numpy()
                    class_mean = np.mean(flatten_features, axis=0)
                    # Re-normalize the mean, as the NME rule requires.
                    class_mean = class_mean / np.linalg.norm(class_mean)
                    exemplar_means.append(
                        torch.from_numpy(class_mean).to(self.device))

            self.compute_means = False
            self.exemplar_means = exemplar_means

        means = torch.stack(self.exemplar_means)  # (n_classes, feature_size)

        with torch.no_grad():
            feature = self._extract_features(images)  # (batch, feature_size)
            # Squared Euclidean distance of every sample to every class mean
            # via broadcasting: (batch, 1, F) - (1, n_classes, F).
            # BUGFIX: the old code stacked len(images) copies of `means` and
            # then called .squeeze(), which collapsed the batch dimension
            # when batch_size == 1 and made dists.min(1) fail.
            dists = (feature.unsqueeze(1) -
                     means.unsqueeze(0)).pow(2).sum(2)  # (batch, n_classes)
            _, preds = dists.min(1)

        return preds
    def _compute_means(self):
        """Cache the L2-normalized mean feature vector of every exemplar class.

        Fills self.exemplar_means (one tensor per class, in the order of
        self.exemplar_sets) and clears the self.compute_means flag.
        """
        means = []
        for class_indexes in self.exemplar_sets:
            imgs, labs = self.dataset.get_items_of(class_indexes)
            exemplar_ds = ExemplarSet(imgs, labs,
                                      utils.get_train_eval_transforms()[1])
            loader = utils.get_eval_loader(exemplar_ds, self.batch_size)

            with torch.no_grad():
                # Extract features batch by batch, then fuse them.
                feature_batches = [
                    self._extract_features(batch.to(self.device))
                    for batch, _ in loader
                ]
                all_features = torch.cat(feature_batches).to(self.device)
                mean = all_features.mean(0)
                means.append(mean / mean.norm())

        self.compute_means = False
        self.exemplar_means = means
    def construct_exemplar_set(self,
                               single_class_dataset,
                               label,
                               m,
                               device,
                               herding=True):
        """Build the exemplar set of class `label` from its class dataset.

        Args:
            single_class_dataset: Subset containing only images of `label`.
            label: class whose exemplar set is being built.
            m: number of exemplars to select.
            device: device used for feature extraction.
            herding: if True use iCaRL herding, else uniform random sampling.

        Raises:
            ValueError: if the dataset holds fewer than m images.

        Side effects: fills self.exemplar_sets[label]['indexes'] and
        ['features'] (assumed empty on entry — TODO confirm with callers).
        """
        if len(single_class_dataset) < m:
            raise ValueError("Number of images can't be less than m")

        # Maps positions inside the subset back to dataset-wide indices.
        map_subset_to_cifar = np.array(single_class_dataset.indices)
        loader = utils.get_eval_loader(single_class_dataset, batch_size=256)

        # Extract features for the whole class once, up front.
        self.net.eval()
        features = []
        with torch.no_grad():
            for images, _ in loader:
                images = images.to(device)
                features.append(self._extract_feature(images))
            flatten_features = torch.cat(features).to(device)

        if herding:
            # Normalized class mean that herding tries to approximate.
            class_mean = flatten_features.mean(0)
            class_mean = (class_mean / class_mean.norm()).to(device)

            chosen = set()
            # Running sum of selected features replaces the per-iteration
            # torch.stack(...).sum(0) of the old code.
            running_sum = torch.zeros_like(class_mean)
            for k in range(m):
                # Mean the set would have if each candidate were added next.
                candidate_means = (flatten_features + running_sum) / (k + 1)
                # Vectorized distance to the class mean (was a Python loop
                # of torch.dist calls).
                dists = (candidate_means - class_mean).norm(dim=1)
                for i in torch.argsort(dists):
                    i = int(i)
                    if i not in chosen:
                        # BUGFIX: append only on a fresh index — the old
                        # `min_index = 0` fallback could silently append
                        # index 0 twice if no fresh index were found.
                        chosen.add(i)
                        running_sum = running_sum + flatten_features[i]
                        self.exemplar_sets[label]['indexes'].append(
                            map_subset_to_cifar[i])
                        self.exemplar_sets[label]['features'].append(
                            flatten_features[i].cpu())
                        break
        else:
            # BUGFIX: the old code drew m samples PER BATCH — yielding up to
            # m * n_batches exemplars and crashing whenever the last batch
            # held fewer than m images. Draw exactly m over the whole class.
            samples = np.random.choice(len(flatten_features), m,
                                       replace=False)
            self.exemplar_sets[label]['indexes'] = [
                map_subset_to_cifar[i] for i in samples
            ]
            self.exemplar_sets[label]['features'] = [
                flatten_features[i].cpu() for i in samples
            ]