    def plot_tsne(self):
        if self.folder_result:
            loader = self.inc_dataset.get_custom_loader(
                [], memory=self.get_memory())[1]
            embeddings, targets = utils.extract_features(self._network, loader)
            utils.plot_tsne(
                os.path.join(self.folder_result, "tsne_{}".format(self._task)),
                embeddings, targets)
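# Hedged usage sketch: in this codebase `plot_tsne` would typically be called
# once per task after training, e.g. `model.plot_tsne()`, writing a t-SNE
# figure of the memory embeddings under `folder_result`.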
def compute_accuracy(model, loader, class_means):
    features, targets_ = utils.extract_features(model, loader)

    # L2-normalize each feature vector; EPSILON (a small module-level
    # constant) guards against division by zero.
    features = (features.T /
                (np.linalg.norm(features.T, axis=0) + EPSILON)).T

    # iCaRL scores: the negated squared euclidean distance to each class
    # mean, so that the closest mean yields the highest score.
    sqd = cdist(class_means, features, 'sqeuclidean')
    score_icarl = (-sqd).T

    return score_icarl, targets_
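# Minimal usage sketch, assuming a trained `network`, a `test_loader` and the
# `class_means` produced by `build_examplars` below:
#
#   ypreds, ytrue = compute_accuracy(network, test_loader, class_means)
#   top1_accuracy = (ypreds.argmax(axis=1) == ytrue).mean()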
def _kmeans_selection(features, nb_examplars, n_clusters=4):
    """Selects, for each k-means cluster, the examplars closest to its center,
    so that the memory covers the different modes of the class."""
    from sklearn.cluster import KMeans

    model = KMeans(n_clusters=n_clusters)
    model.fit(features)

    indexes = []
    for i in range(n_clusters):
        cluster = model.cluster_centers_[i]
        distances = _l2_distance(cluster, features)

        indexes.append(distances.argsort()[:nb_examplars // n_clusters])

    return np.concatenate(indexes)


def minimize_confusion(inc_dataset, network, memory, class_index, nb_examplars):
    _, new_loader = inc_dataset.get_custom_loader(class_index, mode="test")
    new_features, _ = utils.extract_features(network, new_loader)

    if memory is None:
        # First task: there are no old classes to be confused with yet.
        #return icarl_selection(new_features, nb_examplars)
        return np.random.permutation(new_features.shape[0])[:nb_examplars]

    new_mean = np.mean(new_features, axis=0)
    distances = _l2_distance(new_mean, new_features)

    data_memory, targets_memory = memory
    for indexes in _split_memory_per_class(targets_memory):
        _, old_loader = inc_dataset.get_custom_loader(
            [], memory=(data_memory[indexes], targets_memory[indexes]), mode="test"
        )

        old_features, _ = utils.extract_features(network, old_loader)
        old_mean = np.mean(old_features, axis=0)

        # The larger the distance to an old class mean, the lower the final
        # score, hence the more likely the sample is to be selected.
        distances -= _l2_distance(old_mean, new_features)

    return distances.argsort()[:int(nb_examplars)]
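# `_l2_distance` and `_split_memory_per_class` are module helpers assumed to
# be defined alongside these functions; a minimal sketch of the former:
#
#   def _l2_distance(x, y):
#       # Squared euclidean distance between `x` and each row of `y`.
#       return np.power(x - y, 2).sum(-1)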
    def add_imprinted_classes(self,
                              class_indexes,
                              inc_dataset,
                              network,
                              multi_class_diff="normal",
                              type=None):
        if self.proxy_per_class > 1:
            logger.info("Multi-class differentiation: {}.".format(multi_class_diff))

        # New weights are rescaled to the average norm of the existing class
        # weights so that their logits start on a comparable scale.
        weights_norm = self.weights.data.norm(dim=1, keepdim=True)
        avg_weights_norm = torch.mean(weights_norm, dim=0).cpu()

        new_weights = []
        for class_index in class_indexes:
            _, loader = inc_dataset.get_custom_loader([class_index])
            features, _ = utils.extract_features(
                network, loader, use_sim_clr=self.args['use_sim_clr'])

            # Imprinted embedding: the L2-normalized mean of the class'
            # L2-normalized features.
            features_normalized = F.normalize(torch.from_numpy(features),
                                              p=2,
                                              dim=1)
            class_embeddings = torch.mean(features_normalized, dim=0)
            class_embeddings = F.normalize(class_embeddings, dim=0, p=2)

            if self.proxy_per_class == 1:
                new_weights.append(class_embeddings * avg_weights_norm)
            else:
                if multi_class_diff == "normal":
                    # Sample each proxy from a normal distribution centered
                    # on the class embedding, with the empirical feature std.
                    std = torch.std(features_normalized, dim=0)
                    for _ in range(self.proxy_per_class):
                        new_weights.append(
                            torch.normal(class_embeddings, std) *
                            avg_weights_norm)
                elif multi_class_diff == "kmeans":
                    # Use the k-means cluster centers of the class features
                    # as the proxies.
                    clusterizer = KMeans(n_clusters=self.proxy_per_class)
                    clusterizer.fit(features_normalized.numpy())

                    for center in clusterizer.cluster_centers_:
                        new_weights.append(
                            torch.tensor(center) * avg_weights_norm)
                else:
                    raise ValueError(
                        "Unknown multi class differentiation for imprinted weights: {}."
                        .format(multi_class_diff))

        new_weights = torch.stack(new_weights)
        self._weights.append(nn.Parameter(new_weights))

        self.to(self.device)
        self.n_classes += len(class_indexes)

        return self
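    # Hedged usage sketch (the class indexes are illustrative): imprint the
    # weights of a new task's classes before fine-tuning on them.
    #
    #   classifier.add_imprinted_classes(
    #       list(range(50, 60)), inc_dataset, network, multi_class_diff="kmeans")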
    def build_examplars(self,
                        inc_dataset,
                        herding_indexes,
                        memory_per_class=None,
                        data_source="train"):
        logger.info("Building & updating memory.")
        memory_per_class = memory_per_class or self._memory_per_class
        herding_indexes = copy.deepcopy(herding_indexes)

        data_memory, targets_memory = [], []
        class_means = np.zeros((self._n_classes, self._network.features_dim))

        for class_idx in range(self._n_classes):
            # We extract the features, both normal and flipped:
            inputs, loader = inc_dataset.get_custom_loader(
                class_idx, mode="test", data_source=data_source)
            features, targets = utils.extract_features(self._network, loader)
            features_flipped, _ = utils.extract_features(
                self._network,
                inc_dataset.get_custom_loader(class_idx,
                                              mode="flip",
                                              data_source=data_source)[1])

            if class_idx >= self._n_classes - self._task_size:
                # New class: select its exemplars.
                if self._herding_selection["type"] == "icarl":
                    selected_indexes = herding.icarl_selection(
                        features, memory_per_class)
                elif self._herding_selection["type"] == "closest":
                    selected_indexes = herding.closest_to_mean(
                        features, memory_per_class)
                elif self._herding_selection["type"] == "random":
                    selected_indexes = herding.random(features,
                                                      memory_per_class)
                elif self._herding_selection["type"] == "kmeans":
                    selected_indexes = herding.kmeans(
                        features,
                        memory_per_class,
                        k=self._herding_selection["k"])
                elif self._herding_selection["type"] == "confusion":
                    selected_indexes = herding.confusion(
                        *self._last_results,
                        memory_per_class,
                        class_id=class_idx,
                        minimize_confusion=self._herding_selection["minimize_confusion"])
                elif self._herding_selection["type"] == "var_ratio":
                    selected_indexes = herding.var_ratio(
                        memory_per_class, self._network, loader,
                        **self._herding_selection)
                elif self._herding_selection["type"] == "mcbn":
                    selected_indexes = herding.mcbn(memory_per_class,
                                                    self._network, loader,
                                                    **self._herding_selection)
                else:
                    raise ValueError("Unknown herding selection {}.".format(
                        self._herding_selection))

                herding_indexes.append(selected_indexes)

            # Reducing exemplars to the current memory budget:
            selected_indexes = herding_indexes[class_idx][:memory_per_class]
            herding_indexes[class_idx] = selected_indexes

            # Re-computing the exemplar mean (it may have changed during training):
            examplar_mean = self.compute_examplar_mean(features,
                                                       features_flipped,
                                                       selected_indexes,
                                                       memory_per_class)

            data_memory.append(inputs[selected_indexes])
            targets_memory.append(targets[selected_indexes])

            class_means[class_idx, :] = examplar_mean

        data_memory = np.concatenate(data_memory)
        targets_memory = np.concatenate(targets_memory)

        return data_memory, targets_memory, herding_indexes, class_means
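    # Hedged usage sketch, e.g. at the end of each incremental task:
    #
    #   self._data_memory, self._targets_memory, self._herding_indexes, \
    #       self._class_means = self.build_examplars(self.inc_dataset,
    #                                                self._herding_indexes)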