Example #1
    def test_eval_full(self):
        loader = DataLoader(dataset_cls=MNIST)
        n_eval = 0
        for x, y in loader.eval():
            n_eval += x.shape[0]
        self.assertEqual(loader.eval_size, 60_000)
        self.assertEqual(n_eval, loader.eval_size)
Example #2
def kwta_inverse(embedding_dim=10000, sparsity=0.05, dataset_cls=MNIST):
    loader = DataLoader(dataset_cls, transform=TransformDefault.mnist(), batch_size=32)
    images, labels = loader.sample()
    batch_size, channels, height, width = images.shape
    images_binary = (images > 0).type(torch.float32)
    sparsity_input = images_binary.mean()
    print(f"Sparsity input raw image: {sparsity_input:.3f}")
    images_flatten = images.flatten(start_dim=2)
    weights = torch.randn(images_flatten.shape[2], embedding_dim)
    embeddings = images_flatten @ weights
    kwta_embeddings = KWinnersTakeAllFunction.apply(embeddings.clone(), sparsity)
    before_inverse = kwta_embeddings @ weights.transpose(0, 1)
    restored = KWinnersTakeAllFunction.apply(before_inverse.clone(), sparsity_input)

    kwta_embeddings = kwta_embeddings.view(batch_size, channels, *factors_root(embedding_dim))
    restored = restored.view_as(images)

    images = undo_normalization(images, loader.normalize_inverse)

    viz = VisdomMighty(env="kWTA inverse")
    resize = torchvision.transforms.Resize(size=128, interpolation=Image.NEAREST)
    transformed_images = []
    for orig, kwta, restored_im in zip(images, kwta_embeddings, restored):
        transformed_images.append(resize(orig))
        transformed_images.append(resize(kwta))
        transformed_images.append(resize(restored_im))
    transformed_images = torch.stack(transformed_images, dim=0)  # (3*B, 1, 128, 128)
    print(transformed_images.shape)
    viz.images(transformed_images, win='images', nrow=3, opts=dict(
        title=f"Original | kWTA(n={embedding_dim}, sparsity={sparsity}) | Restored",
    ))
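Note: KWinnersTakeAllFunction and factors_root are helpers from the
surrounding repository and are not shown on this page. Rough sketches of the
behavior this example assumes (assumptions, not the library's code): kWTA
binarizes the top `sparsity` fraction of activations, and factors_root splits
an integer into two near-square factors so the embedding can be shown as an
image.

import torch

def kwta_sketch(x, sparsity):
    # Hypothetical stand-in for KWinnersTakeAllFunction.apply: keep the top
    # `sparsity` fraction of activations along the last dim, binarized.
    k = max(int(sparsity * x.shape[-1]), 1)
    kth_largest = x.topk(k, dim=-1).values[..., -1:]
    return (x >= kth_largest).to(torch.float32)

def factors_root_sketch(n):
    # Hypothetical stand-in for factors_root: the pair of factors of n
    # closest to sqrt(n), e.g. 10000 -> (100, 100).
    for a in range(int(n ** 0.5), 0, -1):
        if n % a == 0:
            return a, n // a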
Example #3
    def test_data_loader(self):
        batch_size = 17
        loader = DataLoader(dataset_cls=MNIST, batch_size=batch_size)
        x, y = loader.sample()
        self.assertEqual(x.shape, (batch_size, 1, 28, 28))
        self.assertEqual(y.shape, (batch_size, ))
        self.assertTrue(loader.has_labels)
Example #4
def kwta_translation_similarity(embedding_dim=10000, sparsity=0.05,
                                translate=(1, 1), dataset_cls=MNIST):
    loader = DataLoader(dataset_cls, transform=TransformDefault.mnist())
    images, labels = loader.sample()
    images = (images > 0).type(torch.float32)

    images_translated = []
    for im in images:
        im_pil = F.to_pil_image(im)
        im_translated = F.affine(im_pil, angle=0, translate=translate, scale=1, shear=0)
        im_translated = F.to_tensor(im_translated)
        images_translated.append(im_translated)
    images_translated = torch.stack(images_translated, dim=0)
    assert images_translated.unique(sorted=True).tolist() == [0, 1]

    h, w = images.shape[2:]
    weights = torch.randn(w * h, embedding_dim)

    def apply_kwta(images_input):
        """
        :param images_input: (B, C, W, H) images tensor
        :return: (B, C, k_active) kwta encoded SDR tensor
        """
        images_flatten = images_input.flatten(start_dim=2)
        embeddings = images_flatten @ weights
        kwta_embedding = KWinnersTakeAllFunction.apply(embeddings.clone(), sparsity)
        return kwta_embedding

    kwta_orig = apply_kwta(images)
    kwta_translated = apply_kwta(images_translated)

    print(f"input image ORIG vs TRANSLATED similarity {calc_overlap(images, images_translated):.3f}")
    print(f"random-kWTA ORIG vs TRANSLATED similarity: {calc_overlap(kwta_orig, kwta_translated):.3f}")
Example #5
    def test_eval_short(self):
        eval_size = 711
        loader = DataLoader(dataset_cls=MNIST, eval_size=eval_size)
        n_eval = 0
        for x, y in loader.eval():
            n_eval += x.shape[0]
        self.assertEqual(loader.eval_size, eval_size)
        self.assertGreaterEqual(n_eval, loader.eval_size)
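Note: unlike test_eval_full, this test uses assertGreaterEqual, presumably
because the loader yields whole batches, so the number of evaluated samples
can overshoot an eval_size that is not a multiple of the batch size.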
Example #6
    def test_normalize_inverse(self):
        loader1 = DataLoader(dataset_cls=MNIST)
        self.assertIsNone(loader1.normalize_inverse)
        loader2 = DataLoader(dataset_cls=MNIST,
                             transform=TransformDefault.mnist())
        norm_inv = NormalizeInverse(mean=(0.1307, ), std=(0.3081, ))
        assert_array_almost_equal(loader2.normalize_inverse.mean,
                                  norm_inv.mean)
        assert_array_almost_equal(loader2.normalize_inverse.std, norm_inv.std)
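Note: NormalizeInverse undoes transforms.Normalize. A minimal sketch,
assuming it is simply a Normalize with algebraically inverted parameters
(not necessarily the repository's implementation):

import torch
from torchvision import transforms

class NormalizeInverseSketch(transforms.Normalize):
    # x_norm = (x - mean) / std, so the inverse transform is
    # Normalize(mean=-mean/std, std=1/std).
    def __init__(self, mean, std):
        mean = torch.as_tensor(mean)
        std = torch.as_tensor(std)
        super().__init__(mean=-mean / std, std=1.0 / std)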
Example #7
def train_matching_pursuit(dataset_cls=MNIST, n_epochs=5):
    # Typically 'out_features', the second parameter of the MatchingPursuit
    # model, should be greater than 'in_features'. For MNIST the model works
    # even with smaller values, but the resulting embedding vector is dense.

    model = MatchingPursuit(784, 256, solver=BasisPursuitADMM())
    # model = LISTA(784, 128)

    data_loader = DataLoader(dataset_cls,
                             transform=TransformDefault.mnist(
                                 normalize=None
                             ))
    criterion = LossPenalty(nn.MSELoss(), lambd=model.lambd)
    optimizer, scheduler = get_optimizer_scheduler(model)
    trainer = TrainMatchingPursuit(model,
                                   criterion=criterion,
                                   data_loader=data_loader,
                                   optimizer=optimizer,
                                   scheduler=scheduler,
                                   accuracy_measure=AccuracyEmbedding())
    trainer.restore()
    n_epochs = max(n_epochs - trainer.epoch, 0)
    trainer.train(n_epochs=n_epochs, mutual_info_layers=0)
    return trainer.model
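Note: LossPenalty wraps the base criterion with a penalty weighted by the
model's lambd. A minimal sketch, assuming the penalty is an L1 term on the
latent code (the usual choice for basis-pursuit objectives; the real
signature may differ):

import torch.nn as nn

class LossPenaltySketch(nn.Module):
    # Hypothetical stand-in: reconstruction loss plus lambd * mean |z|.
    def __init__(self, criterion, lambd):
        super().__init__()
        self.criterion = criterion
        self.lambd = lambd

    def forward(self, reconstructed, target, latent):
        penalty = self.lambd * latent.abs().mean()
        return self.criterion(reconstructed, target) + penalty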
Example #8
def train_kwta_autoenc(dataset_cls=MNIST):
    kwta = KWinnersTakeAllSoft(sparsity=0.05, hardness=2)
    model = AutoencoderLinearKWTA(784, 2048, kwta)
    data_loader = DataLoader(dataset_cls,
                             transform=transforms.ToTensor(),
                             eval_size=10000)
    criterion = nn.BCEWithLogitsLoss()
    optimizer, scheduler = get_optimizer_scheduler(model)
    kwta_scheduler = KWTAScheduler(model=model,
                                   step_size=1,
                                   gamma_sparsity=0.7,
                                   min_sparsity=0.05,
                                   gamma_hardness=1.25,
                                   max_hardness=20)
    trainer = TrainerAutoencoderBinary(
        model,
        criterion=criterion,
        data_loader=data_loader,
        optimizer=optimizer,
        scheduler=scheduler,
        kwta_scheduler=kwta_scheduler,
        accuracy_measure=AccuracyEmbeddingKWTA(cache=True))
    # trainer.restore()
    # trainer.monitor.advanced_monitoring(level=MonitorLevel.FULL)
    # trainer.watch_modules = (KWinnersTakeAll,)
    trainer.train(n_epochs=40)
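Note: judging by its argument names, KWTAScheduler anneals the kWTA layer
during training: every step_size epochs the sparsity is multiplied by
gamma_sparsity (floored at min_sparsity) and the hardness by gamma_hardness
(capped at max_hardness). A rough sketch of that assumed schedule:

class KWTASchedulerSketch:
    # Hypothetical stand-in: multiplicative annealing of a kWTA layer's
    # sparsity and hardness every `step_size` epochs.
    def __init__(self, kwta, step_size, gamma_sparsity, min_sparsity,
                 gamma_hardness, max_hardness):
        self.kwta = kwta
        self.step_size = step_size
        self.gamma_sparsity = gamma_sparsity
        self.min_sparsity = min_sparsity
        self.gamma_hardness = gamma_hardness
        self.max_hardness = max_hardness

    def epoch_finished(self, epoch):
        if (epoch + 1) % self.step_size == 0:
            self.kwta.sparsity = max(self.kwta.sparsity * self.gamma_sparsity,
                                     self.min_sparsity)
            self.kwta.hardness = min(self.kwta.hardness * self.gamma_hardness,
                                     self.max_hardness)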
Example #9
def train_caltech(n_epoch=500, dataset_cls=Caltech10):
    dataset_name = dataset_cls.__name__
    models.caltech.set_out_features(key='softmax',
                                    value=int(dataset_name[len("Caltech"):]))
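    # kwta is always truthy here; set it to None to take the
    # cross-entropy branch below.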
    kwta = KWinnersTakeAllSoft(sparsity=0.05)
    model = models.caltech.resnet18(kwta=kwta)
    data_loader = DataLoader(dataset_cls)
    if kwta:
        criterion = ContrastiveLossSampler(nn.CosineEmbeddingLoss(margin=0.5))
        optimizer, scheduler = get_optimizer_scheduler(model)
        kwta_scheduler = KWTAScheduler(model=model,
                                       step_size=10,
                                       gamma_sparsity=0.7,
                                       min_sparsity=0.05,
                                       gamma_hardness=2,
                                       max_hardness=20)
        trainer = TrainerEmbeddingKWTA(model=model,
                                       criterion=criterion,
                                       data_loader=data_loader,
                                       optimizer=optimizer,
                                       scheduler=scheduler,
                                       kwta_scheduler=kwta_scheduler)
    else:
        criterion = nn.CrossEntropyLoss()
        optimizer, scheduler = get_optimizer_scheduler(model)
        trainer = TrainerGrad(model=model,
                              criterion=criterion,
                              data_loader=data_loader,
                              optimizer=optimizer,
                              scheduler=scheduler)
    trainer.train(n_epochs=n_epoch)
Example #10
def train_autoenc(n_epoch=60, dataset_cls=MNIST):
    kwta = KWinnersTakeAllSoft(hardness=2, sparsity=0.05)
    # kwta = SynapticScaling(kwta, synaptic_scale=3.0)
    model = AutoencoderLinearKWTA(input_dim=784, encoding_dim=256, kwta=kwta)
    # model = MLP(784, 128, 64)
    if isinstance(model, AutoencoderLinearKWTATanh):
        # normalize in range [-1, 1]
        normalize = transforms.Normalize(mean=(0.5, ), std=(0.5, ))
        criterion = nn.MSELoss()
        reconstr_thr = torch.linspace(-0.5, 0.9, steps=10, dtype=torch.float32)
    else:
        normalize = None
        criterion = nn.BCEWithLogitsLoss()
        reconstr_thr = torch.linspace(0.1, 0.95, steps=10, dtype=torch.float32)
    optimizer, scheduler = get_optimizer_scheduler(model)
    data_loader = DataLoader(
        dataset_cls, transform=TransformDefault.mnist(normalize=normalize))
    kwta_scheduler = KWTAScheduler(model=model,
                                   step_size=10,
                                   gamma_sparsity=0.7,
                                   min_sparsity=0.05,
                                   gamma_hardness=2,
                                   max_hardness=10)
    trainer = TrainerAutoencoderBinary(model,
                                       criterion=criterion,
                                       data_loader=data_loader,
                                       optimizer=optimizer,
                                       scheduler=scheduler,
                                       kwta_scheduler=kwta_scheduler,
                                       reconstruct_threshold=reconstr_thr,
                                       accuracy_measure=AccuracyEmbeddingKWTA(
                                           cache=model.encoding_dim <= 2048))
    # trainer.restore()  # uncomment to restore the saved state
    # trainer.monitor.advanced_monitoring(level=MonitorLevel.FULL)
    trainer.train(n_epochs=n_epoch)
Example #11
    def test_TrainerGrad_MutualInfo(self):
        set_seed(2)
        mi_history = []

        class MutualInfoNPEETDebug(MutualInfoNPEET):
            def plot(self_mi, viz):
                mi_history.append(self_mi.information['mlp.2'])
                super().plot(viz)

        data_loader = DataLoader(MNIST,
                                 eval_size=100,
                                 transform=TransformDefault.mnist())
        trainer = TrainerGrad(self.model,
                              criterion=nn.CrossEntropyLoss(),
                              data_loader=data_loader,
                              optimizer=self.optimizer,
                              mutual_info=MutualInfoNPEETDebug(
                                  data_loader=data_loader, pca_size=None),
                              scheduler=self.scheduler)
        trainer.monitor.advanced_monitoring(level=MonitorLevel.FULL)
        trainer.train(n_epochs=1, mutual_info_layers=1)
        mi_history = np.vstack(mi_history)
        assert_array_less(0, mi_history)
        x_last, y_last = mi_history[-1]

        # the model is well trained
        self.assertGreater(y_last, 2.5)
        self.assertGreater(x_last, 2.2)
        self.assertLessEqual(x_last, y_last)
Example #12
def test(model, n_epoch=500, dataset_cls=MNIST):
    model.eval()
    for param in model.parameters():
        param.requires_grad_(False)
    criterion = nn.CrossEntropyLoss()
    data_loader = DataLoader(dataset_cls, transform=TransformDefault.mnist())
    trainer = Test(model=model, criterion=criterion, data_loader=data_loader)
    trainer.train(n_epochs=n_epoch)
Example #13
    @classmethod
    def setUpClass(cls):
        imsize = 5
        transform = Compose([Resize(imsize), ToTensor()])
        cls.data_loader = DataLoader(dataset_cls=MNIST,
                                     transform=transform,
                                     batch_size=20,
                                     eval_size=1000)
        cls.model = MLP(imsize**2, 10)
        cls.viz = VisdomMighty(env='test', offline=True)
Example #14
def train_grad(n_epoch=30, dataset_cls=MNIST):
    model = MLP(784, 128, 10)
    optimizer, scheduler = get_optimizer_scheduler(model)
    criterion = nn.CrossEntropyLoss()
    data_loader = DataLoader(dataset_cls, transform=TransformDefault.mnist())
    trainer = TrainerGrad(model,
                          criterion=criterion,
                          data_loader=data_loader,
                          optimizer=optimizer,
                          scheduler=scheduler)
    # trainer.restore()  # uncomment to restore the saved state
    # trainer.monitor.advanced_monitoring(level=MonitorLevel.SIGNAL_TO_NOISE)
    trainer.train(n_epochs=n_epoch)
Example #15
def train_grad(n_epoch=10, dataset_cls=MNIST):
    model = MLP(784, 128, 10)
    optimizer, scheduler = get_optimizer_scheduler(model)
    data_loader = DataLoader(dataset_cls, transform=TransformDefault.mnist())
    trainer = TrainerGrad(model,
                          criterion=nn.CrossEntropyLoss(),
                          data_loader=data_loader,
                          optimizer=optimizer,
                          mutual_info=MutualInfoNeuralEstimation(data_loader),
                          scheduler=scheduler)
    # trainer.restore()  # uncomment to restore the saved state
    trainer.monitor.advanced_monitoring(level=MonitorLevel.SIGNAL_TO_NOISE)
    trainer.train(n_epochs=n_epoch, mutual_info_layers=1)
Example #16
def test_matching_pursuit_lambdas(dataset_cls=MNIST):
    model = MatchingPursuit(784, 2048)
    data_loader = DataLoader(dataset_cls,
                             transform=TransformDefault.mnist(
                                 normalize=None
                             ))
    bmp_lambdas = torch.linspace(0.05, 0.95, steps=10)
    trainer = TestMatchingPursuitParameters(model,
                                            criterion=nn.MSELoss(),
                                            data_loader=data_loader,
                                            bmp_params_range=bmp_lambdas,
                                            param_name='lambda')
    trainer.train(n_epochs=1, mutual_info_layers=0)
Example #17
    def setUp(self):
        set_seed(1)
        self.model = MLP(784, 64, 10)
        self.data_loader = DataLoader(MNIST,
                                      eval_size=10000,
                                      transform=TransformDefault.mnist())

        self.optimizer = torch.optim.Adam(filter(
            lambda param: param.requires_grad, self.model.parameters()),
                                          lr=1e-3,
                                          weight_decay=1e-5)
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer)
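Note: this setUp spells out what get_optimizer_scheduler, used throughout
these examples, presumably returns. A minimal sketch mirroring it (the exact
hyperparameters are an assumption):

import torch

def get_optimizer_scheduler_sketch(model, lr=1e-3, weight_decay=1e-5):
    # Adam over the trainable parameters plus ReduceLROnPlateau,
    # as constructed explicitly in setUp above.
    optimizer = torch.optim.Adam(
        (p for p in model.parameters() if p.requires_grad),
        lr=lr, weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
    return optimizer, scheduler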
Example #18
def surfplot(dataset_cls=MNIST):
    loader = DataLoader(dataset_cls, transform=TransformDefault.mnist(), eval_size=1000)
    logdim = torch.arange(8, 14)
    embedding_dimensions = torch.pow(2, logdim)
    sparsities = [0.001, 0.01, 0.05, 0.1, 0.3, 0.5, 0.75]
    overlap_running_mean = defaultdict(lambda: defaultdict(MeanOnline))
    for images, labels in loader.eval(description=f"kWTA inverse overlap surfplot "
                                                  f"({dataset_cls.__name__})"):
        if torch.cuda.is_available():
            images = images.cuda()
        images_binary = (images > 0).type(torch.float32).flatten(start_dim=2)
        sparsity_channel = images_binary.mean()
        for i, embedding_dim in enumerate(embedding_dimensions):
            for j, sparsity in enumerate(sparsities):
                weights = torch.randn(images_binary.shape[2], embedding_dim, device=images_binary.device)
                embeddings = images_binary @ weights
                kwta_embeddings = KWinnersTakeAllFunction.apply(embeddings, sparsity)
                before_inverse = kwta_embeddings @ weights.transpose(0, 1)
                restored = KWinnersTakeAllFunction.apply(before_inverse, sparsity_channel)
                overlap = calc_overlap(images_binary, restored)
                overlap_running_mean[i][j].update(overlap)
    overlap = torch.empty(len(embedding_dimensions), len(sparsities))
    for i in range(overlap.shape[0]):
        for j in range(overlap.shape[1]):
            overlap[i, j] = overlap_running_mean[i][j].get_mean()

    viz = VisdomMighty(env="kWTA inverse")
    opts = dict(
        title=f"kWTA inverse overlap: {dataset_cls.__name__}",
        ytickvals=list(range(len(embedding_dimensions))),
        yticklabels=[f'2^{power}' for power in logdim],
        ylabel='embedding_dim',
        xtickvals=list(range(len(sparsities))),
        xticklabels=list(map(str, sparsities)),
        xlabel='sparsity',
    )
    viz.contour(X=overlap, win=f'overlap contour: {dataset_cls.__name__}', opts=opts)
    viz.surf(X=overlap, win=f'overlap surf: {dataset_cls.__name__}', opts=opts)
Example #19
def test_matching_pursuit(dataset_cls=MNIST):
    # vanilla Matching Pursuit: the weights are fixed, no training
    model = MatchingPursuit(784, 2048)
    data_loader = DataLoader(dataset_cls,
                             eval_size=10000,
                             transform=TransformDefault.mnist(
                                 normalize=None
                             ))
    trainer = TestMatchingPursuit(model,
                                  criterion=nn.MSELoss(),
                                  data_loader=data_loader,
                                  optimizer=OptimizerStub(),
                                  accuracy_measure=AccuracyEmbedding())
    trainer.train(n_epochs=1, mutual_info_layers=0)
Example #20
def train_autoencoder(n_epoch=60, dataset_cls=MNIST):
    model = AutoencoderLinear(784, 128)
    data_loader = DataLoader(dataset_cls,
                             transform=TransformDefault.mnist(normalize=None))
    criterion = nn.BCEWithLogitsLoss()
    optimizer, scheduler = get_optimizer_scheduler(model)
    trainer = TrainerAutoencoder(
        model,
        criterion=criterion,
        data_loader=data_loader,
        optimizer=optimizer,
        scheduler=scheduler,
        accuracy_measure=AccuracyEmbedding(cache=True))
    # trainer.restore()  # uncomment to restore the saved state
    trainer.monitor.advanced_monitoring(level=MonitorLevel.SIGNAL_TO_NOISE)
    trainer.train(n_epochs=n_epoch, mutual_info_layers=0)
Example #21
def train_lista(dataset_cls=MNIST):
    model_ref = train_matching_pursuit(dataset_cls=dataset_cls)
    model = LISTA(784, out_features=model_ref.out_features)
    data_loader = DataLoader(dataset_cls,
                             transform=TransformDefault.mnist(
                                 normalize=None
                             ))
    criterion = nn.MSELoss()
    optimizer, scheduler = get_optimizer_scheduler(model)
    trainer = TrainLISTA(model,
                         model_reference=model_ref,
                         criterion=criterion,
                         data_loader=data_loader,
                         optimizer=optimizer,
                         scheduler=scheduler,
                         accuracy_measure=AccuracyEmbedding())
    # trainer.monitor.advanced_monitoring(level=MonitorLevel.FULL)
    trainer.train(n_epochs=10, mutual_info_layers=0)
Example #22
def dataset_entropy(dataset_cls=MNIST):
    # log2(10) = 3.322
    # log2(100) = 6.644
    #
    # MNIST:         3.320 bits
    # FashionMNIST:  3.322 bits
    # CIFAR10:       3.322 bits
    # CIFAR100:      6.644 bits
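    # MNIST lands slightly below log2(10) because its training-set class
    # counts are mildly imbalanced; the other datasets are class-balanced.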
    loader = DataLoader(dataset_cls).get(train=True)
    labels_full = []
    for images, labels in tqdm(
            loader, desc=f"Computing {dataset_cls.__name__} labels entropy"):
        labels_full.append(labels)
    labels_full = torch.cat(labels_full)
    labels_unique, labels_count = np.unique(labels_full, return_counts=True)
    label_appearance_proba = labels_count / len(labels_full)
    entropy = np.sum(label_appearance_proba *
                     np.log2(1 / label_appearance_proba))
    print(f"{dataset_cls.__name__} labels entropy: {entropy:.3f} bits")
Example #23
def dataset_mean(data_loader: DataLoader, verbose=True):
    # L1 sparsity: ||x||_1 / size(x)
    #
    # MNIST:         0.131
    # FashionMNIST:  0.286
    # CIFAR10:       0.473
    # CIFAR100:      0.478
    loader = data_loader.eval()
    sparsity_online = MeanOnline()
    for batch in tqdm(
            loader,
            desc=f"Computing {data_loader.dataset_cls.__name__} mean",
            disable=not verbose,
            leave=False):
        batch_input = input_from_batch(batch)
        batch_input = batch_input.flatten(start_dim=1)
        sparsity = compute_sparsity(batch_input)
        sparsity_online.update(sparsity)
    return sparsity_online.get_mean()
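Note: compute_sparsity and MeanOnline are imported from mighty.utils in
Example #27 below. Minimal sketches consistent with the formula in the
comment, ||x||_1 / size(x), and with a streaming mean (assumed behavior):

import torch

def compute_sparsity_sketch(x):
    # L1 sparsity of a batch of flattened inputs: ||x||_1 / size(x).
    return x.abs().mean()

class MeanOnlineSketch:
    # Streaming mean: update() accumulates values, get_mean() averages.
    def __init__(self):
        self.sum = 0.
        self.count = 0

    def update(self, value):
        self.sum += value
        self.count += 1

    def get_mean(self):
        return self.sum / max(self.count, 1)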
Example #24
    def __init__(self,
                 model: nn.Module,
                 criterion: nn.Module,
                 data_loader: DataLoader,
                 accuracy_measure: Accuracy = None,
                 mutual_info=None,
                 env_suffix='',
                 checkpoint_dir=CHECKPOINTS_DIR):
        if torch.cuda.is_available():
            model.cuda()
        self.model = model
        self.criterion = criterion
        self.data_loader = data_loader
        self.train_loader = data_loader.get(train=True)
        if mutual_info is None:
            mutual_info = MutualInfoNeuralEstimation(data_loader)
        self.mutual_info = mutual_info
        self.checkpoint_dir = Path(checkpoint_dir)
        self.timer = timer
        self.timer.init(batches_in_epoch=len(self.train_loader))
        self.timer.set_epoch(0)
        criterion_name = self.criterion.__class__.__name__
        if isinstance(criterion, LossPenalty):
            criterion_name = f"{criterion_name}(" \
                             f"{criterion.criterion.__class__.__name__})"
        self.env_name = f"{time.strftime('%Y.%m.%d')} " \
                        f"{model.__class__.__name__}: " \
                        f"{data_loader.dataset_cls.__name__} " \
                        f"{self.__class__.__name__} " \
                        f"{criterion_name}"
        env_suffix = env_suffix.lstrip(' ')
        if env_suffix:
            self.env_name = f'{self.env_name} {env_suffix}'
        if accuracy_measure is None:
            if isinstance(self.criterion, PairLossSampler):
                accuracy_measure = AccuracyEmbedding()
            else:
                # cross entropy loss
                accuracy_measure = AccuracyArgmax()
        self.accuracy_measure = accuracy_measure
        self.monitor = self._init_monitor(mutual_info)
        self.online = self._init_online_measures()
        self.best_score = 0.
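Note: the fallback logic above picks AccuracyEmbedding for pairwise losses
and AccuracyArgmax for cross-entropy. A sketch of the simpler branch,
assuming AccuracyArgmax scores the argmax of the logits:

import torch

class AccuracyArgmaxSketch:
    # Hypothetical stand-in: predicted class = index of the largest logit.
    def predict(self, outputs):
        return outputs.argmax(dim=-1)

    def accuracy(self, outputs, labels):
        return (self.predict(outputs) == labels).float().mean().item()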
Example #25
def train_kwta(n_epoch=500, dataset_cls=MNIST):
    kwta = KWinnersTakeAllSoft(sparsity=0.05, hard=False, hardness=2)
    # kwta = SynapticScaling(kwta, synaptic_scale=3)
    model = MLP_kWTA(784, 64, 256, kwta=kwta)
    optimizer, scheduler = get_optimizer_scheduler(model)
    criterion = TripletLossSampler(TripletCosineLoss(margin=0.5))
    data_loader = DataLoader(dataset_cls, transform=TransformDefault.mnist())
    kwta_scheduler = KWTAScheduler(model=model,
                                   step_size=10,
                                   gamma_sparsity=0.7,
                                   min_sparsity=0.05,
                                   gamma_hardness=2,
                                   max_hardness=20)
    trainer = TrainerEmbeddingKWTA(
        model=model,
        criterion=criterion,
        data_loader=data_loader,
        optimizer=optimizer,
        scheduler=scheduler,
        kwta_scheduler=kwta_scheduler,
        accuracy_measure=AccuracyEmbeddingKWTA(cache=True),
        env_suffix='')
    # trainer.restore()
    trainer.train(n_epochs=n_epoch, mutual_info_layers=1)
Example #26
def train_pretrained(n_epoch=500, dataset_cls=CIFAR10):
    model = models.cifar.CIFAR10(pretrained=True)
    for param in model.parameters():
        param.requires_grad_(False)
    kwta = KWinnersTakeAllSoft(sparsity=0.3)
    model.classifier = nn.Sequential(nn.Linear(1024, 128, bias=False), kwta)
    optimizer, scheduler = get_optimizer_scheduler(model)
    criterion = ContrastiveLossSampler(nn.CosineEmbeddingLoss(margin=0.5))
    data_loader = DataLoader(dataset_cls, transform=TransformDefault.cifar10())
    kwta_scheduler = KWTAScheduler(model=model,
                                   step_size=15,
                                   gamma_sparsity=0.7,
                                   min_sparsity=0.05,
                                   gamma_hardness=2,
                                   max_hardness=10)
    trainer = TrainerEmbedding(model=model,
                               criterion=criterion,
                               data_loader=data_loader,
                               optimizer=optimizer,
                               scheduler=scheduler,
                               kwta_scheduler=kwta_scheduler,
                               accuracy_measure=AccuracyEmbeddingKWTA(),
                               env_suffix='')
    trainer.train(n_epochs=n_epoch, mutual_info_layers=1)
Example #27
from torchvision.datasets import MNIST
from tqdm import tqdm

from mighty.utils.common import input_from_batch
from mighty.utils.data import DataLoader
from mighty.utils.signal import compute_sparsity
from mighty.utils.var_online import MeanOnline


def dataset_mean(data_loader: DataLoader, verbose=True):
    # L1 sparsity: ||x||_1 / size(x)
    #
    # MNIST:         0.131
    # FashionMNIST:  0.286
    # CIFAR10:       0.473
    # CIFAR100:      0.478
    loader = data_loader.eval()
    sparsity_online = MeanOnline()
    for batch in tqdm(
            loader,
            desc=f"Computing {data_loader.dataset_cls.__name__} mean",
            disable=not verbose,
            leave=False):
        batch_input = input_from_batch(batch)
        batch_input = batch_input.flatten(start_dim=1)
        sparsity = compute_sparsity(batch_input)
        sparsity_online.update(sparsity)
    return sparsity_online.get_mean()


if __name__ == '__main__':
    sparsity = dataset_mean(DataLoader(MNIST))
    print(f"Input L1 sparsity: {sparsity:.3f}")
Example #28
    def test_cifar10(self):
        set_seed(0)
        transform = TransformDefault.cifar10()
        loader = DataLoader(dataset_cls=CIFAR10, transform=transform)
        self._test_dataset(loader)
Example #29
    def test_mnist(self):
        set_seed(0)
        transform = TransformDefault.mnist()
        loader = DataLoader(dataset_cls=MNIST, transform=transform)
        self._test_dataset(loader)
Example #30
    def test_get_test(self):
        loader = DataLoader(dataset_cls=MNIST)
        n_samples = 0
        for x, y in loader.get(train=False):
            n_samples += x.shape[0]
        self.assertGreaterEqual(n_samples, 10_000)