Example #1
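Trains a CifarClassifier on CIFAR-10 with torchbearer's Trial for 350 epochs, using SGD with momentum, gradient clipping, a multi-step learning rate schedule, per-epoch checkpointing, and TensorBoard logging, then evaluates on the test set.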
def run(count, memory_size, device='cuda'):
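    # CIFAR-10 training transform: random crop with padding and horizontal flip, followed by per-channel normalisation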
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=10)

    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=10)

    base_dir = os.path.join('cifar_' + str(memory_size), str(count))

    # wrap the classifier in DataParallel so training can use multiple GPUs
    model = nn.DataParallel(CifarClassifier(count, memory_size))

    optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001, momentum=0.9, weight_decay=5e-4)

    # checkpoint every epoch, clip gradient norms at 5, decay the LR at epochs 150 and 250, and log to TensorBoard
    trial = Trial(model, optimizer, nn.NLLLoss(), [torchbearer.metrics.CategoricalAccuracy(), 'loss'], callbacks=[
        callbacks.MostRecent(os.path.join(base_dir, '{epoch:02d}.pt')),
        callbacks.GradientClipping(5),
        callbacks.MultiStepLR(milestones=[150, 250]),
        callbacks.TensorBoard(write_graph=False, comment=base_dir)
    ]).with_train_generator(trainloader).to(device)

    # train for 350 epochs, then evaluate on the held-out test set
    trial.run(350)

    trial.with_test_generator(testloader).evaluate(data_key=torchbearer.TEST_DATA)
Example #2
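Trains an MnistClassifier on MNIST with Adam for 200 epochs, using random-rotation augmentation, combined multi-step and exponential learning rate schedules, checkpointing, and TensorBoard logging, then evaluates on the test set.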
def run(count, memory_size, device='cuda'):
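    # MNIST training transform: random rotation of up to 20 degrees, followed by normalisation with the dataset mean and std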
    traintransform = transforms.Compose([
        transforms.RandomRotation(20),
        transforms.ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])
    trainset = torchvision.datasets.MNIST(root='./data/mnist',
                                          train=True,
                                          download=True,
                                          transform=traintransform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              pin_memory=True,
                                              batch_size=128,
                                              shuffle=True,
                                              num_workers=10)

    testtransform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    testset = torchvision.datasets.MNIST(root='./data/mnist',
                                         train=False,
                                         download=True,
                                         transform=testtransform)
    testloader = torch.utils.data.DataLoader(testset,
                                             pin_memory=True,
                                             batch_size=128,
                                             shuffle=False,
                                             num_workers=10)

    base_dir = os.path.join('mnist_' + str(memory_size), str(count))

    model = MnistClassifier(count, memory_size)
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=0.001)

    # checkpoint every epoch, clip gradient norms, apply both multi-step and exponential LR schedules, and log to TensorBoard
    trial = Trial(
        model,
        optimizer,
        nn.NLLLoss(), ['acc', 'loss'],
        callbacks=[
            callbacks.MostRecent(os.path.join(base_dir, '{epoch:02d}.pt')),
            callbacks.GradientClipping(5),
            callbacks.MultiStepLR(milestones=[50, 100, 150, 190, 195]),
            callbacks.ExponentialLR(0.99),
            callbacks.TensorBoard(write_graph=False, comment=base_dir)
        ]).with_train_generator(trainloader).to(device)

    trial.run(200)

    trial.with_test_generator(testloader).evaluate(
        data_key=torchbearer.TEST_DATA)
Example #3
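Fits a VAE with Adam, tracking reconstruction MSE and the mean NLL and KL terms; FMix can be applied as a callback, and for some modes the trial is warm-started from a previously saved checkpoint.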
    # FMix is applied as a torchbearer callback; `aug` is spliced into the callback list below
    if mode == 'fmix':
        aug = [FMix(alpha=1, decay_power=3)]

    model = VAE(64, var=args.var)
    # track reconstruction error (MSE on the sampled output) and the mean NLL and KL terms alongside the loss
    trial = Trial(model,
                  optim.Adam(model.parameters(), lr=5e-2),
                  nll,
                  metrics=[
                      metrics.MeanSquaredError(pred_key=SAMPLE),
                      metrics.mean(NLL),
                      metrics.mean(KL), 'loss'
                  ],
                  callbacks=[
                      sample,
                      kld(distributions.Normal(0, 1)),
                      init.XavierNormal(targets=['Conv']),
                      callbacks.MostRecent(args.dir + '/' + mode + '_' +
                                           str(args.i) + '.pt'),
                      callbacks.MultiStepLR([40, 80]),
                      callbacks.TensorBoard(write_graph=False,
                                            comment=mode + '_' + str(args.i),
                                            log_dir='vae_logs'), *aug
                  ])

    # for these modes, initialise the trial from a previously saved VAE checkpoint
    if mode in ['base', 'mix', 'cutmix']:
        trial = trial.load_state_dict(
            torch.load('vaes/' + '/' + mode + '_' + str(args.i) + '.pt'))

    trial.with_generators(train_loader,
                          test_loader).to('cuda').run(args.epochs, verbose=1)
Example #4
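Trains an image classifier with SGD, tracking top-1 and top-5 accuracy; it resumes from any matching checkpoint on disk, evaluates on the test set, and saves the final weights of the underlying model.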
optimizer = torch.optim.SGD(model.parameters(),
                            0.1,
                            momentum=0.9,
                            weight_decay=1e-4)

# optimiser = optim.RMSprop(model.parameters(), alpha=0.9, lr=0.0001, weight_decay=1e-6)
loss_function = nn.CrossEntropyLoss()

# device = "cuda:0" if torch.cuda.is_available() else "cpu"
# track top-1 and top-5 accuracy, decay the LR at epochs 30 and 60, and checkpoint every epoch
trial = Trial(model,
              optimizer,
              loss_function,
              metrics=['loss', 'acc', 'top_5_acc'],
              callbacks=[
                  callbacks.TensorBoard(write_graph=False,
                                        comment=f'resnet50_{n_bn}_{rep}'),
                  callbacks.MultiStepLR([30, 60]),
                  callbacks.MostRecent(dir + model_file + '_{epoch:02d}.pt')
              ]).to('cuda')
trial.with_generators(trainloader, test_generator=testloader)

# resume from a previously saved checkpoint whose filename matches the epoch pattern
pattern = re.compile(model_file + r'_\d+\.pt')
for filepath in os.listdir(dir):
    if pattern.match(filepath):
        trial.load_state_dict(torch.load(dir + filepath))

trial.run(epochs=90)
trial.evaluate(data_key=torchbearer.TEST_DATA)

# save the weights of the wrapped model (model.module)
torch.save(model.module.state_dict(), dir + model_file + '.pt')
Example #5
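Trains a MimeVGG model with mi(True) as the criterion, using an add_to_loss callback to log a second estimate, mi(False) (computed without tanh), without affecting the loss.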
                                                shuffle=True,
                                                num_workers=8)

        model = MimeVGG(vgg, {k: cfgs['A'][k]() for k in [layer]})

        optimizer = optim.Adam(filter(lambda x: x.requires_grad,
                                      model.parameters()),
                               lr=5e-4)

        mi_false = mi(False)

        # add_to_loss callback: store the no-tanh MI estimate in state so it can be logged
        # as a metric, returning 0 so the training loss is unchanged
        @callbacks.add_to_loss
        def mi_no_tanh(state):
            state[OTHER_MI] = mi_false(state)
            return 0

        # the criterion is mi(True); the mean of the no-tanh estimate (OTHER_MI) is logged as a metric
        trial = Trial(model,
                      optimizer,
                      mi(True),
                      metrics=['loss',
                               torchbearer.metrics.mean(OTHER_MI)],
                      callbacks=[
                          mi_no_tanh,
                          callbacks.TensorBoard(write_graph=False,
                                                comment='mi_' + args.model,
                                                log_dir='mi_data')
                      ])
        trial.with_generators(train_generator=trainloader,
                              val_generator=valloader).to('cuda')
        trial.run(20, verbose=1)