示例#1
0
def run(count, memory_size, device='cuda'):
    """Train a CifarClassifier on CIFAR-10 for 350 epochs, then evaluate on the test set.

    Args:
        count: model-specific setting forwarded to CifarClassifier.
        memory_size: forwarded to CifarClassifier; also names the output directory.
        device: torch device string to train on (default 'cuda').
    """
    # CIFAR-10 per-channel statistics used to normalise both splits.
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                     (0.2023, 0.1994, 0.2010))

    train_tf = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    test_tf = transforms.Compose([transforms.ToTensor(), normalize])

    train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                             download=True, transform=train_tf)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=128,
                                               shuffle=True, num_workers=10)

    test_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                            download=True, transform=test_tf)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=100,
                                              shuffle=False, num_workers=10)

    # Checkpoints and logs live under e.g. cifar_<memory_size>/<count>/.
    base_dir = os.path.join('cifar_' + str(memory_size), str(count))

    net = nn.DataParallel(CifarClassifier(count, memory_size))
    trainable = filter(lambda p: p.requires_grad, net.parameters())
    optimizer = optim.SGD(trainable, lr=0.001, momentum=0.9, weight_decay=5e-4)

    trial = Trial(
        net,
        optimizer,
        nn.NLLLoss(),
        [torchbearer.metrics.CategoricalAccuracy(), 'loss'],
        callbacks=[
            callbacks.MostRecent(os.path.join(base_dir, '{epoch:02d}.pt')),
            callbacks.GradientClipping(5),
            callbacks.MultiStepLR(milestones=[150, 250]),
            callbacks.TensorBoard(write_graph=False, comment=base_dir),
        ],
    ).with_train_generator(train_loader).to(device)

    trial.run(350)

    # Final evaluation on the held-out test split.
    trial.with_test_generator(test_loader).evaluate(data_key=torchbearer.TEST_DATA)
示例#2
0
def run(count, memory_size, file, device='cuda'):
    """Fine-tune a SelfTaught model on CIFAR-10, reusing a pretrained CifarDraw memory.

    The memory module is frozen except for its decay/learn/learn2 parameters,
    which remain trainable.

    Args:
        count: model-specific setting forwarded to CifarDraw/SelfTaught.
        memory_size: memory module size; also names the output directory.
        file: checkpoint path of a pretrained CifarDraw trial.
        device: torch device string to train on (default 'cuda').
    """
    train_tf = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(0.25, 0.25, 0.25, 0.25),
        transforms.ToTensor(),
    ])
    test_tf = transforms.Compose([transforms.ToTensor()])

    train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                             download=True, transform=train_tf)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=128,
                                               shuffle=True, num_workers=10)

    test_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                            download=True, transform=test_tf)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=100,
                                              shuffle=False, num_workers=10)

    base_dir = os.path.join('cifarss_' + str(memory_size), "16")

    # Load the pretrained model only to harvest its memory module.
    pretrained = CifarDraw(count, memory_size)
    pretrained.load_state_dict(torch.load(file)[torchbearer.MODEL])
    model = SelfTaught(count, 512, memory_size, pretrained.memory)

    # Freeze the whole memory, then re-enable the few parameters we still adapt.
    for p in model.memory.parameters():
        p.requires_grad = False
    model.memory.decay.requires_grad = True
    model.memory.learn.requires_grad = True
    model.memory.learn2.requires_grad = True

    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                           lr=1e-3)

    trial = Trial(
        model,
        optimizer,
        nn.NLLLoss(),
        ['acc', 'loss'],
        pass_state=True,
        callbacks=[
            callbacks.MultiStepLR([25, 40, 45]),
            callbacks.MostRecent(os.path.join(base_dir, '{epoch:02d}.pt')),
            callbacks.GradientClipping(5),
        ],
    ).with_generators(train_generator=train_loader,
                      val_generator=test_loader).for_val_steps(5).to(device)

    trial.run(50)
示例#3
0
def run(count, memory_size, device='cuda'):
    """Train an MnistClassifier on MNIST for 200 epochs, then evaluate on the test set.

    Args:
        count: model-specific setting forwarded to MnistClassifier.
        memory_size: forwarded to MnistClassifier; also names the output directory.
        device: torch device string to train on (default 'cuda').
    """
    # MNIST mean/std used to normalise both splits.
    normalize = transforms.Normalize((0.1307, ), (0.3081, ))

    train_tf = transforms.Compose([
        transforms.RandomRotation(20),
        transforms.ToTensor(),
        normalize,
    ])
    train_set = torchvision.datasets.MNIST(root='./data/mnist', train=True,
                                           download=True, transform=train_tf)
    train_loader = torch.utils.data.DataLoader(train_set, pin_memory=True,
                                               batch_size=128, shuffle=True,
                                               num_workers=10)

    test_tf = transforms.Compose([transforms.ToTensor(), normalize])
    test_set = torchvision.datasets.MNIST(root='./data/mnist', train=False,
                                          download=True, transform=test_tf)
    test_loader = torch.utils.data.DataLoader(test_set, pin_memory=True,
                                              batch_size=128, shuffle=False,
                                              num_workers=10)

    # Checkpoints and logs live under e.g. mnist_<memory_size>/<count>/.
    base_dir = os.path.join('mnist_' + str(memory_size), str(count))

    model = MnistClassifier(count, memory_size)
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                           lr=0.001)

    trial = Trial(
        model,
        optimizer,
        nn.NLLLoss(),
        ['acc', 'loss'],
        callbacks=[
            callbacks.MostRecent(os.path.join(base_dir, '{epoch:02d}.pt')),
            callbacks.GradientClipping(5),
            callbacks.MultiStepLR(milestones=[50, 100, 150, 190, 195]),
            callbacks.ExponentialLR(0.99),
            callbacks.TensorBoard(write_graph=False, comment=base_dir),
        ],
    ).with_train_generator(train_loader).to(device)

    trial.run(200)

    # Final evaluation on the held-out test split.
    trial.with_test_generator(test_loader).evaluate(data_key=torchbearer.TEST_DATA)
示例#4
0
    # Build the augmentation callback list for this run.
    # NOTE(review): `aug` is only assigned here when mode == 'fmix'; the `*aug`
    # splice below presumes earlier (not visible) code set it for other modes —
    # otherwise this raises NameError. TODO confirm.
    if mode == 'fmix':
        aug = [FMix(alpha=1, decay_power=3)]

    # 64-latent VAE; variance behaviour comes from the CLI args.
    model = VAE(64, var=args.var)
    trial = Trial(model,
                  optim.Adam(model.parameters(), lr=5e-2),
                  nll,  # loss callable defined elsewhere in the file
                  metrics=[
                      metrics.MeanSquaredError(pred_key=SAMPLE),
                      metrics.mean(NLL),
                      metrics.mean(KL), 'loss'
                  ],
                  callbacks=[
                      sample,
                      kld(distributions.Normal(0, 1)),  # KL against a standard normal prior
                      init.XavierNormal(targets=['Conv']),
                      # Checkpoint named <dir>/<mode>_<i>.pt, overwritten each epoch.
                      callbacks.MostRecent(args.dir + '/' + mode + '_' +
                                           str(args.i) + '.pt'),
                      callbacks.MultiStepLR([40, 80]),
                      callbacks.TensorBoard(write_graph=False,
                                            comment=mode + '_' + str(args.i),
                                            log_dir='vae_logs'), *aug
                  ])

    # Resume pretrained weights for the non-fmix baselines.
    # NOTE(review): 'vaes/' + '/' yields a doubled slash ('vaes//...') and the
    # path differs from the args.dir location MostRecent writes to above —
    # verify the intended checkpoint directory.
    if mode in ['base', 'mix', 'cutmix']:
        trial = trial.load_state_dict(
            torch.load('vaes/' + '/' + mode + '_' + str(args.i) + '.pt'))

    trial.with_generators(train_loader,
                          test_loader).to('cuda').run(args.epochs, verbose=1)
示例#5
0
def run(iteration, device='cuda:1'):
    """Train a CifarVAE on CIFAR-10 for 100 epochs with a beta-weighted KL term.

    Args:
        iteration: run index, embedded in the checkpoint file name.
        device: torch device string to train on (default 'cuda:1').
    """
    train_tf = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(0.25, 0.25, 0.25, 0.25),
        transforms.ToTensor(),
    ])
    test_tf = transforms.Compose([transforms.ToTensor()])

    train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                             download=True, transform=train_tf)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=128,
                                               shuffle=True, num_workers=10)

    test_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                            download=True, transform=test_tf)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=100,
                                              shuffle=False, num_workers=10)

    base_dir = 'cifar_vae'

    vae = CifarVAE()
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, vae.parameters()),
                           lr=5e-4)

    # Timestamp used to tell TensorBoard runs apart.
    from datetime import datetime
    stamp = datetime.now().strftime('%b%d_%H-%M-%S')

    checkpoint = os.path.join(base_dir,
                              'iter_' + str(iteration) + '.{epoch:02d}.pt')

    trial = Trial(
        vae,
        optimizer,
        nn.MSELoss(reduction='sum'),
        ['acc', 'loss'],
        pass_state=True,
        callbacks=[
            tm.kl_divergence(MU, LOGVAR, beta=2),
            callbacks.MultiStepLR([50, 90]),
            callbacks.MostRecent(checkpoint),
            callbacks.GradientClipping(5),
            # Log reconstructions every epoch, targets only once.
            callbacks.TensorBoardImages(comment=stamp,
                                        name='Prediction',
                                        write_each_epoch=True,
                                        key=torchbearer.Y_PRED),
            callbacks.TensorBoardImages(comment=stamp + '_cifar_vae',
                                        name='Target',
                                        write_each_epoch=False,
                                        key=torchbearer.Y_TRUE),
        ],
    ).with_generators(train_generator=train_loader,
                      val_generator=test_loader).for_val_steps(5).to(device)

    trial.run(100)
示例#6
0
# SGD with a standard ResNet recipe: lr 0.1, momentum 0.9, weight decay 1e-4.
optimizer = torch.optim.SGD(model.parameters(),
                            0.1,
                            momentum=0.9,
                            weight_decay=1e-4)

# optimiser = optim.RMSprop(model.parameters(), alpha=0.9, lr=0.0001, weight_decay=1e-6)
loss_function = nn.CrossEntropyLoss()

# device = "cuda:0" if torch.cuda.is_available() else "cpu"
trial = Trial(model,
              optimizer,
              loss_function,
              metrics=['loss', 'acc', 'top_5_acc'],
              callbacks=[
                  callbacks.TensorBoard(write_graph=False,
                                        comment=f'resnet50_{n_bn}_{rep}'),
                  callbacks.MultiStepLR([30, 60]),
                  callbacks.MostRecent(dir + model_file + '_{epoch:02d}.pt')
              ]).to('cuda')
trial.with_generators(trainloader, test_generator=testloader)

# Resume from the most recent epoch checkpoint, if any exist.
# Fixes: the old pattern '_\d+.pt' was a non-raw string (invalid-escape
# warning on modern Python) and its unescaped '.' matched any character;
# model_file is escaped in case it contains regex metacharacters.
pattern = re.compile(re.escape(model_file) + r'_\d+\.pt')
checkpoints = sorted(f for f in os.listdir(dir) if pattern.match(f))
if checkpoints:
    # Load only the latest checkpoint. Previously every match was loaded in
    # arbitrary os.listdir order, making the resumed state nondeterministic.
    # Lexicographic sort is numeric here because epochs are zero-padded to
    # two digits and the run is capped at 90 epochs.
    trial.load_state_dict(torch.load(dir + checkpoints[-1]))

trial.run(epochs=90)
trial.evaluate(data_key=torchbearer.TEST_DATA)

# Persist bare model weights; `.module` presumes a DataParallel-style wrapper
# around `model` — TODO confirm where `model` is built.
torch.save(model.module.state_dict(), dir + model_file + '.pt')