예제 #1
0
def main():
    """Run the full training loop: train, log Lipschitz stats, validate, checkpoint.

    Relies on module-level state configured elsewhere in this file
    (``args``, ``model``, ``optimizer``, ``scheduler``, ``ema``, ``logger``,
    ``transform_train``, ``PlotLosses``, ``best_test_bpd``, ...).

    Per epoch: trains the model, records Lipschitz-constant estimates and
    norm orders, updates a live loss plot, validates (optionally with EMA
    weights), and saves checkpoints. Updates the global ``best_test_bpd``
    whenever validation improves.
    """
    global best_test_bpd

    last_checkpoints = []
    lipschitz_constants = []
    ords = []

    # if args.resume:
    #     validate(args.begin_epoch - 1, model, ema)

    liveloss = PlotLosses()

    # Loop-invariant: the training-set size never changes between epochs,
    # so build the dataset once here instead of re-instantiating CIFAR10
    # on every iteration just to take its length.
    train_set_size = len(
        datasets.CIFAR10(
            args.dataroot, train=True, transform=transform_train))

    for epoch in range(args.begin_epoch, args.nepochs):
        logs = {}

        logger.info('Current LR {}'.format(optimizer.param_groups[0]['lr']))

        running_loss = train(epoch, model)

        # Track per-layer Lipschitz estimates and norm orders for logging.
        lipschitz_constants.append(get_lipschitz_constants(model))
        ords.append(get_ords(model))

        logger.info('Lipsh: {}'.format(pretty_repr(lipschitz_constants[-1])))
        logger.info('Order: {}'.format(pretty_repr(ords[-1])))

        # Mean loss per training example, fed to the live plot.
        # (Assumes running_loss is a tensor — .item() below; TODO confirm.)
        epoch_loss = running_loss / train_set_size
        logs['log loss'] = epoch_loss.item()

        liveloss.update(logs)
        liveloss.draw()

        # Validate with EMA weights when requested.
        if args.ema_val:
            test_bpd = validate(epoch, model, ema)
        else:
            test_bpd = validate(epoch, model)

        if args.scheduler and scheduler is not None:
            scheduler.step()

        # Single checkpoint payload, shared by both save paths below
        # (the original built this identical dict twice).
        state = {
            'state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'args': args,
            'ema': ema,
            'test_bpd': test_bpd,
        }

        # Keep a rolling window of the best-so-far checkpoints.
        if test_bpd < best_test_bpd:
            best_test_bpd = test_bpd

            utils.save_checkpoint(
                state,
                os.path.join(args.save, 'moMoModels'),
                epoch,
                last_checkpoints,
                num_checkpoints=5)

        # Always overwrite the "most recent" snapshot, regardless of quality.
        torch.save(
            state, os.path.join(args.save, 'models',
                                '010mmoosttMoosttRecentt.pth'))
        """
예제 #2
0
        # Per-channel mean/std — presumably the standard CIFAR-10
        # normalization statistics; TODO confirm against the transform above.
        init_layer = layers.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    else:
        # Non-normalized path: resize + flip + noise for training...
        transform_train = transforms.Compose([
            transforms.Resize(args.imagesize),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            add_noise,
        ])
        # ...and the same pipeline without the random flip for evaluation.
        transform_test = transforms.Compose([
            transforms.Resize(args.imagesize),
            transforms.ToTensor(),
            add_noise,
        ])
        # Logit transform with alpha=0.05 to map pixels away from {0, 1}.
        init_layer = layers.LogitTransform(0.05)
    # CIFAR-10 loaders: shuffled training split, deterministic test split.
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(args.dataroot, train=True, transform=transform_train),
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=args.nworkers,
    )
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(args.dataroot, train=False, transform=transform_test),
        batch_size=args.val_batchsize,
        shuffle=False,
        num_workers=args.nworkers,
    )
elif args.data == 'mnist':
    # MNIST: single-channel images; small alpha for the logit transform.
    im_dim = 1
    init_layer = layers.LogitTransform(1e-6)
    n_classes = 10
    train_loader = torch.utils.data.DataLoader(
    train_loader = torch.utils.data.DataLoader(
예제 #3
0
        # Evaluation transform: resize + tensor + noise (no augmentation).
        transform_test = transforms.Compose([
            transforms.Resize(args.imagesize),
            transforms.ToTensor(),
            add_noise,
        ])
        # Logit transform with alpha=0.05 to map pixels away from {0, 1}.
        init_layer = layers.LogitTransform(0.05)

    #train_loader = torch.utils.data.DataLoader(
    #    datasets.CIFAR10(args.dataroot, train=True, transform=transform_train),
    #    batch_size=args.batchsize,
    #    shuffle=True,
    #    num_workers=args.nworkers,
    #)

    # Despite the names, these are raw datasets, not DataLoaders.
    # train_trainLoader2 appears unused in this span — verify before removing.
    train_trainLoader = datasets.CIFAR10(args.dataroot,
                                         train=True,
                                         transform=transform_train)
    train_trainLoader2 = datasets.CIFAR10(args.dataroot,
                                          train=True,
                                          transform=transform_train)

    # NOTE(review): a torchvision ``datasets.CIFAR10`` instance has no
    # ``.cifar10`` attribute — labels live on the dataset itself
    # (``.targets`` in modern torchvision, ``.train_labels`` historically).
    # As written this line looks like it would raise AttributeError; confirm.
    # Intent: boolean mask selecting only class-0 examples.
    idx = torch.tensor(train_trainLoader.cifar10.train_labels) == 0
    train_trainLoader = torch.utils.data.dataset.Subset(
        train_trainLoader,
        np.where(idx == 1)[0])
    # train_trainLoader.mnist.train_data = train_trainLoader.mnist.train_data[idx]

    # Halve the (already class-filtered) dataset by keeping even indices.
    evens = list(range(0, len(train_trainLoader), 2))
    train_trainLoader = torch.utils.data.Subset(train_trainLoader, evens)

    # Second halving pass begins here; its Subset call continues past this view.
    evens = list(range(0, len(train_trainLoader), 2))