Example #1
def main(args):
    args = {
        "model": "vqvae",
        "batch_size": 1,
        # "hidden": 64, # 128,
        "hidden": 128,  # 128,
        # "k": 128, #  512,
        "k": 256,  #  512,
        "lr": 5e-7,  #5e-6,#2e-4,
        "n_recordings": n_recordings,
        "vq_coef": 2,  # ?
        "commit_coef": 2,  # ?
        "kl_coef": 1,  # ?
        # "noise_coef" : 1e3,
        "dataset": "imagenet",
        "epochs": 250 * 4,
        "cuda": torch.cuda.is_available(),
        "seed": 1,
        "gpus": "1",
        "log_interval": 50,
        "results_dir": "VAE_imagenet",
        "save_name": "first",
        "data_format": "json",
        "num_workers": 4,
        "num_folds": 5,
    }
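    # NOTE: the incoming args parameter is ignored; hyperparameters are hard-coded in the dict above.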

    lr = args["lr"]  # or default_hyperparams[args["dataset"]]['lr']
    k = args["k"]  # or default_hyperparams[args["dataset"]]['k']
    hidden = args["hidden"] or default_hyperparams[args["dataset"]]['hidden']
    num_channels = dataset_sizes[args["dataset"]][0]

    results, save_path = setup_logging_and_results(args)

    torch.manual_seed(args["seed"])
    if args["cuda"]:
        torch.cuda.manual_seed_all(args["seed"])
        args["gpus"] = [int(i) for i in args["gpus"].split(',')]
        # torch.cuda.set_device(args["gpus"][0])
        cudnn.benchmark = True
        torch.cuda.manual_seed(args["seed"])

    model = models[args["dataset"]][args["model"]](hidden,
                                                   k=k,
                                                   num_channels=num_channels)
    #model = MyDataParallel(model)
    print("Number of Parameters in Model:",
          sum(p.numel() for p in model.parameters() if p.requires_grad))
    if args["cuda"]:
        model.cuda()

    # noise_adder = AddNoiseManual(b=.5)

    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        10 if args["dataset"] == 'imagenet' else 30,
        0.5,
    )
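    # NOTE: scheduler.step() is never called in the training loop below, so the learning rate stays fixed at lr.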

    kwargs = {'num_workers': 1, 'pin_memory': True} if args["cuda"] else {}
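    # NOTE: kwargs is built here but not passed to the DataLoaders below; num_workers is taken from args instead.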

    train_dataset = SeizureDataset(TRAIN_SEIZURE_FILE,
                                   num_folds=args["num_folds"],
                                   cross_val=False,
                                   split='train')
    print("len train dataset", len(train_dataset))
    #train_dataset = EstimatedDataset(800, transform=dataset_transforms[args["dataset"]])

    train_loader = data.DataLoader(dataset=train_dataset,
                                   shuffle=True,
                                   batch_size=args["batch_size"],
                                   num_workers=args["num_workers"])
    print("train_loader", len(train_loader))

    eval_dataset = SeizureDataset(DEV_SEIZURE_FILE,
                                  num_folds=args["num_folds"],
                                  cross_val=False,
                                  split='dev')  # TODO: enter right split
    # eval_dataset = EstimatedDataset(20, transform=dataset_transforms[args["dataset"]])
    eval_loader = data.DataLoader(dataset=eval_dataset,
                                  shuffle=False,
                                  batch_size=args["batch_size"],
                                  num_workers=args["num_workers"])

    print("eval_loader", len(eval_loader))

    print("Save path", save_path)
    for epoch in range(1, args["epochs"] + 1):
        train_losses = train(epoch, model, train_loader, optimizer,
                             args["cuda"], args["log_interval"], save_path,
                             args)
        test_losses = test_net(epoch, model, eval_loader, args["cuda"],
                               save_path, args)
        writer.flush()
        # writer.close()
        torch.save(model.state_dict(), "saved_models/" + save_filename + ".pt")
Example #2
def main(args):
    parser = argparse.ArgumentParser(description='Variational AutoEncoders')

    model_parser = parser.add_argument_group('Model Parameters')
    model_parser.add_argument('--model', default='vae', choices=['vae', 'vqvae'],
                              help='autoencoder variant to use: vae | vqvae')
    model_parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                              help='input batch size for training (default: 128)')
    model_parser.add_argument('--hidden', type=int, metavar='N',
                              help='number of hidden channels')
    model_parser.add_argument('-k', '--dict-size', type=int, dest='k', metavar='K',
                              help='number of atoms in dictionary')
    model_parser.add_argument('--lr', type=float, default=None,
                              help='learning rate')
    model_parser.add_argument('--vq_coef', type=float, default=None,
                              help='vq coefficient in loss')
    model_parser.add_argument('--commit_coef', type=float, default=None,
                              help='commitment coefficient in loss')
    model_parser.add_argument('--kl_coef', type=float, default=None,
                              help='kl-divergence coefficient in loss')

    training_parser = parser.add_argument_group('Training Parameters')
    training_parser.add_argument('--dataset', default='cifar10', choices=['mnist', 'cifar10', 'imagenet'],
                                 help='dataset to use: mnist | cifar10 | imagenet')
    training_parser.add_argument('--data-dir', default='datasets/',
                                 help='directory containing the dataset')
    training_parser.add_argument('--epochs', type=int, default=20, metavar='N',
                                 help='number of epochs to train (default: 20)')
    training_parser.add_argument('--no-cuda', action='store_true', default=False,
                                 help='disables CUDA training')
    training_parser.add_argument('--seed', type=int, default=1, metavar='S',
                                 help='random seed (default: 1)')
    training_parser.add_argument('--gpus', default='0',
                                 help='gpus used for training - e.g 0,1,3')

    logging_parser = parser.add_argument_group('Logging Parameters')
    logging_parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                                help='how many batches to wait before logging training status')
    logging_parser.add_argument('--results-dir', metavar='RESULTS_DIR', default='./results',
                                help='results dir')
    logging_parser.add_argument('--save-name', default='',
                                help='saved folder')
    logging_parser.add_argument('--data-format', default='json',
                                help='in which format to save the data')
    args = parser.parse_args(args)
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    lr = args.lr or default_hyperparams[args.dataset]['lr']
    k = args.k or default_hyperparams[args.dataset]['k']
    hidden = args.hidden or default_hyperparams[args.dataset]['hidden']
    num_channels = dataset_sizes[args.dataset][0]

    results, save_path = setup_logging_and_results(args)

    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed_all(args.seed)
        args.gpus = [int(i) for i in args.gpus.split(',')]
        torch.cuda.set_device(args.gpus[0])
        cudnn.benchmark = True
        torch.cuda.manual_seed(args.seed)

    model = models[args.dataset][args.model](hidden, k=k, num_channels=num_channels)
    if args.cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, 10 if args.dataset == 'imagenet' else 30, 0.5,)

    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    dataset_train_dir = os.path.join(args.data_dir, args.dataset)
    dataset_test_dir = os.path.join(args.data_dir, args.dataset)
    if 'imagenet' in args.dataset:
        dataset_train_dir = os.path.join(dataset_train_dir, 'train')
        dataset_test_dir = os.path.join(dataset_test_dir, 'val')
    train_loader = torch.utils.data.DataLoader(
        datasets_classes[args.dataset](dataset_train_dir,
                                       transform=dataset_transforms[args.dataset],
                                       **dataset_train_args[args.dataset]),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets_classes[args.dataset](dataset_test_dir,
                                       transform=dataset_transforms[args.dataset],
                                       **dataset_test_args[args.dataset]),
        batch_size=args.batch_size, shuffle=True, **kwargs)

    for epoch in range(1, args.epochs + 1):
        train_losses = train(epoch, model, train_loader, optimizer, args.cuda, args.log_interval, save_path, args)
        test_losses = test_net(epoch, model, test_loader, args.cuda, save_path, args)
        results.add(epoch=epoch, **train_losses, **test_losses)
        for loss_name in train_losses:
            key = loss_name[:-6]  # strip the '_train' suffix
            results.plot(x='epoch', y=[key + '_train', key + '_test'])
        results.save()
        scheduler.step()
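
# Usage sketch (an assumed entry point, not part of the original excerpt): since
# parse_args(args) takes a list of strings, main() accepts CLI-style arguments.
if __name__ == '__main__':
    import sys
    main(sys.argv[1:])
    # or programmatically, e.g.:
    # main(['--model', 'vqvae', '--dataset', 'cifar10', '--batch-size', '64'])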
Example #3
def main(args):
    args = {
        "model": "vqvae",
        "batch_size": 8, # 7 * 9 = 63 (close to desired 64)
        "hidden": 128,
        "k": 512 * 1,
        "lr": 2e-4,
        "n_recordings" : n_recordings,
        "vq_coef": 1,  # ?
        "commit_coef": 1,  # ?
        "kl_coef": 1,  # ?
        # "noise_coef" : 1e3,
        "dataset": "imagenet",
        "epochs": 250 * 4,
        "cuda": torch.cuda.is_available(),
        "seed": 1,
        "gpus": "1",
        "log_interval": 50,
        "results_dir": "VAE_imagenet",
        "save_name": "first",
        "data_format": "json"
    }

    lr = args["lr"]  # or default_hyperparams[args["dataset"]]['lr']
    k = args["k"]  # or default_hyperparams[args["dataset"]]['k']
    hidden = args["hidden"] or default_hyperparams[args["dataset"]]['hidden']
    num_channels = dataset_sizes[args["dataset"]][0]

    results, save_path = setup_logging_and_results(args)

    torch.manual_seed(args["seed"])
    if args["cuda"]:
        torch.cuda.manual_seed_all(args["seed"])
        args["gpus"] = [int(i) for i in args["gpus"].split(',')]
        # torch.cuda.set_device(args["gpus"][0])
        cudnn.benchmark = True
        torch.cuda.manual_seed(args["seed"])

    model = models[args["dataset"]][args["model"]](hidden, k=k, num_channels=num_channels)
    #model = MyDataParallel(model)
    print("Number of Parameters in Model:", sum(p.numel() for p in model.parameters() if p.requires_grad))
    if args["cuda"]:
        model.cuda()

    # noise_adder = AddNoiseManual(b=.5)

    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, 10 if args["dataset"] == 'imagenet' else 30, 0.5)

    kwargs = {'num_workers': 1, 'pin_memory': True} if args["cuda"] else {}
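    # EstimatedDataset sizes below: 8 * 20 * 5 = 800 examples for training, a single example for testing.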
    train_dataset = EstimatedDataset(8 * 20 * 5, transform=dataset_transforms[args["dataset"]])
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        shuffle=True,
        batch_size=args["batch_size"],
    )
    test_dataset = EstimatedDataset(1, transform=dataset_transforms[args["dataset"]])
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=args["batch_size"],
        shuffle=True
    )
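    # NOTE: test_loader is never used; the loop below only trains and saves a checkpoint each epoch.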

    print("Save path", save_path)
    for epoch in range(1, args["epochs"] + 1):
        train_losses = train(epoch, model, train_loader, optimizer, args["cuda"], args["log_interval"], save_path, args)
        torch.save(model.state_dict(), "saved_models/" + save_filename + ".pt")
Example #4
def main(args):
    args = {
        "model": "vqvae",
        "batch_size": 16,  # anything too large and runs out of vRAM
        "hidden": 128,  # 128,
        "k": 256,  #  512,
        "lr": 5e-7,  #5e-6,#2e-4,
        "vq_coef": 2,
        "commit_coef": 2,
        "kl_coef": 1,
        "dataset": "imagenet",
        "epochs": 250 * 4,
        "cuda": torch.cuda.is_available(),
        "seed": 1,
        "gpus": "1",
        "log_interval": 50,
        "results_dir": "VAE_imagenet",
        "save_name": "first",
        "data_format": "json",
        "num_workers": 4,
        "num_folds": 5,
        "num_estimated_batches": 16
    }

    lr = args["lr"]  # or default_hyperparams[args["dataset"]]['lr']
    k = args["k"]  # or default_hyperparams[args["dataset"]]['k']
    hidden = args["hidden"] or default_hyperparams[args["dataset"]]['hidden']
    num_channels = dataset_sizes[args["dataset"]][0]

    results, save_path = setup_logging_and_results(args)

    torch.manual_seed(args["seed"])
    if args["cuda"]:
        torch.cuda.manual_seed_all(args["seed"])
        args["gpus"] = [int(i) for i in args["gpus"].split(',')]
        # torch.cuda.set_device(args["gpus"][0])
        cudnn.benchmark = True
        torch.cuda.manual_seed(args["seed"])

    model = models[args["dataset"]][args["model"]](hidden,
                                                   k=k,
                                                   num_channels=num_channels)

    if args["cuda"]:
        model.cuda()

    print("Number of Parameters in Model:",
          sum(p.numel() for p in model.parameters() if p.requires_grad))

    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        10 if args["dataset"] == 'imagenet' else 30,
        0.5,
    )

    kwargs = {'num_workers': 1, 'pin_memory': True} if args["cuda"] else {}

    ### Load Training Data ###

    # ideally, use a dataset of certain artifacts rather than just the standard recordings
    train_dataset_artifacts = SeizureDataset(TRAIN_SEIZURE_FILE,
                                             num_folds=args["num_folds"],
                                             cross_val=False,
                                             split='train',
                                             transform=normalize)
    train_artifacts_loader = data.DataLoader(dataset=train_dataset_artifacts,
                                             shuffle=True,
                                             batch_size=args["batch_size"],
                                             num_workers=args["num_workers"])
    num_examples = len(train_dataset_artifacts)
    print("len train dataset", num_examples)

    # more examples would occupy more RAM and take a while to compute
    num_examples_estimated = args["batch_size"] * args["num_estimated_batches"]
    train_dataset_estimated = EstimatedDataset(num_examples_estimated,
                                               transform=normalize)
    train_estimated_loader = data.DataLoader(dataset=train_dataset_estimated,
                                             shuffle=True,
                                             batch_size=args["batch_size"],
                                             num_workers=args["num_workers"])

    ### Load Eval Data ###

    eval_artifacts_dataset = SeizureDataset(DEV_SEIZURE_FILE,
                                            num_folds=args["num_folds"],
                                            cross_val=False,
                                            split='dev',
                                            transform=normalize)
    eval_artifacts_loader = data.DataLoader(
        dataset=eval_artifacts_dataset,
        shuffle=False,
        batch_size=args["batch_size"],
        num_workers=args["num_workers"],
    )

    print("eval loader", len(eval_artifacts_loader))

    eval_dataset_estimated = EstimatedDataset(num_examples_estimated,
                                              transform=normalize)
    eval_estimated_loader = data.DataLoader(dataset=eval_dataset_estimated,
                                            shuffle=True,
                                            batch_size=args["batch_size"],
                                            num_workers=args["num_workers"])
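
    # train() and test_net() in this variant each take two loaders: one over the
    # EstimatedDataset and one over the SeizureDataset artifact recordings.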

    print("Save path", save_path)

    for epoch in range(1, args["epochs"] + 1):
        try:
            train_losses = train(epoch, model, train_estimated_loader,
                                 train_artifacts_loader, optimizer,
                                 args["cuda"], args["log_interval"], save_path,
                                 args)
            test_losses = test_net(epoch, model, eval_estimated_loader,
                                   eval_artificats_loader, args["cuda"],
                                   save_path, args)
            writer.flush()
            torch.save(model.state_dict(),
                       "saved_models/" + save_filename + ".pt")
        except KeyboardInterrupt:
            print("Early Ending")
            break

    writer.flush()
    writer.close()
    torch.save(model.state_dict(), "saved_models/" + save_filename + ".pt")