# Shared imports for the examples on this page (the snippets as published omit
# them). The torchmeta import is standard; the `maml.*` module paths are an
# assumption based on common project layouts and may differ in your repo.
import json
import logging
import math
import os
import pickle
import time

import numpy as np
import torch
import torch.nn as nn
import wandb
from tqdm import tqdm
from torchmeta.utils.data import BatchMetaDataLoader

from maml.datasets import get_benchmark_by_name           # assumed location
from maml.metalearners import ModelAgnosticMetaLearning   # assumed location
from maml.model import make_warp_model                    # assumed location


def main(args):
    with open(args.config, 'r') as f:
        config = json.load(f)

    if args.folder is not None:
        config['folder'] = args.folder
    if args.num_steps > 0:
        config['num_steps'] = args.num_steps
    if args.num_batches > 0:
        config['num_batches'] = args.num_batches
    device = torch.device(
        'cuda' if args.use_cuda and torch.cuda.is_available() else 'cpu')

    benchmark = get_benchmark_by_name(config['dataset'],
                                      config['folder'],
                                      config['num_ways'],
                                      config['num_shots'],
                                      config['num_shots_test'],
                                      hidden_size=config['hidden_size'])

    with open(config['model_path'], 'rb') as f:
        benchmark.model.load_state_dict(torch.load(f, map_location=device))

    meta_test_dataloader = BatchMetaDataLoader(benchmark.meta_test_dataset,
                                               batch_size=config['batch_size'],
                                               shuffle=True,
                                               num_workers=args.num_workers,
                                               pin_memory=True)
    metalearner = ModelAgnosticMetaLearning(
        benchmark.model,
        first_order=config['first_order'],
        num_adaptation_steps=config['num_steps'],
        step_size=config['step_size'],
        loss_function=benchmark.loss_function,
        device=device)

    results = metalearner.evaluate(meta_test_dataloader,
                                   max_batches=config['num_batches'],
                                   verbose=args.verbose,
                                   desc='Test')

    # Save results
    dirname = os.path.dirname(config['model_path'])
    with open(os.path.join(dirname, 'results.json'), 'w') as f:
        json.dump(results, f)
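
Each of these examples assumes an `args` namespace built by a command-line parser. As a reference point, here is a minimal, hypothetical driver for the evaluation script above; the attribute names are taken from the code, the defaults are illustrative only.

# Hypothetical driver for the evaluation example above (not part of the
# original snippet). Negative defaults mean "keep the value from config.json".
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser('MAML evaluation')
    parser.add_argument('config', type=str,
                        help='Path to the config.json written during training')
    parser.add_argument('--folder', type=str, default=None,
                        help='Override the dataset folder from the config')
    parser.add_argument('--num-steps', type=int, default=-1,
                        help='Number of adaptation steps (>0 to override)')
    parser.add_argument('--num-batches', type=int, default=-1,
                        help='Number of test batches (>0 to override)')
    parser.add_argument('--num-workers', type=int, default=1)
    parser.add_argument('--use-cuda', action='store_true')
    parser.add_argument('--verbose', action='store_true')
    main(parser.parse_args())
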
Example #2

def main(args):
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

    # NOTE: the opening lines of this snippet were missing from the source;
    # the head above is reconstructed from the matching pattern in the
    # examples further down the page.
    if args.output_folder is not None:
        if not os.path.exists(args.output_folder):
            os.makedirs(args.output_folder)

        folder = os.path.join(args.output_folder,
                              time.strftime('%Y-%m-%d_%H%M%S'))
        os.makedirs(folder)

        args.folder = os.path.abspath(args.folder)
        args.model_path = os.path.abspath(os.path.join(folder, 'model.th'))
        # Save the configuration in a config.json file
        with open(os.path.join(folder, 'config.json'), 'w') as f:
            json.dump(vars(args), f, indent=2)

    device = torch.device(
        'cuda' if args.use_cuda and torch.cuda.is_available() else 'cpu')

    benchmark = get_benchmark_by_name(args.dataset,
                                      args.folder,
                                      args.num_ways,
                                      args.num_shots,
                                      args.num_shots_test,
                                      hidden_size=args.hidden_size)

    meta_train_dataloader = BatchMetaDataLoader(benchmark.meta_train_dataset,
                                                batch_size=args.batch_size,
                                                shuffle=True,
                                                num_workers=args.num_workers,
                                                pin_memory=True)
    #    meta_val_dataloader = BatchMetaDataLoader(benchmark.meta_val_dataset,
    #                                              batch_size=args.batch_size,
    #                                              shuffle=True,
    #                                              num_workers=args.num_workers,
    #                                              pin_memory=True)

Example #3
def main(args):
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
    device = torch.device(
        'cuda' if args.use_cuda and torch.cuda.is_available() else 'cpu')

    if (args.output_folder is not None):
        if not os.path.exists(args.output_folder):
            os.makedirs(args.output_folder)
            logging.debug('Creating folder `{0}`'.format(args.output_folder))

        folder = os.path.join(args.output_folder,
                              time.strftime('%Y-%m-%d_%H%M%S'))
        os.makedirs(folder)
        logging.debug('Creating folder `{0}`'.format(folder))

        args.folder = os.path.abspath(args.folder)
        args.model_path = os.path.abspath(os.path.join(folder, 'model.th'))
        # Save the configuration in a config.json file
        with open(os.path.join(folder, 'config.json'), 'w') as f:
            json.dump(vars(args), f, indent=2)
        logging.info('Saving configuration file in `{0}`'.format(
            os.path.abspath(os.path.join(folder, 'config.json'))))

    benchmark = get_benchmark_by_name(args.dataset,
                                      args.folder,
                                      args.num_ways,
                                      args.num_shots,
                                      args.num_shots_test,
                                      hidden_size=args.hidden_size)

    meta_train_dataloader = BatchMetaDataLoader(benchmark.meta_train_dataset,
                                                batch_size=args.batch_size,
                                                shuffle=True,
                                                num_workers=args.num_workers,
                                                pin_memory=True)
    meta_val_dataloader = BatchMetaDataLoader(benchmark.meta_val_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.num_workers,
                                              pin_memory=True)

    meta_optimizer = torch.optim.Adam(benchmark.model.parameters(),
                                      lr=args.meta_lr)
    metalearner = ModelAgnosticMetaLearning(
        benchmark.model,
        meta_optimizer,
        first_order=args.first_order,
        num_adaptation_steps=args.num_steps,
        step_size=args.step_size,
        loss_function=benchmark.loss_function,
        device=device)

    best_value = None

    # Training loop
    all_results = []
    epoch_desc = 'Epoch {{0: <{0}d}}'.format(1 +
                                             int(math.log10(args.num_epochs)))
    for epoch in range(args.num_epochs):
        metalearner.train(meta_train_dataloader,
                          max_batches=args.num_batches,
                          verbose=args.verbose,
                          desc='Training',
                          leave=False)
        results = metalearner.evaluate(meta_val_dataloader,
                                       max_batches=args.num_batches,
                                       verbose=args.verbose,
                                       desc=epoch_desc.format(epoch + 1))
        print(results['accuracies_after'])
        all_results.append(results['accuracies_after'])
        # `folder` is only defined when an output folder was given
        if args.output_folder is not None:
            with open(os.path.join(folder, 'results.json'), 'w') as f:
                json.dump(all_results, f, indent=2)

        # Save best model (highest validation accuracy, or lowest outer loss
        # if no accuracy is reported). save_model is reset every epoch so a
        # stale True from a previous epoch cannot trigger a spurious save.
        save_model = False
        if 'accuracies_after' in results:
            if (best_value is None) or (best_value < results['accuracies_after']):
                best_value = results['accuracies_after']
                save_model = True
        elif (best_value is None) or (best_value > results['mean_outer_loss']):
            best_value = results['mean_outer_loss']
            save_model = True

        if save_model and (args.output_folder is not None):
            with open(args.model_path, 'wb') as f:
                torch.save(benchmark.model.state_dict(), f)
    print(all_results)

    if hasattr(benchmark.meta_train_dataset, 'close'):
        benchmark.meta_train_dataset.close()
        benchmark.meta_val_dataset.close()
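
For reference, the batches consumed by these training loops follow torchmeta's convention: each batch from a BatchMetaDataLoader is a dict with a 'train' (support) and 'test' (query) split. A minimal sketch, assuming that convention (the shape comments are illustrative):

# Sketch only: unpacking one meta-batch from the loaders above.
for batch in meta_train_dataloader:
    train_inputs, train_targets = batch['train']   # support set, inner-loop adaptation
    test_inputs, test_targets = batch['test']      # query set, outer loss
    # train_inputs: [batch_size, num_ways * num_shots, *input_shape]
    # test_inputs:  [batch_size, num_ways * num_shots_test, *input_shape]
    break
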
Example #4
def main(args):
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
    device = torch.device('cuda' if args.use_cuda
                          and torch.cuda.is_available() else 'cpu')

    # Set up output folder, which will contain the saved model
    # and the config file for running test.py
    if (args.output_folder is not None):
        if not os.path.exists(args.output_folder):
            os.makedirs(args.output_folder)
            logging.debug('Creating output folder `{0}`'.format(args.output_folder))

        folder = os.path.join(args.output_folder,
                              time.strftime('%Y-%m-%d_%H%M%S'))
        os.makedirs(folder)
        logging.debug('Creating folder `{0}`'.format(folder))

        args.folder = os.path.abspath(args.folder)
        args.model_path = os.path.abspath(os.path.join(folder, 'model.th'))
        
        save_folder = os.path.abspath(folder)
        ckpt_folder = os.path.join(save_folder, 'checkpoints')
        if not os.path.exists(ckpt_folder):
            os.makedirs(ckpt_folder)
            logging.debug('Creating model checkpoint folder `{0}`'.format(ckpt_folder))

        # Save the configuration in a config.json file
        with open(os.path.join(folder, 'config.json'), 'w') as f:
            json.dump(vars(args), f, indent=2)
        logging.info('Saving configuration file in `{0}`'.format(
                     os.path.abspath(os.path.join(folder, 'config.json'))))

    # Load a pre-configured dataset, model, and loss function.
    benchmark = get_benchmark_by_name(args.dataset,
                                      args.folder,
                                      args.num_ways,
                                      args.num_shots,
                                      args.num_shots_test,
                                      hidden_size=args.hidden_size)

    # Set up dataloaders:
    # MetaDataset (collection of Tasks) > Task (iterable Dataset of OrderedDicts) 
    #   > task[i] (OrderedDict with shuffled train/test split) > (tuples of input & target tensors)

    # Train loader yields batches of tasks for meta-training (both inner and outer loop)
    meta_train_dataloader = BatchMetaDataLoader(benchmark.meta_train_dataset,
                                                batch_size=args.batch_size,
                                                shuffle=True,
                                                num_workers=args.num_workers,
                                                pin_memory=True)

    # Val loader has the same format, but is used only for evaluating adaptation ability
    # without taking gradient steps on the outer loss.
    meta_val_dataloader = BatchMetaDataLoader(benchmark.meta_val_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.num_workers,
                                              pin_memory=True)

    # Initializer the meta-optimizer and metalearner (MAML)
    meta_optimizer = torch.optim.Adam(benchmark.model.parameters(), lr=args.meta_lr)
    metalearner = ModelAgnosticMetaLearning(benchmark.model,
                                            meta_optimizer,
                                            first_order=args.first_order,
                                            num_adaptation_steps=args.num_steps,
                                            step_size=args.step_size,
                                            loss_function=benchmark.loss_function,
                                            device=device)

    best_value = None

    def append_results_to_dict(d, results):
        for key, val in results.items():
            d[key] = d.get(key, []) + [val]

    # Training loop: each epoch goes through all tasks in the entire dataset, in batches
    epoch_desc = 'Epoch {{0: <{0}d}}'.format(1 + int(math.log10(args.num_epochs)))
    train_stats, val_stats = {}, {}
    for epoch in range(args.num_epochs):
        train_results = metalearner.train(meta_train_dataloader,
                                          max_batches=args.num_batches,
                                          verbose=args.verbose,
                                          desc='Training',
                                          leave=False)
        append_results_to_dict(train_stats, train_results)

        if epoch % args.validate_every == 0:
            val_results = metalearner.evaluate(meta_val_dataloader,
                                               max_batches=args.num_batches,
                                               verbose=args.verbose,
                                               desc=epoch_desc.format(epoch + 1))
            append_results_to_dict(val_stats, val_results)

        if (args.output_folder is not None) and (epoch % args.checkpoint_every == 0):
            ckpt_path = os.path.join(ckpt_folder, f'checkpoint-{epoch}.pt')
            with open(ckpt_path, 'wb') as f:
                torch.save(benchmark.model.state_dict(), f)

        # Save best model according to validation acc/loss. save_model is
        # reset every epoch, and the comparison only runs on epochs where
        # validation actually ran (val_results would otherwise be stale).
        save_model = False
        if epoch % args.validate_every == 0:
            if 'mean_accuracy_after' in val_results:
                if (best_value is None) or (best_value < val_results['mean_accuracy_after']):
                    best_value = val_results['mean_accuracy_after']
                    save_model = True
            elif (best_value is None) or (best_value > val_results['mean_outer_loss']):
                best_value = val_results['mean_outer_loss']
                save_model = True

        if save_model and (args.output_folder is not None):
            with open(args.model_path, 'wb') as f:
                torch.save(benchmark.model.state_dict(), f)

    # Save train and val stats as serialized dictionaries (only when an output
    # folder was given, since save_folder is defined in that branch)
    if args.output_folder is not None:
        with open(os.path.join(save_folder, 'train_stats.pkl'), 'wb') as f:
            pickle.dump(train_stats, f)
        with open(os.path.join(save_folder, 'val_stats.pkl'), 'wb') as f:
            pickle.dump(val_stats, f)

    if hasattr(benchmark.meta_train_dataset, 'close'):
        benchmark.meta_train_dataset.close()
        benchmark.meta_val_dataset.close()
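
Since this variant serializes its statistics with pickle rather than JSON, here is a short follow-up sketch for inspecting a finished run (the file names come from the code above; the run folder path is a placeholder, and the available keys depend on what train() and evaluate() return):

# Hypothetical post-hoc inspection of a run's validation statistics.
import os
import pickle

run_folder = '/path/to/output/2024-01-01_000000'  # placeholder path
with open(os.path.join(run_folder, 'val_stats.pkl'), 'rb') as f:
    val_stats = pickle.load(f)
for key, per_epoch_values in val_stats.items():
    print('{0}: last value = {1}'.format(key, per_epoch_values[-1]))
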
Example #5
def main(args):
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    device = torch.device(
        'cuda' if args.use_cuda and torch.cuda.is_available() else 'cpu')

    if (args.output_folder is not None):
        if not os.path.exists(args.output_folder):
            os.makedirs(args.output_folder)
            logging.debug('Creating folder `{0}`'.format(args.output_folder))

        folder = os.path.join(args.output_folder,
                              time.strftime('%Y-%m-%d_%H%M%S'))
        os.makedirs(folder)
        logging.debug('Creating folder `{0}`'.format(folder))

        args.folder = os.path.abspath(args.folder)
        args.model_path = os.path.abspath(os.path.join(folder, 'model.th'))
        outfile_path = os.path.abspath(
            os.path.join(folder, 'model_results.json'))
        # Save the configuration in a config.json file
        with open(os.path.join(folder, 'config.json'), 'w') as f:
            json.dump(vars(args), f, indent=2)
        logging.info('Saving configuration file in `{0}`'.format(
            os.path.abspath(os.path.join(folder, 'config.json'))))

    benchmark = get_benchmark_by_name(
        args.dataset,
        args.folder,
        args.num_ways,
        args.num_shots,
        args.num_shots_test,
        hidden_size=args.hidden_size,
        random_seed=args.random_seed,
        num_training_samples=args.num_training_samples)

    meta_train_dataloader = BatchMetaDataLoader(benchmark.meta_train_dataset,
                                                batch_size=args.batch_size,
                                                shuffle=True,
                                                num_workers=args.num_workers,
                                                pin_memory=True)
    meta_val_dataloader = BatchMetaDataLoader(benchmark.meta_val_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.num_workers,
                                              pin_memory=True)

    meta_optimizer = torch.optim.Adam(benchmark.model.parameters(),
                                      lr=args.meta_lr)
    metalearner = ModelAgnosticMetaLearning(
        benchmark.model,
        meta_optimizer,
        first_order=args.first_order,
        num_adaptation_steps=args.num_steps,
        step_size=args.step_size,
        loss_function=benchmark.loss_function,
        device=device)
    #print(benchmark.model)

    best_value = None
    output = []
    pretty_print('epoch', 'train loss', 'train acc', 'train prec', 'val loss',
                 'val acc', 'val prec')

    # Training loop
    epoch_desc = 'Epoch {{0: <{0}d}}'.format(1 +
                                             int(math.log10(args.num_epochs)))
    for epoch in range(args.num_epochs):
        train_results = metalearner.train(meta_train_dataloader,
                                          max_batches=args.num_batches,
                                          verbose=args.verbose,
                                          desc='Training',
                                          leave=False)
        val_results = metalearner.evaluate(meta_val_dataloader,
                                           max_batches=args.num_batches,
                                           verbose=args.verbose,
                                           desc=epoch_desc.format(epoch + 1))
        pretty_print(
            (epoch + 1), train_results['mean_outer_loss'],
            train_results['accuracies_after'],
            train_results['precision_after'], val_results['mean_outer_loss'],
            val_results['accuracies_after'], val_results['precision_after'])

        # Save best model (highest validation accuracy, or lowest outer loss
        # if no accuracy is reported); reset save_model each epoch so a stale
        # value cannot trigger a spurious save.
        save_model = False
        if 'accuracies_after' in val_results:
            if (best_value is None) or (best_value < val_results['accuracies_after']):
                best_value = val_results['accuracies_after']
                save_model = True
        elif (best_value is None) or (best_value > val_results['mean_outer_loss']):
            best_value = val_results['mean_outer_loss']
            save_model = True

        if save_model and (args.output_folder is not None):
            with open(args.model_path, 'wb') as f:
                torch.save(benchmark.model.state_dict(), f)
        # saving results for later use - plotting, etc.
        output.append({
            'epoch': (epoch + 1),
            'train_loss': train_results['mean_outer_loss'],
            'train_acc': train_results['accuracies_after'],
            'train_prec': train_results['precision_after'],
            'val_loss': val_results['mean_outer_loss'],
            'val_acc': val_results['accuracies_after'],
            'val_prec': val_results['precision_after']
        })
        if (args.output_folder is not None):
            with open(outfile_path, 'w') as f:
                json.dump(output, f)

    if hasattr(benchmark.meta_train_dataset, 'close'):
        benchmark.meta_train_dataset.close()
        benchmark.meta_val_dataset.close()
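
pretty_print is a helper this example never defines. A minimal stand-in consistent with how it is called above (rows of up to seven columns, mixing strings, ints, and floats) might look like:

# Assumed helper: the original pretty_print is not shown in this example.
def pretty_print(*columns):
    def fmt(value):
        return '{0:.4f}'.format(value) if isinstance(value, float) else str(value)
    print(' | '.join('{0: <12}'.format(fmt(c)) for c in columns))
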
Example #6
def main(args):
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
    device = torch.device(
        'cuda' if args.use_cuda and torch.cuda.is_available() else 'cpu')

    wandb.init(project="geometric-meta-learning")

    if (args.output_folder is not None):
        if not os.path.exists(args.output_folder):
            os.makedirs(args.output_folder)
            logging.debug('Creating folder `{0}`'.format(args.output_folder))

        folder = os.path.join(args.output_folder,
                              time.strftime('%Y-%m-%d_%H%M%S'))
        os.makedirs(folder)
        logging.debug('Creating folder `{0}`'.format(folder))

        args.folder = os.path.abspath(args.folder)
        args.model_path = os.path.abspath(os.path.join(folder, 'model.th'))
        # Save the configuration in a config.json file
        with open(os.path.join(folder, 'config.json'), 'w') as f:
            json.dump(vars(args), f, indent=2)
        logging.info('Saving configuration file in `{0}`'.format(
            os.path.abspath(os.path.join(folder, 'config.json'))))

    ensemble_size = 0
    if args.ensemble:
        ensemble_size = args.ensemble_size
    benchmark = get_benchmark_by_name(args.dataset,
                                      args.folder,
                                      args.num_ways,
                                      args.num_shots,
                                      args.num_shots_test,
                                      hidden_size=args.hidden_size,
                                      meta_batch_size=args.batch_size,
                                      ensemble_size=ensemble_size)

    meta_train_dataloader = BatchMetaDataLoader(benchmark.meta_train_dataset,
                                                batch_size=args.batch_size,
                                                shuffle=True,
                                                num_workers=args.num_workers,
                                                pin_memory=True)
    meta_val_dataloader = BatchMetaDataLoader(benchmark.meta_val_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.num_workers,
                                              pin_memory=True)

    warp_model = None
    if args.warp:
        warp_model = make_warp_model(benchmark.model, constant=args.constant)
        meta_optimizer = torch.optim.Adam(benchmark.model.parameters(),
                                          lr=args.meta_lr)
        warp_meta_optimizer = torch.optim.Adam(warp_model.parameters(),
                                               lr=args.warp_lr)

        #warp_meta_optimizer = None
        #meta_optimizer = torch.optim.Adam(warp_model.parameters(), lr=args.warp_lr)
    else:
        meta_optimizer = torch.optim.Adam(benchmark.model.parameters(),
                                          lr=args.meta_lr)
        warp_meta_optimizer = None

    if args.link_ensemble:
        ensembler = nn.Identity()  # TransformerEnsembler(args.num_ways, benchmark.model.feature_size)
        ensembler_optimizer = None  # torch.optim.Adam(ensembler.parameters(), lr=args.warp_lr)
    else:
        ensembler = None
        ensembler_optimizer = None

    warp_scheduler = None  # get_linear_schedule_with_warmup(warp_meta_optimizer, 200, 100000, last_epoch=-1, phase_shift=-math.pi)
    ensembler_scheduler = None  # get_linear_schedule_with_warmup(ensembler_optimizer, 200, 100000, last_epoch=-1, phase_shift=-math.pi)

    scheduler = None  # get_linear_schedule_with_warmup(meta_optimizer, 200, 100000, last_epoch=-1)

    metalearner = ModelAgnosticMetaLearning(
        benchmark.model,
        meta_optimizer,
        scheduler=scheduler,
        warp_optimizer=warp_meta_optimizer,
        warp_model=warp_model,
        warp_scheduler=warp_scheduler,
        first_order=args.first_order,
        num_adaptation_steps=args.num_steps,
        step_size=args.step_size,
        learn_step_size=False,
        loss_function=benchmark.loss_function,
        device=device,
        num_maml_steps=args.num_maml_steps,
        ensembler=ensembler,
        ensembler_optimizer=ensembler_optimizer,
        ensemble_size=ensemble_size,
        ensembler_scheduler=ensembler_scheduler)

    best_value = None

    # Training loop
    epoch_desc = 'Epoch {{0: <{0}d}}'.format(1 +
                                             int(math.log10(args.num_epochs)))
    for epoch in tqdm(range(args.num_epochs)):
        metalearner.train(meta_train_dataloader,
                          max_batches=args.num_batches,
                          verbose=args.verbose,
                          desc='Training',
                          leave=False)
        results = metalearner.evaluate(meta_val_dataloader,
                                       max_batches=args.num_eval_batches,
                                       verbose=args.verbose,
                                       desc=epoch_desc.format(epoch + 1))

        # Save best model (highest validation accuracy, or lowest outer loss
        # if no accuracy is reported); reset save_model each epoch so a stale
        # value cannot trigger a spurious save.
        save_model = False
        if 'accuracies_after' in results:
            if (best_value is None) or (best_value < results['accuracies_after']):
                best_value = results['accuracies_after']
                save_model = True
        elif (best_value is None) or (best_value > results['mean_outer_loss']):
            best_value = results['mean_outer_loss']
            save_model = True

        if save_model and (args.output_folder is not None):
            with open(args.model_path, 'wb') as f:
                torch.save(benchmark.model.state_dict(), f)

    if hasattr(benchmark.meta_train_dataset, 'close'):
        benchmark.meta_train_dataset.close()
        benchmark.meta_val_dataset.close()
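
Note that this example initializes a wandb run but never logs any metrics. If you wanted the validation results in the dashboard, one plausible (hypothetical) addition at the end of the epoch loop would be:

# Hypothetical addition inside the epoch loop, after evaluate():
wandb.log({'epoch': epoch + 1,
           'val/accuracies_after': results.get('accuracies_after'),
           'val/mean_outer_loss': results.get('mean_outer_loss')})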