Example #1
def evaluate(model, _config, _run):

    training_config = _config['training']
    cuda = _config['gpu']['cuda']
    drop_rate = training_config['nx_drop_rate']
    seed = training_config['seed']

    # Create Class Specific Dataset
    dataset = StandardDataset(
        dataset='mnist',
        data_dir='./data',
        batch_size=training_config['batch_size'],
        eval_samples=training_config['eval_samples'],
        validation_size=training_config['validation_size'],
        seed=training_config['seed'])
    eval_dataset = dataset.get_test_loader(bsize=1)

    # Load vision class-specific AE model
    img_ae = get_ae(cuda)

    # Get real and fake images from dataset
    real_images = []
    fake_images = []
    with torch.no_grad():
        for idx, data in enumerate(tqdm(eval_dataset)):
            img = data[0]
            symbol = F.one_hot(torch.tensor(data[1]), 10).float()
            if cuda:
                img = img.cuda()
                symbol = symbol.cuda()
            real_images.append(img[0])
            # Encode Mod Latents
            img_out, _ = model.generate([None, symbol])
            nx_imgs = img_out[1]
            fake_images.append(nx_imgs[0])

    # Compute FID score
    fid_score = e_utils.compute_fid(real=real_images, fake=fake_images, ae_model=img_ae, batch_size=64)
    print("Rank = " + str(fid_score))

    # Log values in mongodb
    _run.log_scalar('FID', fid_score)

    # Save
    fname = "fid_single_score_" + str(drop_rate) + "_" + str(seed)
    with open(os.path.join(exp_dir_path('evaluation'), fname + ".txt"), 'w') as f:
        print("FID = " + str(fid_score), file=f)

    with open(os.path.join(exp_dir_path('evaluation'), fname + ".pt"), 'wb') as f:
        torch.save(fid_score, f)
    ex.add_artifact(os.path.join(exp_dir_path('evaluation'), fname + ".pt"),
                    name=fname + ".pt")

    return
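This snippet (and the ones below) follows the Sacred experiment pattern: captured functions receive _config and _run, scalars are logged with _run.log_scalar, and output files are attached with ex.add_artifact. A minimal sketch of that wiring, assuming an experiment object named ex; the experiment name and config values below are illustrative, not taken from the source:

from sacred import Experiment

ex = Experiment('nexus_evaluation')


@ex.config
def config():
    # Illustrative config layout matching the keys read by evaluate() above
    training = {'seed': 4, 'batch_size': 64, 'eval_samples': 10,
                'validation_size': 0.1, 'nx_drop_rate': 0.2}
    gpu = {'cuda': False}


@ex.automain
def main(_config, _run):
    # A trained model would be loaded here before calling evaluate(model, _config, _run)
    _run.log_scalar('seed', _config['training']['seed'])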
Example #2
def evaluate(model, img_class, _config, _run):

    results_dir = log_dir_path('evaluation')
    training_config = _config['training']
    eval_config = _config['evaluation']
    cuda = _config['gpu']['cuda']
    seed = training_config['seed']

    # Create Dataset
    dataset = StandardDataset(
        dataset='mnist',
        data_dir='./data',
        batch_size=training_config['batch_size'],
        eval_samples=training_config['eval_samples'],
        validation_size=training_config['validation_size'],
        seed=training_config['seed'])
    eval_dataset = dataset.get_test_loader(bsize=1)

    # Setup training
    n_samples = eval_config['eval_samples']
    _run.log_scalar('eval_samples', n_samples)

    # Evaluate reconstruction
    img_res, sym_res = evaluate_recon(model, img_class, eval_dataset,
                                      n_samples, cuda)

    # Log values in mongodb
    _run.log_scalar('Image Recon', img_res['recon'])
    _run.log_scalar('Symbol to Image Recon', img_res['cm'])
    _run.log_scalar('Symbol Recon', sym_res['recon'])
    _run.log_scalar('Image to Symbol Recon', sym_res['cm'])

    # Save
    with open(os.path.join(exp_dir_path('evaluation'), "recon_res.txt"),
              'w') as f:
        print('\nFinal Results:\nImage Recon: ' + str(img_res['recon']) +
              '\t' + '---> Symbol to Image Recon: ' + str(img_res['cm']) +
              '\n' + 'Symbol Recon: ' + str(sym_res['recon']) + '\t' +
              '---> Image to Symbol Recon: ' + str(sym_res['cm']) + '\n',
              file=f)

    with open(os.path.join(exp_dir_path('evaluation'), "img_recon_res.pt"),
              'wb') as f:
        torch.save(img_res, f)
    ex.add_artifact(os.path.join(exp_dir_path('evaluation'),
                                 "img_recon_res.pt"),
                    name='img_recon_res.pt')

    with open(os.path.join(exp_dir_path('evaluation'), "sym_recon_res.pt"),
              'wb') as f:
        torch.save(sym_res, f)
    ex.add_artifact(os.path.join(exp_dir_path('evaluation'),
                                 "sym_recon_res.pt"),
                    name='sym_recon_res.pt')
    return
Example #3
def evaluate(model, _config, _run):

    training_config = _config['training']
    cuda = _config['gpu']['cuda']

    # Create Dataset
    dataset = StandardDataset(dataset='mnist',
                              data_dir='./data',
                              batch_size=training_config['batch_size'],
                              seed=training_config['seed'])

    test_loader = dataset.test_loader

    accuracy = evaluate_acc(model, test_loader, cuda=cuda)
    _run.log_scalar('Test Accuracy', accuracy.item())

    return
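The evaluate_acc helper used above is not shown in these examples. A plausible sketch, assuming the classifier returns per-class logits (hypothetical implementation, not taken from the source):

import torch


def evaluate_acc(model, loader, cuda=False):
    # Hypothetical accuracy helper matching the call site above;
    # returns a tensor so that .item() can be called on the result.
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in loader:
            if cuda:
                images, labels = images.cuda(), labels.cuda()
            preds = model(images).argmax(dim=1)
            correct += (preds == labels).sum()
            total += labels.size(0)
    return correct.float() / total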
Example #4
def train(_config, _run):

    # Read configs
    model_config = _config['model']
    training_config = _config['training']
    gpu_config = _config['gpu']

    # Set seeds
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(training_config['seed'])
    np.random.seed(training_config['seed'])
    random.seed(training_config['seed'])
    torch.cuda.manual_seed(training_config['seed'])

    # Create Model
    nx_info, img_info, sym_info = t_utils.get_specs(model_config)
    model = NexusModel(nx_info=nx_info,
                       img_info=img_info,
                       sym_info=sym_info,
                       use_cuda=gpu_config['cuda'])

    # Create trainer
    trainer = Trainer(model, training_config, gpu_config['cuda'])

    # Create Dataset
    dataset = StandardDataset(
        dataset='fashion',
        data_dir='./data',
        batch_size=training_config['batch_size'],
        eval_samples=training_config['eval_samples'],
        validation_size=training_config['validation_size'],
        seed=training_config['seed'])

    post_epoch_cb = PostEpochCb(model, dataset)

    trainer.train(epochs=training_config['epochs'],
                  dataset=dataset,
                  cuda=gpu_config['cuda'],
                  post_epoch_cb=post_epoch_cb,
                  post_cb=post_cb)
Example #5
def train(_config, _run):

    # Read configs
    training_config = _config['training']
    gpu_config = _config['gpu']
    device = torch.device("cuda" if _config['gpu']['cuda'] else "cpu")
    results_dir = log_dir_path('results')
    artifact_storage_interval = _config['model_debug'][
        'artifact_storage_interval']

    # Set seeds
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(training_config['seed'])
    np.random.seed(training_config['seed'])
    random.seed(training_config['seed'])
    torch.cuda.manual_seed(training_config['seed'])

    # Create Classifier
    model = MNIST_AE(b_dim=training_config['b_dim']).to(device)
    epochs = training_config['epochs']

    # Create Dataset
    dataset = StandardDataset(dataset='mnist',
                              data_dir='./data',
                              batch_size=training_config['batch_size'],
                              seed=training_config['seed'])

    train_loader, val_loader = dataset.train_loader, dataset.val_loader
    test_loader = dataset.get_test_loader(bsize=20)

    # Training objects
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=training_config['learning_rate'])
    best_loss = sys.maxsize

    for epoch in range(1, epochs + 1):

        train_loss = train_epoch(model,
                                 train_loader,
                                 optimizer,
                                 epoch,
                                 cuda=gpu_config['cuda'])
        val_loss = test_epoch(model, val_loader, cuda=gpu_config['cuda'])

        _run.log_scalar('train_loss', train_loss)
        _run.log_scalar('val_loss', val_loss)

        # Best Loss
        is_best = val_loss < best_loss
        best_loss = min(val_loss, best_loss)
        record_checkpoint(model=model,
                          loss=val_loss,
                          best_loss=best_loss,
                          optimizer=optimizer,
                          epoch=epoch,
                          is_best=is_best)

        if epoch % artifact_storage_interval == 0:

            # Data
            with torch.no_grad():

                model.eval()
                data = next(iter(test_loader))[0].to(device)

                # Generate modalities
                image_out = model(data)

                # Vision Recon
                image_comp = torch.cat([
                    data.view(-1, 1, 28, 28).cpu(),
                    image_out.view(-1, 1, 28, 28).cpu()
                ])

                torchvision.utils.save_image(
                    torchvision.utils.make_grid(image_comp,
                                                padding=5,
                                                pad_value=.5,
                                                nrow=data.size(0)),
                    os.path.join(results_dir,
                                 'mnist_ae_mod_e' + str(epoch) + '.png'))
                ex.add_artifact(os.path.join(
                    results_dir, "mnist_ae_mod_e" + str(epoch) + '.png'),
                                name="mnist_ae_recon_e" + str(epoch) + '.png')

    # Final Saving
    ex.add_artifact(os.path.join(log_dir_path('trained_models'),
                                 'mnist_ae_checkpoint.pth.tar'),
                    name='mnist_ae_last_checkpoint.pth.tar')
    ex.add_artifact(os.path.join(log_dir_path('trained_models'),
                                 'best_mnist_ae_model.pth.tar'),
                    name='best_mnist_ae_model.pth.tar')
Example #6
def train(_config, _run):

    # Read configs
    training_config = _config['training']
    gpu_config = _config['gpu']
    device = torch.device("cuda" if _config['gpu']['cuda'] else "cpu")

    # Set seeds
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(training_config['seed'])
    np.random.seed(training_config['seed'])
    random.seed(training_config['seed'])
    torch.cuda.manual_seed(training_config['seed'])

    # Create Classifier
    model = MNISTClassifier().to(device)
    epochs = training_config['epochs']

    # Create Dataset
    dataset = StandardDataset(dataset='mnist',
                              data_dir='./data',
                              batch_size=training_config['batch_size'],
                              seed=training_config['seed'])

    train_loader, val_loader = dataset.train_loader, dataset.val_loader

    # Training objects
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=training_config['learning_rate'])
    best_loss = sys.maxsize

    for epoch in range(1, epochs + 1):

        train_loss = train_epoch(model,
                                 train_loader,
                                 optimizer,
                                 criterion,
                                 epoch,
                                 cuda=gpu_config['cuda'])
        val_loss, val_accuracy = test_epoch(model,
                                            val_loader,
                                            criterion,
                                            cuda=gpu_config['cuda'])

        _run.log_scalar('train_loss', train_loss)
        _run.log_scalar('val_loss', val_loss.item())
        _run.log_scalar('val_accuracy', val_accuracy.item())

        # Best Loss
        is_best = val_loss < best_loss
        best_loss = min(val_loss, best_loss)
        record_checkpoint(model=model,
                          loss=val_loss,
                          best_loss=best_loss,
                          optimizer=optimizer,
                          epoch=epoch,
                          is_best=is_best)

    # Final Saving
    ex.add_artifact(os.path.join(log_dir_path('trained_models'),
                                 'mnist_classifier_checkpoint.pth.tar'),
                    name='mnist_classifier_last_checkpoint.pth.tar')
    ex.add_artifact(os.path.join(log_dir_path('trained_models'),
                                 'best_mnist_classifier_model.pth.tar'),
                    name='best_mnist_classifier_model.pth.tar')
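Both training examples rely on a record_checkpoint helper that is not shown here. A minimal sketch consistent with its call sites, assuming the same log_dir_path helper used above; the prefix argument and file names are illustrative (the real helper evidently produces model-specific names such as mnist_ae_checkpoint.pth.tar):

import os
import shutil

import torch


def record_checkpoint(model, loss, best_loss, optimizer, epoch, is_best,
                      prefix='model'):
    # Hypothetical checkpoint helper matching the keyword arguments used above:
    # always save the latest state, and keep a copy of the best one so far.
    state = {'epoch': epoch,
             'state_dict': model.state_dict(),
             'optimizer': optimizer.state_dict(),
             'loss': loss,
             'best_loss': best_loss}
    last_path = os.path.join(log_dir_path('trained_models'),
                             prefix + '_checkpoint.pth.tar')
    torch.save(state, last_path)
    if is_best:
        shutil.copyfile(last_path,
                        os.path.join(log_dir_path('trained_models'),
                                     'best_' + prefix + '_model.pth.tar'))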