def main():
    """Generate images from a saved model on a slice of the dev set.

    Requires ``--name`` (output subdirectory under ``save_dir``) and a
    checkpoint via ``load_path``. Validation happens up front with explicit
    raises instead of ``assert`` (asserts are stripped under ``python -O``),
    and before any directory creation or data loading, so a bad invocation
    fails fast without side effects.
    """
    parser = argparse.ArgumentParser()
    add_test_args(parser)
    add_common_args(parser)
    args = parser.parse_args()
    device = model_utils.get_device()

    # Validate all required arguments before doing any expensive work.
    if args.name is None:
        raise ValueError('args.name is required')
    if args.load_path is None:
        raise ValueError('args.load_path is required')

    # Create the output directory; errors if it already exists so an
    # earlier run's images are never silently overwritten.
    os.makedirs(f'{args.save_dir}/{args.name}')

    # Load dataset from disk and keep only the first num_images entries.
    dev_dl = model_utils.load_test_data(args)
    dev_dl = dev_dl.take(args.num_images)

    # Initialize a model of the configured size.
    model = models.get_model(args.model)(args.size)

    # Restore checkpoint weights and switch to inference mode.
    model = model_utils.load_model(model, args.load_path)
    model.eval()

    # Move model to GPU if necessary
    model.to(device)

    # Generate and save the images.
    make_images(
        dev_dl,
        model,
        args,
    )
# Beispiel #2
def main():
    """Evaluate a saved model checkpoint on the held-out dev split."""
    # Parse shared + test-specific command-line options.
    arg_parser = argparse.ArgumentParser()
    add_test_args(arg_parser)
    add_common_args(arg_parser)
    args = arg_parser.parse_args()

    device = model_utils.get_device()

    # Load dataset from disk and wrap the dev split in a DataLoader.
    x_dev, y_dev, ground_truths, container = model_utils.load_test_data(
        args.dataset_dir, dev_frac=args.dev_frac, max_entries=args.dataset_cap)
    dev_dataset = data.TensorDataset(x_dev, y_dev, ground_truths)
    dev_dl = data.DataLoader(
        dev_dataset,
        batch_size=args.batch_size,
        shuffle=False,  # keep evaluation order deterministic
    )

    # Build the requested architecture.
    model = models.get_model(args.model)()

    # A checkpoint is mandatory for evaluation.
    assert args.load_path is not None
    model = model_utils.load_model(model, args.load_path)
    model.eval()

    # Move model to GPU if necessary.
    model.to(device)

    # Run evaluation.
    test_model(
        dev_dl,
        model,
        args,
        container,
    )
# Beispiel #3
def main():
    """Train a model from scratch (or a checkpoint) and save the weights."""
    # Collect training + common CLI options and register the experiment.
    arg_parser = argparse.ArgumentParser()
    add_train_args(arg_parser)
    add_common_args(arg_parser)
    args = arg_parser.parse_args()
    add_experiment(args)

    device = model_utils.get_device()

    # Load dataset from disk.
    train_ds, dev_ds = model_utils.load_training_data(args)

    # Instantiate the requested architecture at the configured size.
    model = models.get_model(args.model)(size=args.size)

    # Optionally warm-start from a checkpoint.
    if args.load_path is not None:
        model = model_utils.load_model(model, args.load_path)

    # Move model to GPU if necessary.
    model.to(device)

    # Adam with weight decay, stepping the LR down 10x every 5 epochs.
    optimizer = optim.Adam(
        model.parameters(),
        lr=args.learning_rate,
        weight_decay=args.weight_decay,
    )
    scheduler = optim.lr_scheduler.StepLR(optimizer, 5, 0.1, verbose=True)

    # Create a fresh experiment directory (errors if it already exists)
    # and record the arguments used for this run.
    experiment_dir = f'{args.save_path}/{args.experiment}'
    os.makedirs(experiment_dir)
    print(f'Created new experiment: {args.experiment}')
    save_arguments(args, f'{args.save_path}/{args.experiment}/args.txt')

    # Train!
    trained_model = train_model(
        train_ds,
        dev_ds,
        model,
        optimizer,
        scheduler,
        args,
    )

    # Persist the final weights.
    filename = f'{args.save_path}/{args.experiment}/{model.__class__.__name__}_trained.checkpoint'
    model_utils.save_model(trained_model, filename)
# Beispiel #4
def main():
    """Run one audio file through a trained model (on CPU) and play it.

    Requires a model checkpoint (``load_path``) and an input audio file
    (``input_file``). Missing arguments raise ``ValueError`` instead of
    relying on ``assert``, which is stripped when Python runs with ``-O``.
    """
    parser = argparse.ArgumentParser()
    add_test_args(parser)
    add_common_args(parser)
    add_wav_args(parser)
    args = parser.parse_args()

    # Build the architecture and restore checkpoint weights.
    model = models.get_model(args.model)()
    if args.load_path is None:
        raise ValueError("Did not specify model load path")
    model = model_utils.load_model(model, args.load_path)
    model = model.cpu()  # inference runs on CPU here
    model.eval()

    if args.input_file is None:
        raise ValueError("Did not specify input file!")

    print("Processing file...")
    # Process the input through the model; also writes args.output_file.
    audio_data = TrumpRemover.process_through_model(args.input_file,
                                                    args.output_file, model,
                                                    args)
    play(audio_data)
    print("Done!")
def main():
    """Evaluate a checkpoint on the dev split, optionally resolving the
    model, size, and name from a saved experiment directory."""
    arg_parser = argparse.ArgumentParser()
    add_test_args(arg_parser)
    add_common_args(arg_parser)
    args = arg_parser.parse_args()
    device = model_utils.get_device()

    # Need either an explicit checkpoint path or an experiment directory.
    assert args.load_path is not None or args.load_dir is not None

    # When given an experiment directory, recover model/size/name from the
    # arguments that were saved at training time.
    if args.load_dir is not None:
        prev_args: Dict = load_arguments(f'{args.load_dir}/args.txt')
        args.model = prev_args['model']
        args.size = prev_args['size']
        args.name = prev_args['experiment']

    assert args.name is not None
    os.makedirs(f'{args.save_dir}/{args.name}')

    # Load dataset from disk.
    dev_dl = model_utils.load_test_data(args)

    # Build the architecture at the configured size.
    model = models.get_model(args.model)(size=args.size)

    # Prefer an explicit checkpoint path; otherwise fall back to the best
    # validation checkpoint inside the experiment directory.
    if args.load_path is not None:
        checkpoint_path = args.load_path
    else:
        checkpoint_path = f'{args.load_dir}/{args.model}_best_val.checkpoint'
    model = model_utils.load_model(model, checkpoint_path)
    model.eval()

    # Move model to GPU if necessary.
    model.to(device)

    # Run evaluation.
    test_model(
        dev_dl,
        model,
        args,
    )
def main():
    """Train the dual-target (biden/trump) model end-to-end and save it."""
    arg_parser = argparse.ArgumentParser()
    add_train_args(arg_parser)
    add_common_args(arg_parser)
    args = arg_parser.parse_args()
    add_experiment(args)
    device = model_utils.get_device()

    # Load dataset from disk: inputs, two target streams, and a mask each
    # for the train and dev splits, plus a container of metadata.
    x_train, y_train_biden, y_train_trump, mask_train, x_dev, y_dev_biden, y_dev_trump, mask_dev, container = model_utils.load_data(
        args.dataset_dir, dev_frac=args.dev_frac, max_entries=args.dataset_cap)
    train_dl = data.DataLoader(
        data.TensorDataset(x_train, y_train_biden, y_train_trump, mask_train),
        batch_size=args.train_batch_size,
        shuffle=True,  # reshuffle every epoch for training
    )
    dev_dl = data.DataLoader(
        data.TensorDataset(x_dev, y_dev_biden, y_dev_trump, mask_dev),
        batch_size=args.val_batch_size,
        shuffle=False,  # deterministic validation order
    )

    # Build the requested architecture.
    model = models.get_model(args.model)()

    # Optionally warm-start from a checkpoint.
    if args.load_path is not None:
        model = model_utils.load_model(model, args.load_path)

    # Move model to GPU if necessary.
    model.to(device)

    # Adam with weight decay; halve the LR after 30 epochs without
    # improvement in the monitored (min-mode) metric.
    optimizer = optim.Adam(
        model.parameters(),
        lr=args.learning_rate,
        weight_decay=args.weight_decay,
    )
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=0.5,
        patience=30,
        verbose=True,
    )

    # Fresh experiment directory (errors if it already exists) + args log.
    os.makedirs(f'{args.save_path}/{args.experiment}')
    print(f'Created new experiment: {args.experiment}')
    save_arguments(args, f'{args.save_path}/{args.experiment}/args.txt')

    # Train!
    trained_model = train_model(
        train_dl,
        dev_dl,
        model,
        optimizer,
        scheduler,
        args,
    )

    # Save trained model.
    filename = f'{args.save_path}/{args.experiment}/{model.__class__.__name__}_trained.checkpoint'
    model_utils.save_model(trained_model, filename)