Example #1
import argparse

import torch
from torch.utils.data import DataLoader, SequentialSampler

# Project-local helpers used below (defined elsewhere): LieDetector, pool_data,
# construct_datasets, pad_and_sort_batch, train, minmax_normalize, get_accuracy.

def main(args: argparse.Namespace) -> None:
    """
    Run the training using the given arguments.
    """
    model = LieDetector(args.input_size, args.hidden_size)

    # Initialise the forget-gate slice of each bias vector to -1. PyTorch packs
    # LSTM biases as [input, forget, cell, output] gates, so indices n//4 to
    # n//2 address the forget gate (assuming LieDetector wraps an nn.LSTM).
    for name, param in model.named_parameters():
        if name.startswith("bias"):
            n = param.size(0)
            start, end = n // 4, n // 2
            param.data[start:end].fill_(-1.)

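    # Run on the GPU when one is available, unless args.no_cuda disables it.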
    if not args.no_cuda and torch.cuda.is_available():
        args.device = torch.device("cuda")
    else:
        args.device = torch.device("cpu")

    model = model.to(args.device)
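    # Gather raw examples from the source and split them into train/val/test sets.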
    inputs, labels = pool_data(args.source)
    train_dataset, val_dataset, test_dataset = construct_datasets(
        inputs, labels, args.train_split, args.val_split)

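    # Each split gets its own loader; batches are padded and length-sorted by the
    # shared collate function, and SequentialSampler preserves dataset order.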
    train_dataloader = DataLoader(
        train_dataset, args.batch_size, sampler=SequentialSampler(train_dataset), collate_fn=pad_and_sort_batch)
    val_dataloader = DataLoader(
        val_dataset, args.batch_size, sampler=SequentialSampler(val_dataset), collate_fn=pad_and_sort_batch)
    test_dataloader = DataLoader(
        test_dataset, args.batch_size, sampler=SequentialSampler(test_dataset), collate_fn=pad_and_sort_batch)

    train(model, args, train_dataloader, val_dataloader)

    # Evaluate on the held-out test set with gradients disabled.
    model.eval()
    accuracies = torch.empty(len(test_dataloader))
    with torch.no_grad():
        for i, (inputs, inputs_lengths, labels) in enumerate(test_dataloader):
            inputs = minmax_normalize(inputs)
            # Swap the batch and sequence axes (batch-first -> sequence-first).
            inputs = inputs.permute(1, 0, 2).contiguous()
            logits = model(inputs.float().to(args.device), inputs_lengths.to(args.device))
            accuracies[i] = get_accuracy(logits, labels.to(args.device))

    print(f"Model test accuracy: {accuracies.mean():.3f}")
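pad_and_sort_batch, minmax_normalize, and get_accuracy above are project-local helpers whose definitions are not shown. As a rough guide, a pad-and-sort collate function compatible with the loops above could look like the following sketch; the item layout, names, and shapes here are assumptions, not the original implementation:

import torch
from torch.nn.utils.rnn import pad_sequence

def pad_and_sort_batch(batch):
    # Assumed item layout: (sequence of shape [seq_len, features], label).
    sequences, labels = zip(*batch)
    lengths = torch.tensor([seq.size(0) for seq in sequences])
    # Sort longest-first, the order pack_padded_sequence expects downstream.
    lengths, order = lengths.sort(descending=True)
    sequences = [sequences[i] for i in order]
    labels = torch.tensor([labels[i] for i in order])
    # Pad every sequence to the batch maximum: (batch, max_len, features).
    return pad_sequence(sequences, batch_first=True), lengths, labels

Returning (inputs, lengths, labels) in that order matches how the loops above unpack each batch.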
Example #2

import itertools

import matplotlib.pyplot as plt
import numpy as np

# Project-local modules used by this example (defined elsewhere): dataset, utils.
import dataset
import utils

print('Loading image data...')
img_data = utils.extract_images(fname='bin/img_data_extend.pkl', only_digits=False)
# img_data = utils.extract_images(fname='bin/img_data.pkl', only_digits=False)
# img_data_sets = dataset.construct_datasets(img_data)
print('Loading joint motion data...')
fa_data, fa_mean, fa_std = utils.extract_jnt_fa_parms(fname='bin/jnt_ik_fa_data_extend.pkl', only_digits=False)
# fa_data, fa_mean, fa_std = utils.extract_jnt_fa_parms(fname='bin/jnt_fa_data.pkl', only_digits=False)
# Z-score-normalize the joint motion parameters using the extracted statistics.
fa_data_normed = (fa_data - fa_mean) / fa_std

# fa_data_sets = dataset.construct_datasets(fa_data_normed)
print('Constructing dataset...')
# Concatenate image data and normalized motion parameters into one array.
aug_data = np.concatenate((img_data, fa_data_normed), axis=1)

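# Hold out 10% of the combined data for validation and 10% for testing.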
data_sets = dataset.construct_datasets(aug_data, validation_ratio=.1, test_ratio=.1)
print('Start training...')
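# Hyperparameter grid; the commented-out lists record values from earlier sweeps.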
batch_sizes = [64]
#n_z_array = [3, 5, 10, 20]
n_z_array = [4]
# assoc_lambda_array = [1, 3, 5, 10]
# assoc_lambda_array = [.1, .3, .5]
#assoc_lambda_array = [15, 40]
assoc_lambda_array = [8]
#weights_array = [[2, 1], [5, 1], [10, 1]]
# weights_array=[[30, 1], [50, 1]]
weights_array = [[50, 1]]

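# matplotlib style used for any figures produced during the sweep.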
plt.style.use('ggplot')

for batch_size, n_z, assoc_lambda, weights in itertools.product(batch_sizes, n_z_array, assoc_lambda_array, weights_array):