# ---- Example no. 1 ----
# Dataset location, relative to the test directory.
data_dir = os.path.join(testdir, '..', '..', 'data', 'dataset')

# Loader hyper-parameters: one subsequence per batch, 50-frame windows
# advanced one frame at a time.
batch_size = 1
subseq_length = 50
step_size = 1

# Sequences that have ground-truth poses, split into train and test
# following the paper's partition.
train_seqs = ['00', '02', '08', '09']
test_seqs = ['03', '04', '05', '06', '07', '10']

# Batch loader that serves the dataset one epoch at a time.
epoch_data_loader = Epoch(
    datadir=data_dir,
    flowdir=os.path.join(data_dir, "flows"),
    train_seq_nos=train_seqs,
    test_seq_nos=test_seqs,
    window_size=subseq_length,
    step_size=step_size,
    batch_size=batch_size)

# Sanity check: read back the partitioned ground-truth poses for every
# sequence, stitch the windows back into one long trajectory, and write
# it under test_results/sanity_check.  End to end this should be the
# identity function.
out_dir = os.path.join(testdir, '..', '..', 'test_results', 'sanity_check')
for seq_num in train_seqs + test_seqs:
    samples = epoch_data_loader.get_testing_samples(seq_num)
    pose_labels = np.array([label for _, label in samples])

    subseq_preds_to_full_pred(
        pose_labels, os.path.join(out_dir, seq_num + '.csv'))
# ---- Example no. 2 ----
# Report the shapes of the current batch tensors.
print("[Batch] X.shape = {}".format(X.shape))
print("[Batch] Y.shape = {}".format(Y.shape))

# Draw one (input, label) pair at random from the batch.
idx = np.random.randint(0, len(X))
sample = X[idx]
label = Y[idx]

print("[Sample] X.shape = {}".format(sample.shape))
print("[Sample] Y.shape = {}".format(label.shape))

print("Example flow pixel value: {}".format(sample[0, 0, 0, 0:]))

# Basic value statistics over the sampled input.
print("Sample min: {}".format(np.min(sample)))
print("Sample max: {}".format(np.max(sample)))
print("Sample mean: {}".format(np.mean(sample)))

# Visualise both flow channels of the first two frames as a 2x2 grid
# (row = frame index, column = flow channel).
fig, axes = plt.subplots(2, 2)
for row in range(2):
    for col in range(2):
        axes[row, col].imshow(sample[row][:, :, col])

plt.savefig('sample.png')

# Split an entire test sequence.
# NOTE(review): elsewhere get_testing_samples() is iterated as (x, y)
# pairs, but here it is unpacked as a single 2-tuple — confirm which
# contract the loader actually has.
test_x, test_y = epoch_data_loader.get_testing_samples('01')

print(len(test_y))
print(test_y[:10])
# ---- Example no. 3 ----
    # Training has finished: persist the trained weights to disk so the
    # 'test' branch below can reload them later.
    print("TRAINING FINISHED. SAVING SNAPSHOT TO {}".format(snapshot_path))
    model.save(snapshot_path)

    # Notify the TensorBoard callback that training ended so it can
    # flush and close its log writers.
    tensorboard.on_train_end(None)

# NOTE(review): the matching `if` for this `elif` begins above this
# excerpt, and the loop below is truncated at the end of it.
elif args['mode'] == 'test':

    for kitti_seq in test_seqs:

        # Output file the estimated poses for this sequence will be
        # written to.
        out_fname = "test_results/{}.csv".format(kitti_seq)

        losses = []
        estimated_poses = []
        # Iterate (input, ground-truth pose) pairs for this sequence.
        for X, Y in epoch_data_loader.get_testing_samples(kitti_seq):

            # batch size of 1: prepend a batch axis so the model sees
            # a (1, ...) input.
            X = X[np.newaxis, :]

            #print("TESTING X SIZE = {}".format(X.shape))

            # get pose estimate for the single-sample batch
            estimated_batch = model.predict_on_batch(X)

            # TODO: Fix this to be more general for different batch sizes
            estimated_pose = estimated_batch[0]

            estimated_poses.append(estimated_pose)

            # Get testing loss