Example #1
def testing(dataloader, vis_para):
    # Evaluate the model on one pass over `dataloader` and plot the result.
    # Relies on module-level globals of the surrounding script: model, args,
    # flag, kitti_dataset, tf (transformation utilities) and evaluate.
    forward_visual_result = []
    ground_truth = []
    epoch_loss = 0
    for i_batch, sample_batched in enumerate(dataloader):
        model.zero_grad()
        batch_loss, result = pad_update(
            model,
            sample_batched,
            with_attention_flag=args.attention_flag,
            pad_flag=flag.pad_flag)

        epoch_loss += batch_loss
        temp_f = weighted_mean_motion(result, args.attention_flag)
        gt_f_12 = sample_batched['motion_f_01'].numpy()
        forward_visual_result = np.append(forward_visual_result, temp_f)
        ground_truth = np.append(ground_truth, gt_f_12)
    epoch_loss_mean = epoch_loss * args.batch_size
    # undo the per-channel normalisation before composing poses
    forward_visual_result = forward_visual_result.reshape(-1, 6) * kitti_dataset.motion_stds
    ground_truth = ground_truth.reshape(-1, 6) * kitti_dataset.motion_stds
    forward_visual_result_m = tf.ses2poses(forward_visual_result)
    ground_truth_m = tf.ses2poses(ground_truth)
    errors = evaluate.evaluate(ground_truth_m, forward_visual_result_m)
    vis_para.vis.plot_path_with_gt(forward_visual_result_m, ground_truth_m,
                                   vis_para.win_number, vis_para.title)
    return errors, epoch_loss_mean
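
Every example on this page leans on `tf.ses2poses`, which turns a sequence of per-frame se(3) motion vectors into an accumulated trajectory of flattened 3x4 KITTI-style pose rows (which is why the plots below read x from column 3 and z from column 11). The project's implementation is not shown here; a minimal sketch under those assumptions, using SciPy for the rotation-vector conversion, might look like:

import numpy as np
from scipy.spatial.transform import Rotation

def ses2poses_sketch(ses):
    # Accumulate an N x 6 array of (tx, ty, tz, rx, ry, rz) motions into
    # flattened 3x4 pose rows; the first pose is the identity.
    current = np.eye(4)
    poses = [current[:3, :].reshape(12)]
    for se in ses:
        step = np.eye(4)
        step[:3, :3] = Rotation.from_rotvec(se[3:]).as_matrix()
        step[:3, 3] = se[:3]
        current = current @ step
        poses.append(current[:3, :].reshape(12))
    return np.array(poses)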
Example #2
    print(motion_files_path)
    print(path_files_path)
    # transform: resize to 188x620, convert to tensor, normalise each channel to [-1, 1]
    transforms_ = [
        transforms.Resize((188, 620)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
    kitti_dataset = data.data_loader.SepeDataset(
        path_to_poses_files=motion_files_path,
        path_to_image_lists=path_files_path,
        transform_=transforms_)

    # testing data
    vo = np.loadtxt('dataset/robocar_pose.se')
    vo_m = tf.ses2poses(vo[:, 2:])  # skip the first two columns, compose the se(3) rows into poses
    np.savetxt('dataset/robocar_pose.txt', vo_m)
    motion_files_path_test = args.motion_path_test
    path_files_path_test = args.image_list_path_test
    print(motion_files_path_test)
    print(path_files_path_test)
    # transform: resize to 188x620, convert to tensor, normalise each channel to [-1, 1]
    transforms_ = [
        transforms.Resize((188, 620)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]

    kitti_dataset_test = data.data_loader.SepeDataset(
        path_to_poses_files=motion_files_path_test,
        path_to_image_lists=path_files_path_test,
        transform_=transforms_)
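
This fragment stops after constructing the datasets. SepeDataset is the project's own class; under standard PyTorch conventions the next step would be to wrap both in DataLoaders, roughly as below. The batch size and worker count are illustrative, not from the source; drop_last=True matches the `len(dataset) // batch_size * batch_size` arithmetic in Example #4.

from torch.utils.data import DataLoader

# Hypothetical wiring, not shown in the original fragment.
train_loader = DataLoader(kitti_dataset, batch_size=args.batch_size,
                          shuffle=True, num_workers=4, drop_last=True)
test_loader = DataLoader(kitti_dataset_test, batch_size=args.batch_size,
                         shuffle=False, num_workers=4, drop_last=True)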
Example #3
import sys

import numpy as np
import matplotlib.pyplot as plt
# `tf` is the project's own transformation module (quat2so, ses2poses), not TensorFlow.

data_path = sys.argv[1]
trans = np.load(data_path + '/trans.npy')
quats = np.load(data_path + '/quats.npy')
losses = np.load(data_path + '/patch_losses.npy')
ground_truth = np.loadtxt(data_path + '/09.txt')

print(losses.shape, trans.shape)
assert trans.shape[0] == losses.shape[0] and quats.shape[0] == losses.shape[0]
all_se = np.zeros((trans.shape[0], 6))
for j in range(0, 1):  # runs a single pass; j is unused
    for i in range(0, trans.shape[0]):
        # keep the candidate motion with the lowest patch loss for this frame
        frame_loss = losses[i, :, :]
        min_posi = np.argmin(frame_loss)
        best_tran = trans[i, min_posi, :]
        best_quat = quats[i, min_posi, :]
        best_so = tf.quat2so(best_quat)  # quaternion -> so(3) rotation vector
        best_mov_so = np.concatenate((best_tran, best_so))
        #best_tran_ = trans[i,30+min_posi,:]
        #best_quat_ = quats[i,30+min_posi,:]
        #best_so_   = tf.quat2so(best_quat_)
        #best_mov_so_ = np.concatenate((best_tran_,best_so_))
        #all_se[i,:] = (-best_mov_so_+best_mov_so)/2
        print(best_mov_so)
        all_se[i, :] = best_mov_so
    poses = tf.ses2poses(all_se)
    plt.plot(poses[:, 3], poses[:, 11])  # estimated trajectory, x vs z
    plt.plot(ground_truth[:, 3], ground_truth[:, 11])  # ground truth, x vs z

    plt.show()
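
`tf.quat2so` above is also project code: it maps a quaternion to its so(3) rotation vector so that translation and rotation stack into one 6-vector. Assuming SciPy's (x, y, z, w) component order (the project's convention may differ), an equivalent sketch is:

from scipy.spatial.transform import Rotation

def quat2so_sketch(quat):
    # Quaternion -> rotation vector (axis * angle), i.e. the so(3) log map.
    return Rotation.from_quat(quat).as_rotvec()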
Example #4
    #np.save('trans.npy',all_trans)
    #np.save('quats.npy',all_quats)
    # dataset length rounded down to a whole number of batches (drop_last behaviour)
    data_length = len(kitti_dataset_test) // input_batch_size * input_batch_size
    # undo the per-channel normalisation (the motion means were not added back in the original)
    forward_visual_result = forward_visual_result.reshape(-1, 6) * kitti_dataset_test.motion_stds
    backward_visual_result = backward_visual_result.reshape(-1, 6) * kitti_dataset_test.motion_stds
    forward_visual_opti = forward_visual_opti.reshape(-1, 6) * kitti_dataset_test.motion_stds
    ground_truth = ground_truth.reshape(-1, 6) * kitti_dataset_test.motion_stds
    forward_visual_result_m = tf.ses2poses(forward_visual_result)
    backward_visual_result_m = tf.ses2poses(-backward_visual_result)  # negate backward motions before composing
    forward_visual_opti_m = tf.ses2poses(forward_visual_opti)
    np.savetxt('../saved_data/' + args.model_name + '_path_result.txt',
               forward_visual_result_m)
    np.savetxt('../saved_data/' + args.model_name + '_path_result_opti.txt',
               forward_visual_opti_m)
    np.savetxt('../saved_data/' + args.model_name + '_reli_error.txt',
               reliability_error)
    ground_truth_m = tf.ses2poses(ground_truth)
    #vis.plot_three_path_with_gt(forward_visual_result_m,backward_visual_result_m,forward_visual_opti_m,ground_truth_m,3,'testing set forward')
    vis.plot_path_with_gt(forward_visual_opti_m, ground_truth_m, 15,
                          'testing set forward')

    print(evaluate.evaluate(ground_truth_m, forward_visual_result_m))
    print(np.mean(evaluate.evaluate(ground_truth_m, forward_visual_opti_m), axis=1))
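
`vis.plot_path_with_gt` appears to draw both trajectories into a window identified by `win_number` (seemingly a visdom-style visualiser). For readers without that setup, a plain-matplotlib stand-in showing the same top-down x/z comparison could be:

import matplotlib.pyplot as plt

def plot_path_with_gt_sketch(pred_m, gt_m, title):
    # Both inputs are flattened 3x4 pose rows: column 3 is x, column 11 is z.
    plt.figure()
    plt.plot(pred_m[:, 3], pred_m[:, 11], label='prediction')
    plt.plot(gt_m[:, 3], gt_m[:, 11], label='ground truth')
    plt.axis('equal')
    plt.title(title)
    plt.legend()
    plt.show()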