Example no. 1
        solver_manager.join()  # wait for the solver to finish this iteration
        print('iteration', i + 1, 'complete')

        # take the latest solver output as the prior for the next iteration
        motion_cov_inv = solver_manager.motion_cov_inv_final
        #motion_cov_inv = np.add(motion_cov_inv, solver_manager.motion_cov_inv_final)
        twist_prior = np.multiply(1.0, solver_manager.twist_final)
        #twist_prior = np.add(twist_prior, solver_manager.twist_final)
        # accumulate the relative SE(3) estimate into the running absolute pose
        #se3_estimate_acc = np.matmul(solver_manager.SE3_est_final, se3_estimate_acc)
        se3_estimate_acc = np.matmul(se3_estimate_acc, solver_manager.SE3_est_final)
        pose_estimate_list.append(se3_estimate_acc)
        vo_twist_list.append(solver_manager.twist_final)
        #print(solver_manager.twist_final)
    elif only_steering:
        # no visual odometry solve for this pair: use the steering-based twist instead
        vo_twist_list.append(linear_twist)
print("visualizing..")

if calc_vo or only_steering:
    FileIO.write_vo_output_to_file(name,info,output_dir_path,vo_twist_list)

visualizer.visualize_ground_truth(clear=True,draw=False)
if calc_vo:
    visualizer.visualize_poses(pose_estimate_list, draw= False)
visualizer.show()
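
As a side note, here is a minimal self-contained sketch (names are illustrative, not from the repository) of the pose-accumulation pattern used above, where each relative 4x4 SE(3) estimate is right-multiplied onto the running absolute pose:

import numpy as np

def accumulate_relative_poses(relative_transforms, initial_pose=None):
    # Chain 4x4 relative SE(3) transforms into absolute poses,
    # mirroring the se3_estimate_acc update in the loop above.
    pose = np.identity(4) if initial_pose is None else initial_pose.copy()
    absolute_poses = []
    for T_rel in relative_transforms:
        pose = np.matmul(pose, T_rel)      # right-multiply the newest relative motion
        absolute_poses.append(pose.copy())
    return absolute_poses

# usage: two 0.5 m translations along x compose into a 1.0 m translation
T = np.identity(4)
T[0, 3] = 0.5
poses = accumulate_relative_poses([T, T])
assert np.isclose(poses[-1][0, 3], 1.0)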







Example no. 2
ground_truth_acc = np.identity(4, Utils.matrix_data_type)
#ground_truth_acc[0:3,0:3] = so3_prior
se3_estimate_acc = np.identity(4, Utils.matrix_data_type)
se3_estimate_acc_2 = np.identity(4, Utils.matrix_data_type)
se3_estimate_acc_3 = np.identity(4, Utils.matrix_data_type)
se3_estimate_acc_4 = np.identity(4, Utils.matrix_data_type)
pose_estimate_list = []
pose_estimate_list_2 = []
pose_estimate_list_3 = []
pose_estimate_list_4 = []
ground_truth_list = []
ref_image_list = []
target_image_list = []
encoder_list = []
vo_twist_list = []
pose_estimate_list_loaded, encoder_list_loaded = FileIO.load_vo_from_file(
    data_file_path)
if data_file_2:
    pose_estimate_list_loaded_2, encoder_list_loaded_2 = FileIO.load_vo_from_file(
        data_file_path_2)
if data_file_3:
    pose_estimate_list_loaded_3, encoder_list_loaded_3 = FileIO.load_vo_from_file(
        data_file_path_3)
if data_file_4:
    pose_estimate_list_loaded_4, encoder_list_loaded_4 = FileIO.load_vo_from_file(
        data_file_path_4)

start = ListGenerator.get_index_of_id(start_idx, rgb_files)
ref_id_list, target_id_list, ref_files_failed_to_load = ListGenerator.generate_files_to_load_match(
    rgb_files,
    start=start,
    max_count=max_count,
    offset=offset,
    ground_truth_dict=image_groundtruth_dict,
    match_dict=match_dict,
    reverse=False)
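
The body of ListGenerator.generate_files_to_load_match is not shown in these examples; purely as an illustration of the idea (pairing each reference frame with a target frame a fixed offset ahead), a hypothetical standalone helper could look like this:

def pair_reference_and_target(frame_ids, start=0, max_count=None, offset=1):
    # Hypothetical sketch, not the repository's implementation:
    # pair frame i with frame i + offset, up to max_count pairs.
    if max_count is None:
        max_count = len(frame_ids) - start - offset
    stop = min(start + max_count, len(frame_ids) - offset)
    ref_ids = [frame_ids[i] for i in range(start, stop)]
    target_ids = [frame_ids[i + offset] for i in range(start, stop)]
    return ref_ids, target_ids

# usage: pair_reference_and_target(['0001', '0002', '0003'], offset=1)
# -> (['0001', '0002'], ['0002', '0003'])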
Example no. 3
alpha_step = float(parameters[3])
# caution: bool() of a non-empty string is always True (bool('False') == True);
# see the flag-parsing sketch below if these parameters are read in as strings
image_range_offset_start = bool(parameters[5])
use_robust = bool(parameters[6])
use_motion_prior = bool(parameters[7])
use_ackermann = bool(parameters[8])
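
# Minimal sketch (hypothetical helper, not part of the original script) of parsing
# textual boolean flags explicitly instead of relying on bool():
def parse_bool_flag(value):
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('1', 'true', 'yes', 'on')
# e.g. parse_bool_flag('False') -> False, parse_bool_flag('True') -> True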

ground_truth_acc = np.identity(4, Utils.matrix_data_type)
#ground_truth_acc[0:3,0:3] = so3_prior
se3_estimate_acc = np.identity(4, Utils.matrix_data_type)
pose_estimate_list = []
ground_truth_list = []
ref_image_list = []
target_image_list = []
encoder_list = []
vo_twist_list = []
pose_estimate_list_loaded, encoder_list_loaded = FileIO.load_vo_from_file(
    data_file_path)

start = ListGenerator.get_index_of_id(start_idx, rgb_files)

ref_id_list, target_id_list, ref_files_failed_to_load = ListGenerator.generate_files_to_load_match(
    rgb_files,
    start=start,
    max_count=max_count,
    offset=offset,
    ground_truth_dict=image_groundtruth_dict,
    match_dict=match_dict,
    reverse=False)
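
# For context, a rough sketch (hypothetical helper, independent of ListGenerator)
# of what a per-frame time-step list typically contains: the difference between
# consecutive frame timestamps.
def time_steps_from_timestamps(timestamps):
    return [t_next - t_prev for t_prev, t_next in zip(timestamps[:-1], timestamps[1:])]
# e.g. time_steps_from_timestamps([0.0, 0.05, 0.11]) -> approximately [0.05, 0.06]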

dt_list = ListGenerator.generate_time_step_list(rgb_files,
                                                start=start,
                                                max_count=max_count,