Example #1
                                                offset=offset)

ref_list_len = len(ref_id_list)

# count == -1 selects every reference/target pair in the list
if count == -1:
    count = ref_list_len
#pose_estimate_list_loaded_len = len(pose_estimate_list_loaded)

for i in range(start_count, count):

    ref_id = ref_id_list[i]
    target_id = target_id_list[i]

    # Relative ground-truth pose between the reference and target frames
    SE3_ref_target = Parser.generate_ground_truth_se3(groundtruth_dict,
                                                      image_groundtruth_dict,
                                                      ref_id,
                                                      target_id,
                                                      post_process_object=None)
    # Load the greyscale/depth pair for the reference and target frames
    im_greyscale_reference, im_depth_reference = Parser.generate_image_depth_pair_match(
        dataset_root, rgb_text, depth_text, match_text, ref_id)
    im_greyscale_target, im_depth_target = Parser.generate_image_depth_pair_match(
        dataset_root, rgb_text, depth_text, match_text, target_id)

    # Apply ground-truth post-processing to the relative pose in memory
    post_process_gt.post_process_in_mem(SE3_ref_target)

    # Chain the relative pose onto the running absolute ground-truth pose
    ground_truth_acc = np.matmul(ground_truth_acc, SE3_ref_target)
    ground_truth_list.append(ground_truth_acc)

    ref_image_list.append((im_greyscale_reference, im_depth_reference))
    target_image_list.append((im_greyscale_target, im_depth_target))
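
A minimal, self-contained sketch of the accumulation step used in this loop (the toy se3 helper and its values are assumptions for illustration, not part of the example): each SE3_ref_target is a 4x4 homogeneous transform, and right-multiplying it onto the running product chains the per-pair relative motions into an absolute ground-truth pose, exactly like the np.matmul call above.

import numpy as np

def se3(yaw_deg, translation):
    # Build a toy 4x4 homogeneous transform from a yaw angle and an (x, y, z) translation
    theta = np.deg2rad(yaw_deg)
    T = np.eye(4)
    T[0, 0], T[0, 1] = np.cos(theta), -np.sin(theta)
    T[1, 0], T[1, 1] = np.sin(theta), np.cos(theta)
    T[:3, 3] = translation
    return T

ground_truth_acc = np.eye(4)   # start from the identity pose
ground_truth_list = []

# Two stand-in relative poses playing the role of successive SE3_ref_target matrices
for SE3_ref_target in (se3(5.0, (0.10, 0.00, 0.0)), se3(-3.0, (0.10, 0.02, 0.0))):
    ground_truth_acc = np.matmul(ground_truth_acc, SE3_ref_target)
    ground_truth_list.append(ground_truth_acc)

print(ground_truth_list[-1][:3, 3])   # accumulated translation of the last frame
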
Example #2
#rgb_id_ref = 1305031108.743502
#rgb_id_target = 1305031108.775493
#rgb_id_target = 1305031108.811244

#rgb_id_ref = 1305031119.615017
#rgb_id_target = 1305031119.647903

# selected reference / target rgb timestamps
rgb_id_ref = 1305031106.675279
rgb_id_target = 1305031106.711508

# Associate each rgb timestamp with its closest ground-truth timestamp
image_groundtruth_dict = dict(associate.match(rgb_text, groundtruth_text))

# Absolute ground-truth pose of the reference frame
groundtruth_ts_ref = image_groundtruth_dict[rgb_id_ref]
groundtruth_data_ref = associate.return_dictionary_data(
    groundtruth_text, groundtruth_ts_ref)
SE3_ref = Parser.generate_se3_from_groundtruth(groundtruth_data_ref)

# Absolute ground-truth pose of the target frame
groundtruth_ts_target = image_groundtruth_dict[rgb_id_target]
groundtruth_data_target = associate.return_dictionary_data(
    groundtruth_text, groundtruth_ts_target)
SE3_target = Parser.generate_se3_from_groundtruth(groundtruth_data_target)

# Relative ground-truth transform between the reference and target poses
SE3_ref_target = SE3.pose_pose_composition_inverse(SE3_ref, SE3_target)

# Resolve the rgb and depth file paths for the selected frames
rgb_ref_file_path, depth_ref_file_path = associate.return_rgb_depth_from_rgb_selection(
    rgb_text, depth_text, match_text, dataset_root, rgb_id_ref)
rgb_target_file_path, depth_target_file_path = associate.return_rgb_depth_from_rgb_selection(
    rgb_text, depth_text, match_text, dataset_root, rgb_id_target)

# Load the reference image as greyscale and cast it to the project's image data type
im_greyscale_reference = cv2.imread(
    rgb_ref_file_path, cv2.IMREAD_GRAYSCALE).astype(Utils.image_data_type)
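
Example #2 boils down to composing the two absolute ground-truth poses into a relative motion. A rough numpy equivalent of that step, assuming SE3.pose_pose_composition_inverse returns inv(SE3_ref) @ SE3_target (an assumption about this helper, not something stated in the snippet):

import numpy as np

def relative_pose(SE3_ref, SE3_target):
    # T_ref_target = T_world_ref^{-1} @ T_world_target
    # maps coordinates expressed in the target frame into the reference frame
    return np.matmul(np.linalg.inv(SE3_ref), SE3_target)

# Toy absolute poses (identity rotation, different translations) standing in for the
# poses parsed from the ground-truth file.
SE3_ref = np.eye(4)
SE3_ref[:3, 3] = (1.0, 0.0, 0.0)
SE3_target = np.eye(4)
SE3_target[:3, 3] = (1.2, 0.1, 0.0)

SE3_ref_target = relative_pose(SE3_ref, SE3_target)
print(SE3_ref_target[:3, 3])   # -> [0.2 0.1 0. ]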