                                            offset=offset)

ref_list_len = len(ref_id_list)
if count == -1:
    count = ref_list_len
#pose_estimate_list_loaded_len = len(pose_estimate_list_loaded)

for i in range(start_count, count):
    ref_id = ref_id_list[i]
    target_id = target_id_list[i]

    SE3_ref_target = Parser.generate_ground_truth_se3(groundtruth_dict, image_groundtruth_dict, ref_id, target_id, post_process_object=None)

    # load the matched (greyscale, depth) pair for the reference and target frames
    im_greyscale_reference, im_depth_reference = Parser.generate_image_depth_pair_match(
        dataset_root, rgb_text, depth_text, match_text, ref_id)
    im_greyscale_target, im_depth_target = Parser.generate_image_depth_pair_match(
        dataset_root, rgb_text, depth_text, match_text, target_id)

    post_process_gt.post_process_in_mem(SE3_ref_target)

    # chain the relative ground-truth pose onto the accumulated trajectory
    ground_truth_acc = np.matmul(ground_truth_acc, SE3_ref_target)
    ground_truth_list.append(ground_truth_acc)

    ref_image_list.append((im_greyscale_reference, im_depth_reference))
    target_image_list.append((im_greyscale_target, im_depth_target))
image_groundtruth_dict = dict(associate.match(rgb_text, groundtruth_text))

# ground-truth prior: rotation of pi about z, padded to a homogeneous 4x4 with zero translation
#se3_ground_truth_prior = np.transpose(SE3.quaternion_to_s03(0.6132, 0.5962, -0.3311, -0.3986))
se3_ground_truth_prior = SE3.makeS03(0, 0, pi)
se3_ground_truth_prior = np.append(se3_ground_truth_prior, np.zeros((3, 1), dtype=Utils.matrix_data_type), axis=1)
se3_ground_truth_prior = SE3.append_homogeneous_along_y(se3_ground_truth_prior)
#se3_ground_truth_prior = SE3.invert(se3_ground_truth_prior)
se3_ground_truth_prior[0:3, 3] = 0

# accumulate the relative ground-truth poses and load the matched greyscale/depth image pairs
for i in range(0, len(ref_id_list)):
    ref_id = ref_id_list[i]
    target_id = target_id_list[i]

    SE3_ref_target = Parser.generate_ground_truth_se3(groundtruth_text, image_groundtruth_dict, ref_id, target_id, None)

    im_greyscale_reference, im_depth_reference = Parser.generate_image_depth_pair(dataset_root, rgb_text, depth_text, match_text, ref_id)
    im_greyscale_target, im_depth_target = Parser.generate_image_depth_pair(dataset_root, rgb_text, depth_text, match_text, target_id)

    ground_truth_acc = np.matmul(SE3_ref_target, ground_truth_acc)
    ground_truth_list.append(ground_truth_acc)

    ref_image_list.append((im_greyscale_reference, im_depth_reference))
    target_image_list.append((im_greyscale_target, im_depth_target))

im_greyscale_reference_1, im_depth_reference_1 = ref_image_list[0]
(image_height, image_width) = im_greyscale_reference_1.shape

se3_identity = np.identity(4, dtype=Utils.matrix_data_type)

# image gradient induces a coordinate system where y is flipped i.e. fx and fy have to be negated here
intrinsic_identity = Intrinsic.Intrinsic(-517.3, -516.5, 318.6, 239.5)  # freiburg_1