Code example #1
0
 def translate_z_neg(vis):
     """Nudge the currently selected cloud along -Z by `offset`.

     Builds a 4x4 translation, applies it to the active point cloud,
     refreshes that geometry in the visualizer, and folds the shift into
     the cloud's accumulated transformation matrix.
     """
     global CURRENT_INDEX, offset
     shift = c3D.makeTranslation4x4(np.asarray([0., 0., -offset]))
     cloud = pcd_list[CURRENT_INDEX]
     cloud.transform(shift)
     vis.update_geometry(cloud)
     # Left-multiplying by a pure translation leaves the rotation part
     # untouched, so only the translation column needs accumulating.
     all_transformations[CURRENT_INDEX][0:3, 3] += shift[0:3, 3]
Code example #2
0
    # Load the object model point cloud that will be aligned to the scene.
    cloud_m = o3.read_point_cloud(args.model)
    """ if you use object model with meter scale, try this code to convert meter scale."""
    #cloud_m = c3D.Scaling( cloud_m, 0.001 )

    # Downsample the model so later processing works on fewer points.
    cloud_m_ds = o3.voxel_down_sample(cloud_m, voxel_size)
    """Loading of the initial transformation"""
    initial_trans = np.identity(4)
    if os.path.exists(args.init):
        # A saved pose exists: pre-compose it into the running transform.
        initial_trans = c3D.load_transformation(args.init)
        print('Use initial transformation\n', initial_trans)
        all_transformation = np.dot(initial_trans, all_transformation)
    else:
        # if an initial transformation is not available,
        # the object model is moved to its center.
        cloud_m_c, offset = c3D.Centering(cloud_m_ds)
        mat_centering = c3D.makeTranslation4x4(-1.0 * offset)
        all_transformation = np.dot(mat_centering, all_transformation)

    # Apply the accumulated pose to a copy so the original stays untouched.
    CLOUD_ROT = copy.deepcopy(cloud_m_ds)
    CLOUD_ROT.transform(all_transformation)

    # Project the posed cloud into image space using the camera intrinsics.
    mapping = Mapping('./data/realsense_intrinsic.json')
    img_mapped = mapping.Cloud2Image(CLOUD_ROT)
    """Mouse event"""
    window_name = '6DoF Pose Annotator'
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.setMouseCallback(window_name, mouse_event,
                         [window_name, im_color, im_depth, mapping])

    # Initial render, then enter the interactive event loop (body not shown here).
    generateImage(mapping, im_color)
    while (True):
Code example #3
0
    # Populate the module-level mesh list with one mesh per ground-truth path.
    global all_pcd_list
    for path in gt_paths:
        all_pcd_list.append(o3d.io.read_triangle_mesh(path))


# Ground-truth meshes, filled in by load_gt_meshes().
all_pcd_list = []
load_gt_meshes(obj_gt_paths)

# Object parameters
CURRENT_INDEX = 0            # index of the object currently being posed
MAX_OBJ = len(obj_gt_paths)  # total number of annotatable objects

# Annotation parameters
offset = 0.01                # per-keypress translation step
rot_step = np.pi * 0.1       # per-keypress rotation step (radians)
# Start every object at the identity pose (zero translation).
all_transformations = [
    c3D.makeTranslation4x4([0, 0, 0]) for _ in obj_gt_paths
]

# Pipeline parameters
seq = 0
step = -1
path = './data/'
scene_mesh = None
visible_objects = True

# Cubic viewing volume of side `view_size`.
view_size = 2.
view_bounds = np.array([view_size, view_size, view_size])

# Load scene data
def load_step_data(path, seq, step, real_data=False):
    if not real_data: