Example #1
from open3d import geometry, utility, visualization  # assumed imports (Open3D >= 0.8)


def visualize_pc(pc, mask=None):
    '''
    Visualize a point cloud, optionally colored by a per-point scalar mask.

    pc   - torch tensor, 3 x n
    mask - torch tensor, 1 x n, values in [0, 1]
    '''
    pc = pc.permute(1, 0)  # n x 3
    pc = pc.cpu().detach().numpy()
    pcd = geometry.PointCloud()
    pcd.points = utility.Vector3dVector(pc)
    if mask is not None:
        mask = mask / mask.max()
        # mask = mask.pow(0.5)  # dilate the values
        mask = mask.expand(3, -1).permute(1, 0)  # n x 3
        red = mask.new(mask.shape).zero_()
        red[:, 0] += 1.0
        green = mask.new(mask.shape).zero_()
        green[:, 1] += 1.0
        blue = mask.new(mask.shape).zero_()
        blue[:, 2] += 1.0
        less = ((2 * mask).mul(green) + (1.0 - (2 * mask)).mul(blue)).mul(
            mask.lt(0.5).float())
        more = (((mask - 0.5) * 2).mul(red) +
                (1.0 - ((mask - 0.5) * 2)).mul(green)).mul(
                    mask.ge(0.5).float())
        mask = less + more
        mask_max, _ = mask.max(dim=1, keepdim=True)
        mask = mask.div(mask_max)
        mask = mask.cpu().detach().numpy()
        pcd.colors = utility.Vector3dVector(mask)
    # draw the cloud even when no mask was supplied
    visualization.draw_geometries([pcd])
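
A minimal usage sketch for the helper above (assumes PyTorch and Open3D are installed; the point count and mask values are arbitrary):

import torch

pc = torch.rand(3, 1000)    # 3 x n point cloud
mask = torch.rand(1, 1000)  # 1 x n per-point scalar in [0, 1]
visualize_pc(pc, mask)      # colors run blue -> green -> red as the mask value increases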
Example #2
def run_intro(args):
    text_list = [
        'Thank you for participating in the study.',
        'You will be shown two hand/object pairs',
        'and asked to choose which one looks more natural.',
        'Note, some of the differences are very subtle,',
        'you may find focusing on the fingertips is helpful.',
        'Please press the indicated key to choose.', ' ',
        'The view perspective can be shifted by:',
        'Clicking and dragging to rotate',
        'Scrolling to zoom, and Ctrl+clicking to pan',
        'You can practice here.', ' ',
        'Press Q to proceed to practice examples'
    ]

    geom_list = []
    for idx, t in enumerate(text_list):
        geom_list.append(
            text_3d(t, pos=[0, -0.04 * idx, 0], font_size=40, density=2))

    o3dv.draw_geometries(geom_list,
                         zoom=0.45,
                         front=[0, 0, 1],
                         lookat=[0.6, -0.2, 0],
                         up=[0, 1, 0])
Example #3
import numpy as np
from open3d import visualization  # assumed imports (Open3D >= 0.8)
from open3d.utility import Vector3dVector


def grid_meshes_lists_visulation(pcds, viz=False):
    """
    `pcds` is a list of lists: each inner element is an Open3D mesh, and each
    inner list becomes one row of the grid. The meshes are normalized in place,
    shifted into a grid layout, and returned (and optionally visualized).
    """
    # First normalize them
    for pcd_list in pcds:
        for index, p in enumerate(pcd_list):
            maxx = np.max(np.array(p.vertices), 0)
            minn = np.min(np.array(p.vertices), 0)
            points = np.array(p.vertices) - np.mean(np.array(p.vertices), 0).reshape(1, 3)
            points = points / np.linalg.norm(maxx - minn)
            p.vertices = Vector3dVector(points)

    new_meshes = []
    for j in range(len(pcds)):
        for i in range(len(pcds[j])):
            p = pcds[j][i]
            shift_y = j * 1.2
            shift_x = i * 1.2
            p.vertices = Vector3dVector(
                np.array(p.vertices) + np.array([shift_x, shift_y, 0])
            )
            new_meshes.append(p)
    if viz:
        visualization.draw_geometries(new_meshes)
    return new_meshes
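
A minimal usage sketch, assuming the imports added above and an Open3D build that provides these TriangleMesh factory functions:

import open3d as o3d

# Hypothetical example: lay four meshes out in a 2 x 2 grid and show them.
row_0 = [o3d.geometry.TriangleMesh.create_sphere(), o3d.geometry.TriangleMesh.create_box()]
row_1 = [o3d.geometry.TriangleMesh.create_cone(), o3d.geometry.TriangleMesh.create_torus()]
grid_meshes_lists_visulation([row_0, row_1], viz=True)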
Example #4
def draw_registration_result(source, target, transformation):
    source_temp = copy.deepcopy(source)
    target_temp = copy.deepcopy(target)
    source_temp.paint_uniform_color([1, 0.706, 0])
    target_temp.paint_uniform_color([0, 0.651, 0.929])
    source_temp.transform(transformation)
    draw_geometries([source_temp, target_temp])
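
A minimal usage sketch; it assumes `copy`, `numpy`, and `draw_geometries` (e.g. `from open3d.visualization import draw_geometries` on Open3D >= 0.8) are in scope, and `source.pcd` is a hypothetical input file:

import copy

import numpy as np
import open3d as o3d

source = o3d.io.read_point_cloud("source.pcd")              # hypothetical input file
target = copy.deepcopy(source).translate((0.05, 0.0, 0.0))  # synthetic "target" for illustration
draw_registration_result(source, target, np.eye(4))         # identity as a stand-in transform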
Example #5
def visualize_registration(i, j):
    try:
        from open3d.geometry import PointCloud
        from open3d.utility import Vector3dVector
        from open3d.visualization import draw_geometries
    except ImportError:  # fall back to the pre-0.8 flat Open3D namespace
        from open3d import PointCloud, Vector3dVector, draw_geometries
    model_ply = PLY_FILES[i]
    scene_ply = PLY_FILES[j]
    model_txt = ply2txt(model_ply)
    scene_txt = ply2txt(scene_ply)
    model = np.loadtxt(model_txt)
    scene = np.loadtxt(scene_txt)
    print(model_txt, scene_txt)
    pcloud_model = PointCloud()
    pcloud_model.points = Vector3dVector(model)
    pcloud_model.paint_uniform_color([1, 0, 0])  # red
    pcloud_scene = PointCloud()
    pcloud_scene.points = Vector3dVector(scene)
    pcloud_scene.paint_uniform_color([0, 1, 0])  # green
    draw_geometries([pcloud_model, pcloud_scene])
    res = run_rigid_pairwise(BINARY_FULLPATH, model, scene, CONFIG_FILE)
    print(res)
    transformed = np.loadtxt(os.path.join(TMP_PATH, 'transformed_model.txt'))
    pcloud_transformed = PointCloud()
    pcloud_transformed.points = Vector3dVector(transformed)
    pcloud_transformed.paint_uniform_color([0, 0, 1])  # blue
    draw_geometries([pcloud_transformed, pcloud_scene])
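
A usage sketch; it assumes the module-level globals referenced above (`PLY_FILES`, `BINARY_FULLPATH`, `CONFIG_FILE`, `TMP_PATH`) and the `ply2txt`/`run_rigid_pairwise` helpers are already configured:

# Hypothetical example: register the first PLY file (model) against the second (scene).
visualize_registration(0, 1)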
Example #6
def try_open3d_viz(args):
    """Try to visualize sparse 3d point cloud using open3d"""
    try:
        from open3d import visualization as viz
        from open3d import io
        ply_file = os.path.join(args.outdir, "sparse_inliers.ply")
        pc = io.read_point_cloud(ply_file)
        viz.draw_geometries([pc])
    except ImportError:
        print("Failed to import the `open3d` package; cannot visualize the"
              " point cloud. Try installing open3d, or use MeshLab to view the"
              " ply file.")
Example #7
def run_exit(args):
    text_list = [
        'The study is completed.', 'Thank you for participating!', ' ',
        'Press Q to exit.'
    ]

    geom_list = []
    for idx, t in enumerate(text_list):
        geom_list.append(
            text_3d(t, pos=[0, -0.04 * idx, 0], font_size=40, density=2))

    o3dv.draw_geometries(geom_list,
                         zoom=0.45,
                         front=[0, 0, 1],
                         lookat=[0.3, -0.05, 0],
                         up=[0, 1, 0])
Example #8
    def estimate_3Dpose(self):

        sample_pcd = self.sample_pcd
        XYZ_model = self.XYZ_model
        diameter_model = self.diameter_model
        model_pcd = self.model_pcd

        numpy_point_data = np.asarray(sample_pcd.points)
        print(numpy_point_data.shape)
        XYZ_min = np.min(numpy_point_data, axis=0)
        XYZ_max = np.max(numpy_point_data, axis=0)
        print('minimum and maximum: ', XYZ_min, XYZ_max)
        diameter_sample = np.sqrt(
            np.square(XYZ_max[0] - XYZ_min[0]) +
            np.square(XYZ_max[1] - XYZ_min[1]) +
            np.square(XYZ_max[2] - XYZ_min[2]))
        numpy_point_data = np.reshape(numpy_point_data, (-1, 3))
        sample_pcd.points = utility.Vector3dVector(numpy_point_data)
        # sample_pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        # Flip it, otherwise the point cloud will be upside down
        sample_pcd.paint_uniform_color([0, 1, 0])

        ratio = diameter_sample / diameter_model  # Using diameter from pointcloud
        print('Ratio = mushroom_diameter/mushroom_diameter_model: ', ratio)
        XYZ_model_new = XYZ_model * ratio  # Resize the model points to match that of a sample
        model_pcd.points = utility.Vector3dVector(XYZ_model_new)
        # model_pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        # Flip it, otherwise the point cloud will be upside down
        model_pcd.paint_uniform_color([1, 0, 0])
        # pass the two clouds separately instead of merging them into one
        visualization.draw_geometries([model_pcd, sample_pcd])
        rotation_matrix_estimated, quaternion_estimated = self.combined_registration(
            model_pcd, sample_pcd)
        print('rotation_matrix_estimated: ', rotation_matrix_estimated)
        print('quaternion_estimated: ', quaternion_estimated)
        model_pcd.paint_uniform_color([1, 0, 0])
        sample_pcd.paint_uniform_color([0, 1, 0])
        visualization.draw_geometries([model_pcd, sample_pcd])
Example #9
def mag_to_color(x):
    # NOTE: the original snippet is truncated above this point; the function name and
    # parameter are taken from the call further below, and this first threshold is assumed.
    if x < 5:  # assumed cutoff for the truncated first branch
        return (0, 0, 1)
    if x < 15:
        return (0, 1, 0)
    else:
        return (1, 0, 0)


first_px = get_first_plan(pc, "BreR")

ply = get_first_surface(first_px)
ply.compute_vertex_normals()
origin = TriangleMesh.create_coordinate_frame(size=100, origin=[0, 0, 0])
# open3d.draw_geometries([ply, origin])

pcd = PointCloud()
pcd2 = PointCloud()
pcd.points = ply.vertices
xyz = np.asarray(pcd.points) + 10
pcd2.points = Vector3dVector(xyz)
# open3d.draw_geometries([pcd, pcd2])
start = time.time()
diff = pcd2.compute_point_cloud_distance(pcd)
end = time.time()

print(f"The difference calculation was computed in {end - start} seconds.")
print(f"The min and max differences are {min(diff)} and {max(diff)}")

ply.vertex_colors = Vector3dVector(np.array(list(map(mag_to_color, diff))))

draw_geometries([ply, pcd2, origin], width=1080, height=640)
Example #10
    def vis_hand_object(self):
        """Runs Open3D visualizer window"""

        hand_mesh, obj_mesh = self.get_o3d_meshes(hand_contact=True)
        o3dv.draw_geometries([hand_mesh, obj_mesh])
Example #11
def show_contactmap(p_num,
                    intent,
                    object_name,
                    mode='simple',
                    joint_sphere_radius_mm=4.0,
                    bone_cylinder_radius_mm=2.5,
                    bone_color=np.asarray([224.0, 172.0, 105.0]) / 255):
    """
  mode =
  simple: just contact map
  simple_hands: skeleton + contact map
  semantic_hands_fingers: skeleton + contact map colored by finger proximity
  semantic_hands_phalanges: skeleton + contact map colored by phalange proximity
  """
    cp = ContactPose(p_num, intent, object_name)

    # read contactmap
    mesh = o3dio.read_triangle_mesh(cp.contactmap_filename)
    mesh.compute_vertex_normals()

    geoms = []
    # apply simple colormap to the mesh
    if 'simple' in mode:
        mesh = apply_colormap_to_mesh(mesh)
        geoms.append(mesh)

    if 'hands' in mode:
        # read hands
        line_ids = mutils.get_hand_line_ids()
        joint_locs = cp.hand_joints()

        # show hands
        hand_colors = [[0, 1, 0], [1, 0, 0]]
        for hand_idx, hand_joints in enumerate(joint_locs):
            if hand_joints is None:
                continue

            # joint locations
            for j in hand_joints:
                m = o3dg.TriangleMesh.create_sphere(
                    radius=joint_sphere_radius_mm * 1e-3, resolution=10)
                T = np.eye(4)
                T[:3, 3] = j
                m.transform(T)
                m.paint_uniform_color(hand_colors[hand_idx])
                m.compute_vertex_normals()
                geoms.append(m)

            # connecting lines
            for line_idx, (idx0, idx1) in enumerate(line_ids):
                bone = hand_joints[idx0] - hand_joints[idx1]
                h = np.linalg.norm(bone)
                l = o3dg.TriangleMesh.create_cylinder(
                    radius=bone_cylinder_radius_mm * 1e-3,
                    height=h,
                    resolution=10)
                T = np.eye(4)
                T[2, 3] = -h / 2.0
                l.transform(T)
                T = mutils.rotmat_from_vecs(bone, [0, 0, 1])
                T[:3, 3] = hand_joints[idx0]
                l.transform(T)
                l.paint_uniform_color(bone_color)
                l.compute_vertex_normals()
                geoms.append(l)

        if 'semantic' in mode:
            n_lines_per_hand = len(line_ids)
            n_parts_per_finger = 4
            # find line equations for hand parts
            lines = []
            for hand_joints in joint_locs:
                if hand_joints is None:
                    continue
                for line_id in line_ids:
                    a = hand_joints[line_id[0]]
                    n = hand_joints[line_id[1]] - hand_joints[line_id[0]]
                    n /= np.linalg.norm(n)
                    lines.append(np.hstack((a, n)))
            lines = np.asarray(lines)

            ops = np.asarray(mesh.vertices)
            d_lines = mutils.p_dist_linesegment(ops, lines)
            line_idx = np.argmin(d_lines, axis=1) % n_lines_per_hand
            finger_idx, part_idx = divmod(line_idx, n_parts_per_finger)
            if 'phalanges' in mode:
                mesh = apply_semantic_colormap_to_mesh(mesh, part_idx)
            elif 'fingers' in mode:
                mesh = apply_semantic_colormap_to_mesh(mesh, finger_idx)
            geoms.append(mesh)
    elif 'mano' in mode:
        for hand in cp.mano_meshes():
            if hand is None:
                continue
            mesh = o3dg.TriangleMesh()
            mesh.vertices = o3du.Vector3dVector(hand['vertices'])
            mesh.triangles = o3du.Vector3iVector(hand['faces'])
            mesh.paint_uniform_color(bone_color)
            mesh.compute_vertex_normals()
            geoms.append(mesh)

    o3dv.draw_geometries(geoms)
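
A usage sketch for the viewer above; the participant number, intent, and object name are placeholders and must correspond to an existing ContactPose recording:

# Hypothetical example: semantic per-finger contact map plus the hand skeletons.
show_contactmap(p_num=1, intent='use', object_name='mug', mode='semantic_hands_fingers')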
Example #12
def run_samples(fine_samples, args):
    for idx in range(3):
        sample = fine_samples[idx]
        hand_in, obj_in = get_meshes(sample['hand_verts_in'],
                                     sample['hand_faces'], sample['obj_verts'],
                                     sample['obj_faces'])
        hand_out, obj_out = get_meshes(sample['hand_verts_out'],
                                       sample['hand_faces'],
                                       sample['obj_verts'],
                                       sample['obj_faces'])
        hand_out.translate((0.0, 0.2, 0.0))
        obj_out.translate((0.0, 0.2, 0.0))

        if idx == 0:
            hand_in.translate((0.05, 0, 0.01))
            lbl_a = text_3d('A', pos=[-0.2, 0.0, 0], font_size=40, density=2)
            lbl_b = text_3d('B', pos=[-0.2, 0.2, 0], font_size=40, density=2)
        elif idx == 1:
            hand_out.translate((0, 0.01, -0.05))
            lbl_a = text_3d('A', pos=[-0.2, 0.2, 0], font_size=40, density=2)
            lbl_b = text_3d('B', pos=[-0.2, 0.0, 0], font_size=40, density=2)
        elif idx == 2:
            hand_out.translate((0, 0.02, 0))
            lbl_a = text_3d('A', pos=[-0.2, 0.0, 0], font_size=40, density=2)
            lbl_b = text_3d('B', pos=[-0.2, 0.2, 0], font_size=40, density=2)

        lbl_instr_1 = text_3d(
            'Practice example {}. Which looks more like the way'.format(idx +
                                                                        1),
            pos=[-0.25, 0.33, 0],
            font_size=25,
            density=2)
        lbl_instr_2 = text_3d('a person would grasp the object? Press A or B',
                              pos=[-0.25, 0.30, 0],
                              font_size=25,
                              density=2)

        geom_list = [
            hand_in, obj_in, hand_out, obj_out, lbl_a, lbl_b, lbl_instr_1,
            lbl_instr_2
        ]

        def press_a(vis):
            vis.close()

        def press_b(vis):
            vis.close()

        key_to_callback = {ord("A"): press_a, ord("B"): press_b}
        o3dv.draw_geometries_with_key_callbacks(geom_list, key_to_callback)

    text_list = [
        'End of practice samples.', ' ', 'Press Q to proceed to the study.'
    ]

    geom_list = []
    for idx, t in enumerate(text_list):
        geom_list.append(
            text_3d(t, pos=[0, -0.04 * idx, 0], font_size=40, density=2))

    o3dv.draw_geometries(geom_list,
                         zoom=0.45,
                         front=[0, 0, 1],
                         lookat=[0.3, -0.05, 0],
                         up=[0, 1, 0])
Example #13
def test_GFP():
    from open3d import visualization, geometry, utility
    test_points, test_labels = load_h5(test_h5_path)

    nr_points = ModellingParameters.NUM_POINTS_UPSAMPLE
    model = GFPNet(nr_points)

    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=['mse', 'accuracy'])

    # print the model summary
    model.load_weights(net_params.PATH_TO_WEIGHTS)
    print(model.summary())
    primitive_path = PATHS.PATH_TO_PRIMITIVES.CAR.root + "CarPrimitive_15_500.off"
    io_primitive = IoPrimitive(primitive_path)
    io_primitive.load_primitive(normalize=False)

    pathlist = Path(PATHS.NETWORK.root).glob('*.{0}'.format('off'))
    for path in pathlist:
        lidar_cloud_path = str(path)
        file_name = lidar_cloud_path.split("\\")[-1]
        label_path = PATHS.NETWORK.TEST_MODELLED + file_name
        cloud_path = PATHS.NETWORK.TEST_CLOUD + file_name

        observation_points = read_off_file(cloud_path)
        io_observation_cloud = IoObservation(observation_points)
        io_primitive.scale_relative_to(io_observation_cloud)
        io_primitive.align_primitive_on_z_axis(io_observation_cloud)
        io_primitive.compute_normals()

        modelled_primitive_points = read_off_file(label_path)
        io_modelled_primitive = IoObservation(modelled_primitive_points)

        eval_X, boolIndexes = make_samples(io_observation_cloud,
                                           io_primitive,
                                           io_modelled_primitive,
                                           usePrimitivePoints=False,
                                           generate_for='test')
        eval_X = upsample_point_set(
            sample_collection=eval_X,
            points_count=ModellingParameters.NUM_POINTS_UPSAMPLE)

        cloud_bef = geometry.PointCloud()
        cloud_bef.points = utility.Vector3dVector(
            np.asarray(io_primitive.point_cloud.points))
        cloud_bef.normals = utility.Vector3dVector(
            np.asarray(io_primitive.point_cloud.normals))
        cloud_bef.paint_uniform_color([1, 0, 0])  # Open3D colors are RGB in [0, 1]

        cloud_lidar = geometry.PointCloud()
        cloud_lidar.points = utility.Vector3dVector(
            np.asarray(io_observation_cloud.point_cloud))
        cloud_lidar.paint_uniform_color([0, 0, 0])

        cloud_modelled = geometry.PointCloud()
        cloud_modelled.points = utility.Vector3dVector(
            np.asarray(io_modelled_primitive.point_cloud.points))
        cloud_modelled.paint_uniform_color([1, 1, 0])  # yellow

        visualization.draw_geometries([cloud_bef, cloud_lidar, cloud_modelled])
        final_pts = []
        idx = 0
        for i in range(0, len(eval_X)):
            pred = eval_X[i].reshape(-1, nr_points, 3)
            points = model.predict(pred)
            final_pts.append(points)
            idx = i

        final_pts = np.reshape(final_pts, newshape=(len(final_pts), 3))
        print('Final pts len : ', len(final_pts))
        final_pts = [
            point * ModellingParameters.CAR.SCALE for point in final_pts
        ]
        true_indexes = [i for i, val in enumerate(boolIndexes) if val]
        for i in true_indexes:
            cloud_bef.colors[i] = [0, 1, 0]  # green
        import pclpy.pcl.point_types as ptype
        aux = ptype.PointXYZ()
        new_points = []
        for i in range(0, len(final_pts)):
            val = io_primitive.point_cloud.points[true_indexes[i]] + (
                final_pts[i] - ModellingParameters.NORMALIZATION_CENTER)
            aux.x = val[0]
            aux.y = val[1]
            aux.z = val[2]
            new_points.append(val)
            # transform_neighbor_points_for(i, aux, srcPrimitive, None)
        cloud_aft = geometry.PointCloud()
        cloud_aft.points = utility.Vector3dVector(new_points)
        cloud_aft.paint_uniform_color([0, 0, 1])  # blue
        # cloud.normals = utility.Vector3dVector(cloud_points[:,3:6])
        visualization.draw_geometries(
            [cloud_bef, cloud_aft, cloud_lidar, cloud_modelled])
Example #14
def run_pairwise_registration(i, j, visualize=False, icp_refine=False):
    model_depth = cv2.imread(DEPTH_FILES[i], cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    scene_depth = cv2.imread(DEPTH_FILES[j], cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    model = convert_depth_to_pcloud(model_depth)
    scene = convert_depth_to_pcloud(scene_depth)
    pcloud_model = PointCloud()
    pcloud_model.points = Vector3dVector(model)
    pcloud_scene = PointCloud()
    pcloud_scene.points = Vector3dVector(scene)

    if visualize:
        pcloud_model.paint_uniform_color([1, 0, 0]) # red
        pcloud_scene.paint_uniform_color([0, 1, 0]) # green
        draw_geometries([pcloud_model, pcloud_scene])

    down_model = voxel_down_sample(pcloud_model, voxel_size=0.065)
    down_scene = voxel_down_sample(pcloud_scene, voxel_size=0.065)
    model_pts = np.array(down_model.points)
    scene_pts = np.array(down_scene.points)

    res = run_rigid_pairwise(BINARY_FULLPATH, model_pts, scene_pts, CONFIG_FILE)
    # http://www.boris-belousov.net/2016/12/01/quat-dist/
    print("Transformation estimated by gmmreg:")
    print(res[1])
    gt = np.dot(np.linalg.inv(GT_POS[j]), GT_POS[i])
    print("Transformation from ground truth:")
    print(gt)
    theta_before = np.arccos((np.trace(gt[:3,:3]) - 1) * 0.5)
    print("pose difference (in degrees) before alignment:", theta_before * 180 / np.pi)
    R = np.dot(gt[:3,:3].T, res[1][:3,:3])
    theta_after = np.arccos((np.trace(R) - 1) * 0.5)
    print("pose difference (in degrees) after alignment:", theta_after * 180 / np.pi)
    transformed = np.loadtxt(os.path.join(TMP_PATH, 'transformed_model.txt'))
    # Load the estimated rigid transform here (not only under `visualize`) so the
    # ICP refinement below can always use it as its initial guess.
    matrix = np.loadtxt(os.path.join(TMP_PATH, 'final_rigid_matrix.txt'))
    pcloud_transformed = PointCloud()
    pcloud_transformed.points = Vector3dVector(transformed)
    pcloud_transformed.paint_uniform_color([0, 0, 1])  # blue
    if visualize:
        draw_geometries([pcloud_transformed, down_model, down_scene])
        transformed = np.dot(model, matrix[:3, :3].T) + matrix[:3, 3].T
        pcloud_transformed.points = Vector3dVector(transformed)
        pcloud_transformed.paint_uniform_color([0, 0, 1]) # blue
        draw_geometries([pcloud_transformed, pcloud_scene])

    if icp_refine:
        print("Apply point-to-point ICP")
        threshold = 0.02
        trans_init = matrix
        t1 = time.time()
        reg_p2p = registration_icp(
                down_model, down_scene, threshold, trans_init,
                TransformationEstimationPointToPoint())
        t2 = time.time()
        print("ICP Run time : %s seconds" % (t2 - t1))
        print(reg_p2p)
        print("Transformation by ICP is:")
        print(reg_p2p.transformation)
        print("")
        R = np.dot(gt[:3,:3].T, reg_p2p.transformation[:3,:3])
        theta = np.arccos((np.trace(R) - 1) * 0.5)
        print("pose difference (in degrees) after icp-refinement:", theta * 180 / np.pi)
        if visualize:
            draw_registration_result(pcloud_model, pcloud_scene, reg_p2p.transformation)
    return res, theta_before * 180 / np.pi, theta_after * 180 / np.pi
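
A usage sketch; the indices refer to entries of `DEPTH_FILES`/`GT_POS`, and the gmmreg binary, config file, and temp-path globals used above must already be configured:

# Hypothetical example: align frame 0 (model) to frame 1 (scene), then refine with ICP.
res, deg_before, deg_after = run_pairwise_registration(0, 1, visualize=False, icp_refine=True)
print('rotation error reduced by (degrees):', deg_before - deg_after)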