Example #1
0
 def test_icp_registration(self):
     """ICP should recover a known rigid transform between two clouds.

     Builds a random cloud, rotates it 10 degrees about the z-axis, and
     checks that icp_registration returns (almost) that same transform.
     """
     cloud = pp.PointCloud(np.random.rand(10, 3), pp.pointcloud.PointXYZField())
     theta = np.deg2rad(10.0)
     c, s = np.cos(theta), np.sin(theta)
     # Homogeneous 4x4 transform: rotation in the upper-left 2x2 block.
     expected = np.identity(4)
     expected[0, 0] = c
     expected[0, 1] = -s
     expected[1, 0] = s
     expected[1, 1] = c
     moved = cloud.transform(expected)
     result = pp.registration.icp_registration(cloud, moved, 0.3)
     np.testing.assert_almost_equal(result, expected)
    # NOTE(review): truncated fragment — the enclosing function and the rest
    # of the try/while loop (including its except/finally) are outside this
    # view; do not assume the loop body ends at the last visible comment.
    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()

    # We will not display the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 3  # 3 meter
    # Depth frames use raw device units; dividing the metre threshold by the
    # scale converts it into those units for direct comparison.
    clipping_distance = clipping_distance_in_meters / depth_scale
    # print(depth_scale)

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    # Reusable point cloud and a transform that negates Y and Z — presumably
    # flipping the camera frame into the viewer's frame; TODO confirm.
    pcd = pp.PointCloud()
    flip_transform = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
                               [0, 0, 0, 1]])
    client = pp.VizClient(host=args.host)
    # Cluster-to-cluster distance: RMSE between feature sets.
    # NOTE(review): semantics of the third argument (50) are not visible
    # here — verify against pp.registration.compute_rmse.
    dist_fn = lambda c1, c2: pp.registration.compute_rmse(
        c1.feature, c2.feature, 50)
    assigner = pp.assignment.ClusterAssigner(dist_fn, 0.2)

    try:
        while True:
            # Parameters can be adjusted live from the viz client.
            cur_params = client.get_parameters()

            # Get frameset of color and depth
            frames = pipeline.wait_for_frames()

            # Align the depth frame to color frame
Example #3
0
    # NOTE(review): truncated fragment — the enclosing function and the rest
    # of the try/while loop (including its except/finally) are outside this
    # view.
    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()

    # We will not display the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 3  # 3 meter
    # Convert the metre threshold into raw depth units via the sensor scale.
    clipping_distance = clipping_distance_in_meters / depth_scale
    # print(depth_scale)

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    pcd = pp.PointCloud()
    # Negates Y and Z — presumably flips camera frame to viewer frame;
    # TODO confirm.
    flip_transform = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
                               [0, 0, 0, 1]])
    client = pp.VizClient(host=args.host)
    # Open (append mode) so repeated runs reuse the existing HDF5 file and
    # its "pointclouds" group instead of clobbering earlier captures.
    outfh = h5py.File("sensor_data.h5", "a")
    if "pointclouds" in outfh:
        grp = outfh["pointclouds"]
    else:
        grp = outfh.create_group("pointclouds")

    try:
        while True:
            # Get frameset of color and depth
            frames = pipeline.wait_for_frames()

            # Align the depth frame to color frame
# NOTE(review): fragment — `pc`, `mask`, and `calc_convexhull_area` are
# defined before this view, and the script appears to continue past the
# last visible line (obj areas are computed but only the plane is logged).
# Split the cloud into in-plane and off-plane points using the mask.
plane_pts = pc.points[mask, :]
not_plane_pts = pc.points[~mask, :]

# Cluster objects
# DBSCAN labels noise points as -1, hence the correction when counting.
db = DBSCAN(eps=0.1).fit(not_plane_pts)
labels = db.labels_
n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
print("Number of class:", n_clusters)
# NOTE(review): only clusters 0 and 1 are visualized below even when
# n_clusters > 2 — confirm this two-object assumption is intentional.
mask_0 = labels == 0
mask_1 = labels == 1

# Draw results
# RGB columns are appended to XYZ: plane=blue, obj1=green, obj2=magenta.
client = pp.VizClient()
res1 = client.post_pointcloud(
    pp.PointCloud(
        np.c_[plane_pts,
              np.tile([0.0, 0.0, 1.0], (len(plane_pts), 1))],
        pp.pointcloud.PointXYZRGBField()), 'plane')
res2 = client.post_pointcloud(
    pp.PointCloud(
        np.c_[not_plane_pts[mask_0],
              np.tile([0.0, 1.0, 0.0], (len(not_plane_pts[mask_0]), 1))],
        pp.pointcloud.PointXYZRGBField()), 'obj1')
res3 = client.post_pointcloud(
    pp.PointCloud(
        np.c_[not_plane_pts[mask_1],
              np.tile([1.0, 0.0, 1.0], (len(not_plane_pts[mask_1]), 1))],
        pp.pointcloud.PointXYZRGBField()), 'obj2')
# 2-D footprint areas from the convex hull of each XY projection.
plane_area = calc_convexhull_area(plane_pts[:, :2])
obj0_area = calc_convexhull_area(not_plane_pts[mask_0, :2])
obj1_area = calc_convexhull_area(not_plane_pts[mask_1, :2])
client.add_log(f"Plane Area: {plane_area:.3f}")