Code example #1
File: remove_outliers.py  Project: jtpils/ais3d
import argparse

import numpy as np
import pcl

parser = argparse.ArgumentParser(
    description='PointCloudLibrary example: Remove outliers')
parser.add_argument('--Removal',
                    '-r',
                    choices=('Radius', 'Condition'),
                    default='',
                    help='RadiusOutlier/Condition Removal')
args = parser.parse_args()

# int main (int argc, char** argv)
# pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
# pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>);
cloud = pcl.PointCloud()
cloud_filtered = pcl.PointCloud()

# // Fill in the cloud data
# cloud->width  = 5;
# cloud->height = 1;
# cloud->points.resize (cloud->width * cloud->height);
#
# for (size_t i = 0; i < cloud->points.size (); ++i)
# {
# cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
# cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
# cloud->points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
# }
#
# Fill the cloud with five random points, mirroring the C++ above (x, y, z)
points = (1024 * np.random.random((5, 3))).astype(np.float32)
cloud.from_array(points)
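The snippet stops before the --Removal choice is applied. A minimal sketch of the Radius branch, assuming python-pcl's RadiusOutlierRemoval bindings (make_RadiusOutlierRemoval, set_radius_search, set_MinNeighborsInRadius); the Condition branch would use ConditionalRemoval analogously:

if args.Removal == 'Radius':
    outrem = cloud.make_RadiusOutlierRemoval()
    outrem.set_radius_search(0.8)       # search radius around each point
    outrem.set_MinNeighborsInRadius(2)  # drop points with fewer neighbors
    cloud_filtered = outrem.filter()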
Code example #2
    def __init__(self, args, root_folder="./NaverML_indoor"):

        self.pose_ld = args.pose_ld
        self.pose_covisibility = args.pose_covisibility
        self.pose_pointcloud_load = args.pose_pointcloud_load
        self.dataset = args.dataset
        self.pose_timechecker = args.pose_timechecker
        self.pose_noniter = args.pose_noniter
        self.pose_cupy = args.pose_cupy

        if self.pose_ld == 0: # SIFT
            self.local_descriptor = he.SIFT(root=False)
        elif self.pose_ld == 1: # rootSIFT
            self.local_descriptor = he.SIFT(root=True)
        elif self.pose_ld == 2: # D2 SS
            self.local_descriptor = ex.D2Net_local_extractor(args=args, 
                                                             model_file="./arxiv/d2_tf.pth",
                                                             use_relu=True,
                                                             max_edge=1600,
                                                             max_sum_edges=2800,
                                                             preprocessing='caffe',
                                                             multiscale=False)
        elif self.pose_ld == 3: # D2 MS
            self.local_descriptor = ex.D2Net_local_extractor(args=args, 
                                                             model_file="./arxiv/d2_tf.pth",
                                                             use_relu=True,
                                                             max_edge=1600,
                                                             max_sum_edges=2800,
                                                             preprocessing='caffe',
                                                             multiscale=True)
        elif self.pose_ld == 4: # SuperPoint (keypoint front end used by SuperGlue)
            superPointdict=dict()
            superPointdict["nms_radius"]=4
            superPointdict["keypoint_threshold"]= 0.005
            superPointdict["max_keypoints"]=1024
            self.local_descriptor = ex.SuperPoint(superPointdict).eval().cuda()

        if self.dataset == 0:
            self.cld_path = "img2pcds/indoor_b1"
        elif self.dataset == 1:
            self.cld_path = "img2pcds/indoor_1f"

        if self.pose_pointcloud_load is False:
            if self.dataset==0:
                self.cloud = pcl.load(os.path.join(root_folder, "b1", "train", "PointCloud_all", "map.pcd"))
                self.cld_arr = self.cloud.to_array()
                mask = (self.cld_arr[:,2]>-5.8) & (self.cld_arr[:,2]<0)
                self.cld_arr = self.cld_arr[mask]
                self.cloud = pcl.PointCloud()
                self.cloud.from_array(self.cld_arr)           
                resolution = 0.2
                self.octree = self.cloud.make_octreeSearch(resolution)
                self.octree.add_points_from_input_cloud()

            elif self.dataset==1:
                self.cloud = pcl.load(os.path.join(root_folder, "1f", "train", "PointCloud_all", "map.pcd"))
                self.cld_arr = self.cloud.to_array()
                mask = (self.cld_arr[:,2]>0)
                self.cld_arr = self.cld_arr[mask]
                self.cloud = pcl.PointCloud()
                self.cloud.from_array(self.cld_arr)  
                resolution = 0.2
                self.octree = self.cloud.make_octreeSearch(resolution)
                self.octree.add_points_from_input_cloud()        
            else:
                raise ValueError("check PoseEstimation params")


        self.grayscele_fn = db.Grayscale()
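For context, the octree built above is what makes neighbour queries against the map cheap; a hedged sketch of a query, assuming the radius_search signature of python-pcl's octree search bindings (the query point here is made up):

# returns (indices, squared_distances) of map points within 0.5 m, capped at 100
indices, sq_dists = self.octree.radius_search((0.0, 0.0, -2.0), 0.5, 100)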
Code example #3
image = np.load(
    '/nfs/diskstation/projects/dex-net/placing/datasets/real/sample_ims_05_22/depth_ims_numpy/image_000001.npy'
)
# Create a DepthImage using the array
ci = CameraIntrinsics.load(
    '/nfs/diskstation/projects/dex-net/placing/datasets/real/sample_ims_05_22/camera_intrinsics.intr'
)
di = DepthImage(image, frame=ci.frame)
pc = ci.deproject(di)

## Visualize the depth image
#vis2d.figure()
#vis2d.imshow(di)
#vis2d.show()

# Make and display a PCL type point cloud from the image
p = pcl.PointCloud(pc.data.T.astype(np.float32))

# Make a segmenter and segment the point cloud.
seg = p.make_segmenter()
seg.set_model_type(pcl.SACMODEL_PARALLEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
seg.set_distance_threshold(0.005)
indices, model = seg.segment()
print(model)

#pdb.set_trace()
vis3d.figure()
pc_plane = pc.data.T[indices]
pc_plane = pc_plane[np.where(pc_plane[:, 1] < 0.16)]

maxes = np.max(pc_plane, axis=0)
Code example #4
 def setUp(self):
     self.a = np.array(np.mat(SEGDATA, dtype=np.float32))
     self.p = pcl.PointCloud()
     self.p.from_array(self.a)
     self.segment = self.p.make_segmenter()
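A test built on this fixture would then configure and run the segmenter the same way the other examples here do; a hypothetical test method, reusing the set_model_type / set_method_type / segment calls seen in examples #3 and #5:

    def testSegmentPlane(self):
        self.segment.set_model_type(pcl.SACMODEL_PLANE)
        self.segment.set_method_type(pcl.SAC_RANSAC)
        self.segment.set_distance_threshold(0.01)
        indices, model = self.segment.segment()
        self.assertEqual(len(model), 4)  # plane model is [a, b, c, d]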
Code example #5
def main():
	baxter_test = Baxter_Test()
	rate = rospy.Rate(baxter_test._rate)
	#retrieve point cloud data from Baxter
	point_cloud_msg = ros_topic_get("/camera/depth_registered/points", PointCloud2)
	point_cloud_array = pointclouds.pointcloud2_to_xyz_array(point_cloud_msg)
	point_cloud_array = transform_matrix(point_cloud_array, "/camera_rgb_optical_frame", "/base")
	point_cloud = pcl.PointCloud() #create new pcl PointCloud
	point_cloud.from_list(point_cloud_array) #import data to PointCloud from 2d matrix

	#Experimental code for the hand mounted camera
	print "looking up hand camera"
	"""f200_cloud_msg = ros_topic_get("/camera/depth/points", PointCloud2)
	print f200_cloud_msg
	point_cloud_array = pointclouds.pointcloud2_to_xyz_array(f200_cloud_msg)
	print point_cloud_array"""

	#filter data
	fil = point_cloud.make_passthrough_filter()
	fil.set_filter_field_name("x")
	fil.set_filter_limits(0.5, 1)
	cloud_filtered_x = fil.filter()

	fil = cloud_filtered_x.make_passthrough_filter()
	fil.set_filter_field_name("z")
	fil.set_filter_limits(-0.1, 0.5)
	cloud_filtered = fil.filter()

	#segment data
	seg = cloud_filtered.make_segmenter()
	seg.set_model_type(pcl.SACMODEL_PLANE)
	seg.set_method_type(pcl.SAC_RANSAC)
	seg.set_distance_threshold(0.03)
	indices, model = seg.segment()

	#remove table plane based on model
	object_cloud = filter_plane(model, cloud_filtered, 0.1)
	print object_cloud

	X = object_cloud.to_array()
	#our PCA only needs to deal with x-y plane, remove z values
	X = X[:,[0,1]]
	pca = PCA(n_components=2)
	X = pca.fit_transform(X)

	#calculate object centroid
	object_centroid = get_centroid(object_cloud)

	#calculate object bounding box
	bounding_box = fit_bounding_box(object_cloud, pca.components_, object_centroid)
	#baxter_test.move_left_to_coord(object_centroid)

	
	publisher = rospy.Publisher("/baxter_test", MarkerArray, queue_size = 10)
	marker_array = MarkerArray()

	
	#PCA vector sanity check (if needed)
	"""count = 0
	for vector in pca.components_:
		vector_marker = Marker()
		vector_marker.header.frame_id = "/base"
		vector_marker.header.stamp = rospy.Time(0)
		vector_marker.type = vector_marker.ARROW
		vector_marker.action = vector_marker.ADD
		start = Point()
		vector_marker.points = [createPoint(object_centroid[0], object_centroid[1], 0),
								createPoint(object_centroid[0] + vector[0], 
								object_centroid[1] + vector[1], 0)] 
		if count == 0:
			vector_marker.color.a = 1.0 #first vector green
			vector_marker.color.r = 0.0
			vector_marker.color.g = 1.0
			vector_marker.color.b = 0.0
		else:
			vector_marker.color.a = 1.0 #second vector blue
			vector_marker.color.r = 0.0
			vector_marker.color.g = 0.0
			vector_marker.color.b = 1.0
		vector_marker.scale.x = 0.01
		vector_marker.scale.y = 0.02
		vector_marker.id = count
		marker_array.markers.append(vector_marker)
		count += 1"""

	#continually do stuff
	while not rospy.is_shutdown():
		publisher.publish(marker_array)
		#baxter_test.move_left_to_coord(object_centroid)
		rate.sleep()
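filter_plane(), get_centroid() and fit_bounding_box() are project helpers that are not shown. A hypothetical sketch of filter_plane(), inferred only from the call above: keep the points farther than `threshold` from the fitted plane ax + by + cz + d = 0.

import numpy as np
import pcl

def filter_plane(model, cloud, threshold):
    a, b, c, d = model
    pts = cloud.to_array()
    # point-to-plane distance for every point
    dist = np.abs(pts.dot([a, b, c]) + d) / np.sqrt(a * a + b * b + c * c)
    out = pcl.PointCloud()
    out.from_array(pts[dist > threshold].astype(np.float32))
    return out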
Code example #6
    def run(self):
        """
        This function holds the main loop that continuously finds walls and updates the wall distance
        """
        r = rospy.Rate(3)  # Attempt to run at 3 Hz (the loop rarely even reaches this speed)
        while not rospy.is_shutdown():  #Start main while loop
            if self.actualP is not None and self.position is not None:  #and not np.any(self.camera_corners) is None: #Wait until phone has found a pointcloud and a phone position
                self.m.acquire()  # LOCK
                actualPCopy = self.pcloud_transform(self.actualP, 'odom')
                if actualPCopy is None:  # The transform failed (transforms not set yet, or timing issues)
                    pass  # continue loop
                else:
                    self.CurrP = self.CloudList(actualPCopy)[::self.resolutionfactor]  # squeeze out the third dimension and sample based on resolutionfactor
                    cloud_filtered = pcl.PointCloud(self.CurrP)  # Convert points into a pcl Point Cloud (different from a ROS point cloud)
                    seg = cloud_filtered.make_segmenter()  # Segmenter that fits a model to the pointcloud
                    seg.set_optimize_coefficients(True)  # Refine the fitted model coefficients
                    seg.set_model_type(pcl.SACMODEL_PLANE)  # Fit a plane model
                    seg.set_method_type(pcl.SAC_RANSAC)  # Use the RANSAC algorithm
                    seg.set_distance_threshold(self.planeDistThreshold)  # Maximum point-to-plane distance for inliers
                    match = False  # Whether the previously found plane is the same as the one found now
                    try:  # A lot can go wrong below (timing issues, too little data to fit a plane); the try/except is an easy way to skip to the next iteration
                        for i in range(self.planetrynum):  # Keep trying to find planes up to self.planetrynum times
                            if self.CurrP.shape[0] < self.robustThreshold:  # Make sure the pointcloud has enough points to fit a plane
                                print('pointcloud too empty')
                                break
                            check = False  # Whether to check if the current or the previous wall is closer
                            usesave = None  # Whether to use the previous wall instead of the current one
                            indices, plane_model = seg.segment()  # plane_model is [a, b, c, d] for ax + by + cz + d = 0; indices are the plane's inliers
                            if self.saved_plane_model is not None:  # If there is a previous plane
                                if (abs(plane_model[0] - self.saved_plane_model[0]) < self.abc_match_threshold
                                        and abs(plane_model[1] - self.saved_plane_model[1]) < self.abc_match_threshold
                                        and abs(plane_model[2] - self.saved_plane_model[2]) < self.abc_match_threshold
                                        and abs(plane_model[3] - self.saved_plane_model[3]) < self.d_match_threshold):  # Is the current plane close to the previous one?
                                    match = True  # The walls match
                                else:
                                    check = True  # The walls don't match; find out which is closer
                            if len(indices) < self.robustThreshold:  # If the wall isn't robust enough
                                print("No Robust Planes")
                                if self.saved_plane_model:  # Fall back to the saved plane model
                                    newdist = self.plane_distance(self.saved_plane_model, self.position)
                                    self.linepoints = self.gettangentpoints(self.saved_plane_model)  # Points for the line drawn in rviz
                                    self.walldist = newdist  # Set the wall distance
                                break  # Skip to the next iteration of the while loop
                            if abs(plane_model[2]) < self.verticalityThreshold or match:  # If the plane is vertical enough or matches the previous wall (the "or match" may allow a vertical wall that slopes into something horizontal)
                                new_plane_dist = self.plane_distance(plane_model, self.position)  # Distance to the newly found plane
                                if check:  # The current and previous planes are different
                                    if abs(new_plane_dist) > self.walldist:  # The old plane was closer
                                        plane_points = self.CurrP[indices]
                                        if self.PassedPlane(self.saved_plane_model, plane_points, self.position):  # If the new plane passes through where the old plane was, the old plane is gone
                                            usesave = False  # Use the current plane
                                        else:
                                            usesave = True  # Keep the saved plane
                                    else:
                                        usesave = False  # Use the current plane
                                if usesave:  # If the saved plane is kept
                                    print("SAVE USED")
                                    newdist = self.plane_distance(self.saved_plane_model, self.position)  # Set the distance
                                    self.linepoints = self.gettangentpoints(self.saved_plane_model)  # Make the line for rviz
                                    self.walldist = newdist
                                    break  # Skip to the next iteration of the while loop

                                self.walldist = new_plane_dist  # Set walldist to the new plane distance
                                self.linepoints = self.gettangentpoints(plane_model)  # Make the line for rviz
                                self.saved_plane_model = plane_model  # Save the current plane
                                break  # Start the next wall find
                            else:  # The plane didn't make the cut and wasn't a wall
                                self.CurrP = np.delete(self.CurrP, indices, 0)  # Delete all points belonging to the rejected plane
                                if self.CurrP.shape[0] < 5:  # Not enough points to continue
                                    print('pointcloud too empty')
                                    break
                                cloud_filtered = pcl.PointCloud(self.CurrP)  # New pcl pointcloud without the rejected plane's points

                                # Make a new segmenter, ready to find the next plane
                                seg = cloud_filtered.make_segmenter()
                                seg.set_optimize_coefficients(True)
                                seg.set_model_type(pcl.SACMODEL_PLANE)
                                seg.set_method_type(pcl.SAC_RANSAC)
                                seg.set_distance_threshold(self.planeDistThreshold)
                                continue  # Try to find another plane, ignoring the one just rejected
                    except Exception as inst:  # If something goes wrong, print it and move on
                        print(inst)
                    print("wall_distance: " + str(abs(self.walldist)))  # Print the current wall distance
                    self.dist_pub.publish(self.walldist)

                    if self.visualized and self.linepoints:
                        self.vis_pub.publish(
                            Marker(header=Header(frame_id="odom",
                                                 stamp=self.actualP.header.stamp),
                                   type=Marker.LINE_LIST,
                                   ns="current_plane",
                                   scale=Vector3(x=.1, y=.1, z=.1),
                                   points=self.linepoints,
                                   colors=self.linecolors))
                self.actualP = None  # Reset the pointcloud so the same cloud isn't processed twice; wait for the next one from the phone
                self.m.release()
            r.sleep()  #wait until next iteration
Code example #7
 def setUp(self):
     self.p = pcl.PointCloud(_data)
     self.segment = pcl.MinCutSegmentation()
Code example #8
def keyboardInterruptHandler(sig, frame):  # signature reconstructed; the original def line was cut off
    ros_pointnet2.stop_call()
    exit(1)


signal.signal(signal.SIGINT, keyboardInterruptHandler)

while True:
    # The pipeline object (configured earlier) owns the camera handle; fetch the next frames from it
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    rs_pc = rs.pointcloud()
    points = rs_pc.calculate(depth_frame)
    v = points.get_vertices()
    verts = np.asanyarray(v).view(np.float32).reshape(-1, 3)  # xyz
    p = pcl.PointCloud()
    p.from_array(np.array(verts, dtype=np.float32))

    #print "original", p.size
    #Downsampling
    sor = p.make_voxel_grid_filter()
    sor.set_leaf_size(0.05, 0.05, 0.05)
    cloud_filtered = sor.filter()
    #print "after Downsampling", cloud_filtered.size

    #Passtrough filter
    passthrough = cloud_filtered.make_passthrough_filter()
    #passthrough = p.make_passthrough_filter()
    passthrough.set_filter_field_name("z")
    passthrough.set_filter_limits(0.0, 5.0)
    cloud_filtered = passthrough.filter()
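The capture loop stops after the passthrough filter. A hedged continuation, reusing the RANSAC plane-segmentation pattern that appears in examples #3 and #5 (the threshold value is a placeholder):

    seg = cloud_filtered.make_segmenter()
    seg.set_model_type(pcl.SACMODEL_PLANE)
    seg.set_method_type(pcl.SAC_RANSAC)
    seg.set_distance_threshold(0.01)
    indices, model = seg.segment()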
Code example #9
def preprocess_images(raw_color_im, raw_depth_im, camera_intr, T_camera_world,
                      workspace_box, workspace_im, image_proc_config):
    """ Preprocess a set of color and depth images. """
    # read params
    inpaint_rescale_factor = image_proc_config['inpaint_rescale_factor']
    cluster = image_proc_config['cluster']
    cluster_tolerance = image_proc_config['cluster_tolerance']
    min_cluster_size = image_proc_config['min_cluster_size']
    max_cluster_size = image_proc_config['max_cluster_size']

    # deproject into 3D world coordinates
    point_cloud_cam = camera_intr.deproject(raw_depth_im)
    point_cloud_cam.remove_zero_points()
    point_cloud_world = T_camera_world * point_cloud_cam

    # compute the segmask for points above the box
    seg_point_cloud_world, _ = point_cloud_world.box_mask(workspace_box)
    seg_point_cloud_cam = T_camera_world.inverse() * seg_point_cloud_world
    depth_im_seg = camera_intr.project_to_image(seg_point_cloud_cam)

    # mask out objects in the known workspace
    env_pixels = depth_im_seg.pixels_farther_than(workspace_im)
    depth_im_seg._data[env_pixels[:, 0], env_pixels[:, 1]] = 0

    # REMOVE NOISE
    # clip low points
    low_indices = np.where(
        point_cloud_world.data[2, :] < workspace_box.min_pt[2])[0]
    point_cloud_world.data[2, low_indices] = workspace_box.min_pt[2]

    # clip high points
    high_indices = np.where(
        point_cloud_world.data[2, :] > workspace_box.max_pt[2])[0]
    point_cloud_world.data[2, high_indices] = workspace_box.max_pt[2]

    # segment out the region in the workspace (including the table)
    workspace_point_cloud_world, valid_indices = point_cloud_world.box_mask(
        workspace_box)
    invalid_indices = np.setdiff1d(np.arange(point_cloud_world.num_points),
                                   valid_indices)

    if cluster:
        # create new cloud
        pcl_cloud = pcl.PointCloud(
            workspace_point_cloud_world.data.T.astype(np.float32))
        tree = pcl_cloud.make_kdtree()

        # find large clusters (likely to be real objects instead of noise)
        ec = pcl_cloud.make_EuclideanClusterExtraction()
        ec.set_ClusterTolerance(cluster_tolerance)
        ec.set_MinClusterSize(min_cluster_size)
        ec.set_MaxClusterSize(max_cluster_size)
        ec.set_SearchMethod(tree)
        cluster_indices = ec.Extract()
        num_clusters = len(cluster_indices)

        # read out all points in large clusters
        filtered_points = np.zeros([3, workspace_point_cloud_world.num_points])
        cur_i = 0
        for j, indices in enumerate(cluster_indices):
            num_points = len(indices)
            points = np.zeros([3, num_points])

            for i, index in enumerate(indices):
                points[0, i] = pcl_cloud[index][0]
                points[1, i] = pcl_cloud[index][1]
                points[2, i] = pcl_cloud[index][2]

            filtered_points[:, cur_i:cur_i + num_points] = points.copy()
            cur_i = cur_i + num_points

        # reconstruct the point cloud
        all_points = np.c_[filtered_points[:, :cur_i],
                           point_cloud_world.data[:, invalid_indices]]
    else:
        all_points = point_cloud_world.data
    filtered_point_cloud_world = PointCloud(all_points, frame='world')

    # compute the filtered depth image
    filtered_point_cloud_cam = T_camera_world.inverse() * filtered_point_cloud_world
    depth_im = camera_intr.project_to_image(filtered_point_cloud_cam)

    # form segmask
    segmask = depth_im_seg.to_binary()
    valid_px_segmask = depth_im.invalid_pixel_mask().inverse()
    segmask = segmask.mask_binary(valid_px_segmask)

    # inpaint
    color_im = raw_color_im.inpaint(rescale_factor=inpaint_rescale_factor)
    depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)
    return color_im, depth_im, segmask
Code example #10
File: utils.py  Project: SBPL-Cruz/perception
def process_cloud(np_points,
                  camera_matrix,
                  pub_filtered_cloud,
                  initial_rotation,
                  filter_params,
                  publish_filtered=True):
    '''
        1. Transform to robot frame
        2. Filter in z and x
        3. Downsample
        4. Publish and return filtered cloud
    '''
    # rospy.logdebug("Cloud received, frame : {}".format(cloud_msg.header.frame_id))

    # pc = ros_numpy.numpify(cloud_msg)
    # # pc_l = [np.asarray(x) for x in pc]
    # # print(pc_l)
    # # pc = np.asarray(pc_l)
    # # height = pc.shape[0]
    # # width = pc.shape[1]
    # # np_points = np.zeros((height * width, 3), dtype=np.float32)
    # np_points = np.zeros((pc.shape[0], 3), dtype=np.float32)
    # np_points[:, 0] = np.resize(pc['x'], pc.shape[0])
    # np_points[:, 1] = np.resize(pc['y'], pc.shape[0])
    # np_points[:, 2] = np.resize(pc['z'], pc.shape[0])
    # print(np_points.shape)
    # import pdb
    # pdb.set_trace()
    # np_points_appened = np.hstack((np_points, np.ones((np_points.shape[0], 1))))
    # np_points_transformed = np.matmul(total_transform, np.transpose(np_points_appened))
    # np_points_transformed = np.transpose(np_points_transformed)[:,:3]
    np_points_transformed = transform_cloud(np_points, mat=camera_matrix)
    pcl_cloud = pcl.PointCloud(np_points_transformed)

    passthrough = pcl_cloud.make_passthrough_filter()
    passthrough.set_filter_field_name("z")
    zmin = filter_params['zmin']
    passthrough.set_filter_limits(zmin, 1.0)
    cloud_filtered = passthrough.filter()

    passthrough = cloud_filtered.make_passthrough_filter()
    passthrough.set_filter_field_name("x")
    passthrough.set_filter_limits(filter_params['xmin'], filter_params['xmax'])
    cloud_filtered = passthrough.filter()

    passthrough = cloud_filtered.make_passthrough_filter()
    passthrough.set_filter_field_name("y")
    passthrough.set_filter_limits(filter_params['ymin'], filter_params['ymax'])
    cloud_filtered = passthrough.filter()

    # fil = cloud_filtered.make_statistical_outlier_filter()
    # fil.set_mean_k(100)
    # fil.set_std_dev_mul_thresh(0.1)
    # cloud_filtered = fil.filter()

    sor = cloud_filtered.make_voxel_grid_filter()
    sor.set_leaf_size(filter_params['downsampling_leaf_size'],
                      filter_params['downsampling_leaf_size'],
                      filter_params['downsampling_leaf_size'])
    cloud_filtered = sor.filter()

    fil = cloud_filtered.make_statistical_outlier_filter()
    fil.set_mean_k(100)
    fil.set_std_dev_mul_thresh(0.08)
    cloud_filtered = fil.filter()

    cloud_filtered_array = np.asarray(cloud_filtered)
    # cloud_filtered_array = np_points
    # cloud_filtered = pcl.PointCloud(np_points)

    mean = np.mean(cloud_filtered_array, axis=0)
    mean[2] = filter_params['zmin'] + filter_params['object_height'] / 2.0
    pose_estimate = {}
    # mean = mean
    pose_estimate['location'] = mean.tolist()
    # pose_estimate['quaternion'] = [0.02218474, -0.81609224, -0.57647473,  0.03432471]
    # pose_estimate['quaternion'] = [0,0,0,1]
    pose_estimate['quaternion'] = initial_rotation

    print("Num points after downsample and filter in input cloud : {}".format(
        cloud_filtered_array.shape[0]))

    if publish_filtered:
        cloud_color = np.zeros(cloud_filtered_array.shape[0])
        ros_msg = xyzrgb_array_to_pointcloud2(cloud_filtered_array,
                                              cloud_color, rospy.Time.now(),
                                              "/base_footprint")
        pub_filtered_cloud.publish(ros_msg)
    rospy.logdebug("Done cloud processing")

    return cloud_filtered, pose_estimate
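For orientation, the next example (do_icp, from the same utils.py) consumes the pair returned here; hypothetical glue code, with the publisher and mesh arguments left abstract:

cloud_filtered, pose_estimate = process_cloud(np_points, camera_matrix,
                                              pub_filtered_cloud,
                                              initial_rotation, filter_params)
pose_output, pose_output_cam = do_icp(cloud_filtered, camera_pose_matrix,
                                      pub_filtered_cloud, mesh_cloud,
                                      pub_pose_cloud, pose_estimate)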
Code example #11
File: utils.py  Project: SBPL-Cruz/perception
def do_icp(cloud_in, camera_pose_matrix, pub_filtered_cloud, mesh_cloud,
           pub_pose_cloud, pose_estimate):

    # loc_scale = np.array([0.45, 1.5, 0.73])
    # ori = [-1, 0, 0, 1]
    loc_scale = pose_estimate['location']
    ori = pose_estimate['quaternion']
    ori_euler = tf.transformations.euler_from_quaternion(
        pose_estimate['quaternion'])

    R = tf.transformations.quaternion_matrix(ori)
    T = tf.transformations.translation_matrix(loc_scale)

    total_transform = tf.transformations.concatenate_matrices(T, R)
    cloud_filtered_array = transform_cloud(mesh_cloud,
                                           mat=np.copy(total_transform))
    cloud_color = np.zeros(cloud_filtered_array.shape[0])
    # mesh_cloud_cp = np.copy(mesh_cloud)
    # ros_msg = xyzrgb_array_to_pointcloud2(
    #     cloud_filtered_array, cloud_color, rospy.Time.now(), "/base_footprint"
    # )
    # pub_pose_cloud.publish(ros_msg)
    # print(total_transform)
    # rospy.logwarn("Num points after downsample and filter : {}".format(cloud_filtered_array.shape[0]))

    cloud_pose = pcl.PointCloud()
    cloud_pose.from_array(cloud_filtered_array)
    # print(cloud_filtered_array)
    print("Doing ICP")
    icp = cloud_pose.make_GeneralizedIterativeClosestPoint()
    converged, transf, estimate, fitness = icp.gicp(cloud_pose,
                                                    cloud_in,
                                                    max_iter=10)

    # icp = cloud_pose.make_IterativeClosestPoint()
    # converged, transf, estimate, fitness = icp.icp(cloud_pose, cloud_in)

    print('has converged:' + str(converged) + ' score: ' + str(fitness))
    print(str(transf))
    total_transform_icp = tf.transformations.concatenate_matrices(
        transf, total_transform)
    print(str(total_transform_icp))

    pose_output = {}
    pose_output['quaternion'] = tf.transformations.quaternion_from_matrix(
        total_transform_icp)
    pose_output['location'] = tf.transformations.translation_from_matrix(
        total_transform_icp)
    pose_output['matrix'] = total_transform_icp

    # Keep only x,y, and yaw from ICP result
    yaw_icp = math.atan2(transf[1, 0], transf[0, 0])
    yaw_in = ori_euler[2]
    cos_term = math.cos(yaw_in + yaw_icp)
    sin_term = math.sin(yaw_in + yaw_icp)
    total_yaw = math.atan2(sin_term, cos_term)
    pose_output['quaternion'] = tf.transformations.quaternion_from_euler(
        ori_euler[0], ori_euler[1], total_yaw)
    pose_output['location'][2] = loc_scale[2]
    R = tf.transformations.quaternion_matrix(pose_output['quaternion'])
    T = tf.transformations.translation_matrix(pose_output['location'])

    total_transform_icp = tf.transformations.concatenate_matrices(T, R)


    total_transform_icp_cam = \
        tf.transformations.concatenate_matrices(camera_pose_matrix, total_transform_icp)
    print(total_transform_icp_cam)
    pose_output_cam = {}
    pose_output_cam['quaternion'] = tf.transformations.quaternion_from_matrix(
        total_transform_icp_cam)
    pose_output_cam['location'] = tf.transformations.translation_from_matrix(
        total_transform_icp_cam)
    pose_output_cam['matrix'] = total_transform_icp_cam

    cloud_filtered_array = transform_cloud(mesh_cloud, mat=total_transform_icp)
    cloud_color = np.zeros(cloud_filtered_array.shape[0])
    ros_msg = xyzrgb_array_to_pointcloud2(cloud_filtered_array, cloud_color,
                                          rospy.Time.now(), "/base_footprint")
    pub_pose_cloud.publish(ros_msg)

    return pose_output, pose_output_cam
Code example #12
File: demo_odometry.py  Project: jdmarr/se_project
"""
ax3.scatter(traj[0,2],
            traj[1,2],
            traj[2,2],
	    color='r')
"""
ax3.scatter(-dataset_raw.calib.T_cam0unrect_velo[0, 3],
            -dataset_raw.calib.T_cam0unrect_velo[1, 3],
            -dataset_raw.calib.T_cam0unrect_velo[2, 3],
            color='r')

plt.show()

source_points = velo_points_cframe[0:3, :].T
source_points = source_points.astype(np.float32)
pc_source = pcl.PointCloud()
pc_source.from_array(source_points)

target_points = velo_points_wframe[0:3, :].T
target_points = target_points.astype(np.float32)
pc_target = pcl.PointCloud()
pc_target.from_array(target_points)

_, alignment, _, _ = icp(pc_source, pc_target)

f5 = plt.figure()
ax5 = f5.add_subplot(111, projection='3d')

aligned_source = np.dot(alignment, velo_points_cframe)

ax5.scatter(aligned_source[0, :],
            aligned_source[1, :],
            aligned_source[2, :])
Code example #13
    def filtering_raw_data(self, data):
        # plot original data
        if self.debug:
            self.plot_point_cloud('point_clouds', data)  # <-- plot

        ####
        # Removing Table <-- this code will be removed on real practice
        if not self.flip:
            data_y = data[:, 1]  # normal
        else:
            data_y = data[:, 0]  # flip
        data = data[np.where((data_y >= 0.4))]
        # self.plot_point_cloud('point_clouds', data) # <-- plot
        ####

        # flipping data between X Axes & Y Axes
        if not self.flip:
            data_y = data[:, 1]  # normal
        else:
            data_y = data[:, 0]  # flip

        # Sorting point cloud
        data = data[np.argsort(data_y)]  # Y axis
        if not self.flip:
            data_y = data[:, 1]  # normal
        else:
            data_y = data[:, 0]  # flip

        # square scanning
        ymin = np.round(np.min(data_y), 1) - 0.1
        ymax = np.round(np.max(data_y), 1) + 0.1
        y_step_size = 0.025  #0.01
        y_step = np.arange(ymin, ymax, y_step_size)

        # rospy.loginfo('[CAL] Y Point: {}'.format( (ymin, ymax) ))
        # print(y_step)
        # print()

        z_list = []
        z_prev = None
        y_lim = ymax  # fallback so y_lim is defined even if no sharp height drop is found

        for y in range(len(y_step)):
            y_ll = y_step[y]
            y_ul = y_step[y] + y_step_size
            # print('Y : {:.3f}, {:.3f}'.format(y_ll, y_ul) )

            layer = data[np.where((data_y >= y_ll) & (data_y < y_ul))]
            if layer.size != 0:
                zmax = np.max(layer[:, 2])
                # print('\t\ttotal data: {}, max z : {:.4f}'.format(len(layer), zmax ) )
                z_list.append(zmax)
                if z_prev is not None:
                    diff = zmax - z_prev
                    if diff <= -0.5:
                        y_lim = y_ll
                        break
                z_prev = zmax
            # print()

        if self.debug:
            _, axes2D = plt.subplots(nrows=1, ncols=1)
            self.plots_(axes2D, y_step[:len(z_list)], z_list, legend_=None, scatter=False, \
                xlabel_="y-layer", ylabel_="z-height", title_="Filtering Human Body on Point Cloud")

        human_body = data[np.where((data_y >= ymin) & (data_y < y_lim))]
        flag_human_body = human_body
        # self.plot_point_cloud('human_body', human_body) # <-- plot

        # Reference point
        if not self.flip:
            x_ref = (np.min(human_body[:, 0]) + np.max(human_body[:, 0])) / 2
            y_ref = np.min(human_body[:, 1])
        else:
            x_ref = np.min(human_body[:, 0])
            y_ref = (np.min(human_body[:, 1]) + np.max(human_body[:, 1])) / 2

        z_ref = np.max(human_body[:, 2])
        ref_point = np.array([[x_ref, y_ref, z_ref]])
        # rospy.loginfo('[CAL] Ref. Point: {}'.format(ref_point))

        # Euclidean Distance of 3D Point
        eucl_dist = np.linalg.norm(ref_point - human_body[:, :3], axis=1)

        try:
            # Filter euclidean distance
            human_body = human_body[np.where((eucl_dist <= 0.8))]  #0.8
            # self.plot_point_cloud('human_body', human_body, big_point=True, color=True )

            p = pcl.PointCloud(np.array(human_body[:, :3], dtype=np.float32))
            sor = p.make_voxel_grid_filter()
            sor.set_leaf_size(0.01, 0.01, 0.01)
            filtered = sor.filter()

            filtered_human_body = np.asarray(filtered)
            if self.debug:
                self.plot_point_cloud(
                    'filtered_human_body',
                    filtered_human_body)  #, big_point=True, color=True )
            return filtered_human_body

        except Exception:
            if self.debug:
                self.plot_point_cloud(
                    'human_body',
                    flag_human_body)  #, big_point=True, color=True )
            return flag_human_body
Code example #14
def find_object(pc, obj_dims, ball_radius):
    npc = pc.to_array()
    # Crop to cylinder segment
    print('Finding object (#PC {})'.format(pc.size))
    npc = npc[(npc[:, 2] > 0.4) * (npc[:, 2] < 1.5)
              # * (np.sum(npc[:, :2]**2, axis=1) < 0.5**2)
              ]
    pc = pcl.PointCloud(npc)
    pcl.save(pc, 'tmp/scene_cropped.pcd')
    print('Cropped (#PC {})'.format(pc.size))

    # Downsample
    point_distance = 0.01
    face_density = 1 / point_distance**2
    vg = pc.make_voxel_grid_filter()
    vg.set_leaf_size(*(3 * [point_distance]))
    pc = vg.filter()
    npc = pc.to_array()
    pcl.save(pc, 'tmp/scene_downsampled.pcd')
    print('Downsampled (#PC {})'.format(pc.size))

    # Remove extension of planes of considerable goodness
    psegm = PlaneSegmenter(distance_tolerance=0.001,
                           normal_distance_weight=0.1,
                           consume_distance=0.005,
                           maximum_iterations=1e4,
                           minimum_plane_points=500,
                           minimum_density=0.3 * face_density)
    planes, pc = psegm(pc)
    npc = pc.to_array()
    pcl.save(pc, 'tmp/scene_curved.pcd')
    print('Filtered planes (#PC {})'.format(pc.size))

    # Remove isolated points
    sor = pc.make_statistical_outlier_filter()
    sor.set_mean_k(10)
    sor.set_std_dev_mul_thresh(0.1)
    pc = sor.filter()
    print('#PC: {}'.format(pc.size))
    npc = pc.to_array()
    pcl.save(pc, 'tmp/scene_dense.pcd')
    print('Filtered outliers (#PC {})'.format(pc.size))

    # Finding three spheres directly in scene:
    # spheres = find_spheres(pc, n_max=3)
    # Finding spheres based on Euclidean Clustering
    ece = EuclideanClusterExtractor(
        nn_dist=2 * point_distance,  # ball_radius*(1-np.cos(np.pi/6)),
        min_pts=10,
        min_max_length=0.3 * ball_radius,
        max_max_length=2.3 * ball_radius)
    ecs = ece.extract(pc)
    print('Extracted {} clusters'.format(len(ecs)))
    if len(ecs) == 0:
        print('!!!!!!!\n!!!!!! No Clusters found !!!!!!\n!!!!!!!\n')
        return None
    spheres = []
    for i, ec in enumerate(ecs):
        pcl.save(ec, 'tmp/cluster_{:03d}.pcd'.format(i))
        ec_spheres = find_spheres(ec, ball_radius, n_max=1)
        spheres += ec_spheres
    spheres = [sph for sph in spheres if sph[0].size > 10]
    n_sph = len(spheres)
    spheres.sort(key=lambda s: s[0].size, reverse=True)
    for i in range(n_sph):
        pcl.save(spheres[i][0], 'tmp/sphere_{}.pcd'.format(i))
    centres = [m3d.Vector(sph[1][:3]) for sph in spheres]
    n_centres = len(centres)

    # Setup distance matrix
    distm = spsp.distance.squareform(
        spsp.distance.pdist([c.array for c in centres]))
    # distm = np.zeros((n_centres,n_centres))
    # for i in range(n_centres):
    #     for j in range(n_centres):
    #         distm[i,j] = (centres[i] - centres[j]).length
    print(distm)

    # Match matrix
    matchm0 = np.abs(distm - obj_dims[0]) < 0.01
    matchm1 = np.abs(distm - obj_dims[1]) < 0.01
    match = np.logical_and(matchm0.any(axis=1), matchm1.any(axis=1))

    # Test if there is unambiguous object match
    matchidxs = np.where(match)[0]
    if len(matchidxs) > 1:
        print('!!!!!! Match was ambiguous (#matches={}) !!!!!!\n'.format(
            len(matchidxs)))
        return None
    if len(matchidxs) == 0:
        print('!!!!!!!\n!!!!!! No match found. !!!!!!\n!!!!!!!\n')
        return None
    # Identify indexes for sphere 0
    sph_0_idx = matchidxs[0]
    # Sphere x and y indexes where distances match
    errs_x = np.abs(distm[sph_0_idx] - obj_dims[0])
    errs_y = np.abs(distm[sph_0_idx] - obj_dims[1])
    sph_x_idx = errs_x.argmin()
    sph_y_idx = errs_y.argmin()
    err_x = errs_x[sph_x_idx]
    err_y = errs_y[sph_y_idx]
    print('Matching indices: 0:{}, x:{} ({}), y:{} ({})'.format(
        sph_0_idx, sph_x_idx, err_x, sph_y_idx, err_y))

    # Single out the centres and form the x- and y-unit vectors.
    sph_0 = centres[sph_0_idx]
    sph_x = centres[sph_x_idx]
    sph_y = centres[sph_y_idx]
    d_x = (sph_x - sph_0).normalized
    d_y = (sph_y - sph_0).normalized

    # Form the object transform
    t_obj = m3d.Transform.new_from_xyp(d_x, d_y, sph_0)
    return t_obj
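The sphere-matching step is nothing more than thresholded comparisons on the pairwise distance matrix; a self-contained numpy illustration with synthetic centres and made-up object dimensions:

import numpy as np
import scipy.spatial as spsp

centres = np.array([[0.0, 0.0, 0.0],    # sphere 0
                    [0.1, 0.0, 0.0],    # sphere x, 0.10 m from sphere 0
                    [0.0, 0.2, 0.0]])   # sphere y, 0.20 m from sphere 0
obj_dims = (0.1, 0.2)                   # hypothetical x/y sphere spacings
distm = spsp.distance.squareform(spsp.distance.pdist(centres))
matchm0 = np.abs(distm - obj_dims[0]) < 0.01
matchm1 = np.abs(distm - obj_dims[1]) < 0.01
match = np.logical_and(matchm0.any(axis=1), matchm1.any(axis=1))
print(np.where(match)[0])               # [0]: only sphere 0 sees both spacings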
Code example #15
 def setUp(self):
     self.p = pcl.PointCloud(_data)
Code example #16
    def AllPoses(self, save_poses=False, dir_name=None, align_pc=False):  # read messages, display poses, and write them to file
        print('Showing All Poses from Robot in openRAVE')
        save_time_interval = 0.1  # amount of time between saved stls

        # some directory checking for file creation
        if save_poses:
            if dir_name is not None:
                try:
                    shutil.rmtree(dir_name)
                    print("Folder Exists.  Deleting Folder")
                except:
                    print("Folder Did not Exist")
                os.mkdir(dir_name, 0777)
            else:
                print("no directory name")
                return
        #read all messages
        #if there is a timestamp with enough information, show the pose
        # if there is a timestamp with enough information, save the pointcloud data from that timestep
        bridge = CvBridge()
        last_save_time = 0
        for topic, msg, t in self.bag_gen:
            self.parseData(topic, msg, t)  # adds the message to all_data with the topic as key

            if '/wam/joint_states' in self.all_data[-1].keys() and '/bhand/joint_states' in self.all_data[-1].keys():
                # pdb.set_trace()
                # if the necessary data is in the last entry of list to show a pose
                JA = self.robotStateToArray(
                    self.all_data[-1]['/wam/joint_states'],
                    self.all_data[-1]['/bhand/joint_states'])
                # pdb.set_trace()
                self.setJA(JA)
                print('Pose at time: %s' % self.all_data[-1]['time'])
                time.sleep(0.01)
                if save_poses:
                    if '/camera1/depth/points' in self.all_data[-1].keys() \
                    and '/camera1/rgb/image_raw/compressed' in self.all_data[-1].keys() \
                    and '/camera2/depth/points' in self.all_data[-1].keys() \
                    and '/camera2/rgb/image_raw/compressed' in self.all_data[-1].keys() \
                    and abs(self.all_data[-1]['time'] - last_save_time) > save_time_interval:
                        # if '/kinect2/hd/points' in self.all_data[-1].keys() and '/camera/depth/points' in self.all_data[-1].keys():

                        camera1_pts = self.pc2ToXYZRGB(
                            self.all_data[-1]['/camera1/depth/points'])
                        camera2_pts = self.pc2ToXYZRGB(
                            self.all_data[-1]['/camera2/depth/points'])
                        # camera1_pts = self.pc2ToXYZ(self.all_data[-1]['/camera1/depth/points'])
                        # camera2_pts = self.pc2ToXYZ(self.all_data[-1]['/camera2/depth/points'])
                        self.showPointCloud(camera1_pts)
                        self.showPointCloud(camera2_pts)
                        # pdb.set_trace()

                        # saving image from camera -- should check this at the if statement...
                        try:
                            cv_image1 = bridge.compressed_imgmsg_to_cv2(
                                self.all_data[-1]
                                ['/camera1/rgb/image_raw/compressed'], "bgr8")
                            cv_image2 = bridge.compressed_imgmsg_to_cv2(
                                self.all_data[-1]
                                ['/camera2/rgb/image_raw/compressed'], "bgr8")
                        except CvBridgeError, e:
                            print e

                        # save point clouds
                        camera_pts = [camera1_pts, camera2_pts]
                        # camera_pts = [camera1_pts]
                        camera_filename = [
                            '%s/frame%s_pc%s.csv' %
                            (dir_name, self.frame_count, ci)
                            for ci in range(len(camera_pts))
                        ]
                        for i in range(len(camera_pts)):  # won't work with only one camera
                            with open(camera_filename[i], 'wb') as f:
                                cam_writer = csv.writer(f, delimiter=',')
                                for pt in camera_pts[i]:
                                    cam_writer.writerow(pt)
                            print("CSV File Created: %s" % camera_filename[i])

                        # save STL
                        self.arm.generateSTL('%s/frame%s.stl' %
                                             (dir_name, self.frame_count))

                        # save image
                        cv2.imwrite(
                            '%s/frame%s_camera%s.png' %
                            (dir_name, self.frame_count, 1), cv_image1)
                        cv2.imwrite(
                            '%s/frame%s_camera%s.png' %
                            (dir_name, self.frame_count, 2), cv_image2)

                        self.frame_count += 1
                        last_save_time = self.all_data[-1]['time']

                if align_pc:
                    # check if data is there
                    if '/camera1/depth/points' in self.all_data[-1].keys() \
                    and '/camera2/depth/points' in self.all_data[-1].keys() :
                        # get mesh from STL
                        self.arm.getSTLFeatures()

                        # convert to proper format
                        pcarm = pcl.PointCloud()
                        pcarm.from_list(self.arm.all_vertices)

                        # get data
                        camera1_pts = np.array(
                            self.pc2ToXYZRGB(
                                self.all_data[-1]['/camera1/depth/points']))
                        camera2_pts = np.array(
                            self.pc2ToXYZRGB(
                                self.all_data[-1]['/camera2/depth/points']))

                        # convert to proper format
                        pc1 = pcl.PointCloud()
                        pc2 = pcl.PointCloud()
                        # separate the coordinates from the colors
                        pc1.from_list(camera1_pts[:, 0:3])
                        pc2.from_list(camera2_pts[:, 0:3])
                        pdb.set_trace()
                        if pc1.size == 0 and pc2.size == 0:
                            print('No Points to align')
                        elif pc1.size != 0 and pc2.size == 0:
                            print("Aligning PC1 with arm")
                            icp_success1, pc1_T, pc1_transformed, fitness1 = pcl.registration.icp_nl(
                                source=pc1, target=pcarm, max_iter=None)
                            pc1_transformed_list = np.hstack(
                                (pc1_transformed.to_list(),
                                 camera1_pts[:, 3:]))  # reassemble with colors
                            self.showPointCloud(pc1_transformed_list)

                        elif pc1.size == 0 and pc2.size != 0:
                            print("Aligning PC2 with arm")
                            icp_success2, pc2_T, pc2_transformed, fitness2 = pcl.registration.icp_nl(
                                source=pc2, target=pcarm, max_iter=None)
                            pc2_transformed_list = np.hstack(
                                (pc2_transformed.to_list(),
                                 camera2_pts[:, 3:]))  # reassemble with colors
                            self.showPointCloud(pc2_transformed_list)

                        elif pc1.size != 0 and pc2.size != 0:
                            print("Aligning PC1 to PC2"
                                  )  # align point clouds to other point cloud
                            icp_success12, pc12_T, pc12_transfomed, fitness12 = pcl.registration.icp_nl(
                                source=pc1, target=pc2, max_iter=None)
                            pc12_transformed_list = np.hstack(
                                (pc12_transformed.to_list(),
                                 camera1_pts[:, 3:]))  # reassemble with colors
                            self.showPointCloud(pc12_transformed_list)

                        else:
                            print("Should not be here...")

            for idx, frame in enumerate(self.all_data):
                if frame['time'] < self.all_data[-1]['time']:  # pop entries older than the current time
                    self.all_data.pop(idx)
                    print('List Entry Popped')
Code example #17
File: kinect2grasp.py  Project: RAI-ZZU/PointNetGPD
def cal_grasp(msg, cam_pos_):
    """根据在线采集的点云计算候选的抓取姿态
    """
    #把pointcloud2类型的消息点云,转换为ndarray  points_
    points_ = pointclouds.pointcloud2_to_xyz_array(msg)
    #复制一份points_ ndarray对象,并将所有的点坐标转换为float32类型
    points_ = points_.astype(np.float32)

    remove_white = False
    if remove_white:
        points_ = remove_white_pixel(msg, points_, vis=True)
    # begin voxel points
    n = n_voxel  # parameter related to voxel method
    # gpg improvements, highlights: flexible n parameter for voxelizing.
    # Note: don't run a virtual machine while this executes, or it tends to hang
    points_voxel_ = get_voxel_fun(points_, n)

    # When the cloud has fewer than 2000 points
    if len(points_) < 2000:  # should be a parameter
        while len(points_voxel_) < len(points_) - 15:
            points_voxel_ = get_voxel_fun(points_, n)
            n = n + 100
            rospy.loginfo(
                "the voxel has {} points, we want get {} points".format(
                    len(points_voxel_), len(points_)))

    rospy.loginfo("the voxel has {} points, we want get {} points".format(
        len(points_voxel_), len(points_)))
    #
    points_ = points_voxel_
    remove_points = False
    # Whether to remove the supporting plane
    if remove_points:
        points_ = remove_table_points(points_, vis=True)
    # Rebuild the point cloud from the downsampled points
    point_cloud = pcl.PointCloud(points_)

    print(len(points_))
    # Build the normal-estimation object
    norm = point_cloud.make_NormalEstimation()
    tree = point_cloud.make_kdtree()
    norm.set_SearchMethod(tree)
    # Use the 10 nearest neighbours when estimating each normal
    norm.set_KSearch(10)  # critical parameter when calculating the norms
    normals = norm.compute()

    # Convert the normals to an ndarray
    surface_normal = normals.to_array()

    surface_normal = surface_normal[:, 0:3]

    # Vector from each point to the camera position (position only, no orientation)
    vector_p2cam = cam_pos_ - points_
    #print(vector_p2cam)
    #print(cam_pos_)
    """
    np.linalg.norm(vector_p2cam, axis=1) 默认求2范数,axis=1  代表按行向量处理,求多个行向量的2范数(求模)
    np.linalg.norm(vector_p2cam, axis=1).reshape(-1, 1)  将其调整为m行 1列

    整句话的含义是,将vector_p2cam归一化,单位化
    """
    vector_p2cam = vector_p2cam / np.linalg.norm(vector_p2cam, axis=1).reshape(
        -1, 1)

    # Dot the unit view vectors with the unit surface normals, for the angle computation below
    tmp = np.dot(vector_p2cam, surface_normal.T).diagonal()
    #print(vector_p2cam)
    #print(surface_normal.T)
    #print(tmp)
    """
    np.clip(tmp, -1.0, 1.0)  截取函数,将tmp中的值,都限制在-1.0到1.0之间,大于1的变成1,小于-1的记为-1
    np.arccos() 求解反余弦,求夹角
    """
    angel = np.arccos(np.clip(tmp, -1.0, 1.0))
    #print(angel)

    # Find normals whose angle to the view vector exceeds 90 degrees (assumed to be flipped)
    wrong_dir_norm = np.where(angle > np.pi * 0.5)[0]
    #print(np.where(angel > np.pi * 0.5))
    #print(wrong_dir_norm)
    #print(len(wrong_dir_norm))

    # Create a len(angle) x 3 array of ones
    tmp = np.ones([len(angle), 3])
    # Set the rows of the flipped normals to -1
    tmp[wrong_dir_norm, :] = -1
    # Element-wise multiply to flip the wrongly oriented normals back
    surface_normal = surface_normal * tmp
    # Only sample points more than 2 cm above the table
    select_point_above_table = 0.020
    #modify of gpg: make it as a parameter. avoid select points near the table.
    # Keep points whose z coordinate is above select_point_above_table
    points_for_sample = points_[np.where(
        points_[:, 2] > select_point_above_table)[0]]
    print(len(points_for_sample))
    if len(points_for_sample) == 0:
        rospy.loginfo(
            "Cannot select points, maybe the point cloud is too low?")
        return [], points_, surface_normal
    yaml_config['metrics']['robust_ferrari_canny']['friction_coef'] = value_fc

    grasps_together_ = []

    if rospy.get_param("/robot_at_home") == "false":
        robot_at_home = False
    else:
        robot_at_home = True

    if not robot_at_home:
        rospy.loginfo("Robot is moving, waiting the robot go home.")
    elif not using_mp:
        rospy.loginfo("Begin cal grasps using single thread, slow!")
        grasps_together_ = ags.sample_grasps(point_cloud,
                                             points_for_sample,
                                             surface_normal,
                                             num_grasps_single_worker,
                                             max_num_samples=max_num_samples,
                                             show_final_grasp=show_final_grasp)
    else:
        # begin parallel grasp:
        rospy.loginfo("Begin cal grasps using parallel!")

        def grasp_task(num_grasps_, ags_, queue_):
            ret = ags_.sample_grasps(point_cloud,
                                     points_for_sample,
                                     surface_normal,
                                     num_grasps_,
                                     max_num_samples=max_num_samples,
                                     show_final_grasp=False)
            queue_.put(ret)

        queue = mp.Queue()

        #num_grasps_p_worker = int(num_grasps/num_workers)
        workers = [
            mp.Process(target=grasp_task,
                       args=(num_grasps_p_worker, ags, queue))
            for _ in range(num_workers)
        ]
        [i.start() for i in workers]

        grasps_together_ = []
        for i in range(num_workers):
            grasps_together_ = grasps_together_ + queue.get()
        rospy.loginfo("Finish mp processing!")

        if show_final_grasp and using_mp:
            ags.show_all_grasps(points_, grasps_together_)
            ags.show_points(points_, scale_factor=0.002)
            mlab.show()

    rospy.loginfo("Grasp sampler finish, generated {} grasps.".format(
        len(grasps_together_)))
    # Return the grasps, the scene points, and the surface normals
    return grasps_together_, points_, surface_normal
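The normal re-orientation trick above is worth isolating; a standalone numpy sketch with one synthetic point whose estimated normal faces away from the camera:

import numpy as np

cam_pos = np.array([0.0, 0.0, 1.0])
points = np.array([[0.0, 0.0, 0.0]])
normals = np.array([[0.0, 0.0, -1.0]])   # estimated normal, pointing away
v = cam_pos - points                     # point-to-camera view vectors
v = v / np.linalg.norm(v, axis=1).reshape(-1, 1)
angle = np.arccos(np.clip(np.dot(v, normals.T).diagonal(), -1.0, 1.0))
flip = np.ones([len(angle), 3])
flip[np.where(angle > np.pi * 0.5)[0], :] = -1
print(normals * flip)                    # z component is now +1, camera-facing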
Code example #18
    manager = cluster_manager(debug=args.debug)

for i, velo in enumerate(dataset.velo):
    if len(tracklet_rects[i]) == 0: continue
    #if i%30!=0:
    #    continue
    oxts_pose = next(iter(itertools.islice(dataset.oxts, i, None))).T_w_imu
    oxts_pose = oxts_pose.dot(np.linalg.inv(
        dataset.calib.T_velo_imu))  # world-velo

    # register points to world coord
    velo = np.hstack((velo[:, :3], np.ones(
        (velo.shape[0], 1)))).dot(oxts_pose.T)

    # vox
    velo = pcl.PointCloud(velo[:, :3].astype(np.float32))
    sor = velo.make_voxel_grid_filter()
    sor.set_leaf_size(0.1, 0.1, 0.1)
    velo = sor.filter().to_array()

    # register robot center
    cen = np.vstack((cen, oxts_pose.dot([0, 0, 0, 1])))

    velo = filter_ground(velo, cen, th=15)

    labels = cluster_points(velo, n_clusters=n_clusters)

    if args.debug:
        manager.update(velo, labels)
    # evaluation
    for j, box in enumerate(tracklet_rects[i]):
Code example #19
 def testCropHull(self):
     filterCloud = pcl.PointCloud()
     vt = pcl.Vertices()
Code example #20
def main():
    try:
        from pylibfreenect2 import OpenCLPacketPipeline
        pipeline = OpenCLPacketPipeline()
    except:
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set True if you need them
    need_bigdepth = False
    need_color_depth_map = False

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
        if need_color_depth_map else None

    point = pcl.PointCloud()
    visual = pcl.pcl_visualization.CloudViewing()

    while True:
        frames = listener.waitForNewFrame()

        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        # NOTE for visualization:
        # cv2.imshow without OpenGL backend seems to be quite slow to draw all
        # things below. Try commenting out some imshow if you don't have a fast
        # visualization backend.
        # cv2.imshow("ir", ir.asarray() / 65535.)
        # cv2.imshow("depth", depth.asarray() / 4500.)
        # cv2.imshow("color", cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3))))
        # cv2.imshow("registered", registered.asarray(np.uint8))

        # if need_bigdepth:
        #     cv2.imshow("bigdepth", cv2.resize(bigdepth.asarray(np.float32),
        #                                       (int(1920 / 3), int(1082 / 3))))
        # if need_color_depth_map:
        #     cv2.imshow("color_depth_map", color_depth_map.reshape(424, 512))

        undistorted_array = undistorted.asarray(dtype=np.float32, ndim=2)
        # registered_array = registered.asarray(dtype=np.uint8)
        point = pcl.PointCloud(undistorted_array)
        # visual.ShowColorCloud(cloud)

        listener.release(frames)

        # key = cv2.waitKey(delay=1)
        # if key == ord('q'):
        #     break
        # spin until the cloud viewer window is closed
        while not visual.WasStopped():
            pass

    device.stop()
    device.close()

    sys.exit(0)
Code example #21
    def setUp(self):
        self.p = pcl.PointCloud(_data)
        self.segment = pcl.ProgressiveMorphologicalFilter()
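
The stub above only constructs the filter. A sketch of how a full ground-extraction call might look, assuming the python-pcl binding mirrors PCL's C++ ProgressiveMorphologicalFilter (the make_ProgressiveMorphologicalFilter factory, the setter names, and the input file are assumptions, not taken from this document):

import pcl

cloud = pcl.load("ground_scene.pcd")  # hypothetical input file
pmf = cloud.make_ProgressiveMorphologicalFilter()  # assumed factory method
pmf.set_MaxWindowSize(20)       # assumed setter: largest morphological window
pmf.set_Slope(1.0)              # assumed setter: terrain slope tolerance
pmf.set_InitialDistance(0.5)    # assumed setter: initial height threshold (m)
pmf.set_MaxDistance(3.0)        # assumed setter: maximum height threshold (m)
ground_indices = pmf.extract()  # assumed to return the ground-point indices
ground = cloud.extract(ground_indices, negative=False)
print(ground.size)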
Code example #22
        file_count += 1

    for model_index in range(len(data_test)):

        test_file = open(
            OUT_DIR + "/test/" + file_name + str(file_count) + ".txt", 'w')
        test_data_file = open(
            OUT_DIR + "/test_data/" + file_name + str(file_count) + ".pts",
            'w')
        test_label_file = open(
            OUT_DIR + "/test_label/" + file_name + str(file_count) + ".seg",
            'w')

        try:
            test_cloud = pcl.PointCloud(data_test[model_index])
            print(test_cloud)
            pcl.save(test_cloud, (OUT_DIR + "/test_ply/" + file_name +
                                  str(file_count) + ".ply"),
                     format="ply")
        except Exception:
            print("No data, skipping.")
            continue
        for data in range(len(data_test[model_index])):

            data_str = " ".join(
                str(v) for v in data_test[model_index][data][:3])
            normal_str = " ".join(
                str(v) for v in normal_test[model_index][data][:3])
Code example #23
    def setUp(self):
        self.p = pcl.PointCloud(_data)
        self.segment = pcl.ConditionalEuclideanClustering()
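
The conditional variant above is only constructed here, and its condition-callback API is not shown anywhere in this document. For reference, a sketch of plain Euclidean cluster extraction with the same binding, built from the calls that appear verbatim in code example #26 below (only the input file name is a hypothetical placeholder):

import pcl

cloud = pcl.load("scene.pcd")  # hypothetical input file
tree = cloud.make_kdtree()
ec = cloud.make_EuclideanClusterExtraction()
ec.set_ClusterTolerance(0.5)   # maximum point spacing within a cluster (m)
ec.set_MinClusterSize(100)
ec.set_MaxClusterSize(50000)
ec.set_SearchMethod(tree)
cluster_indices = ec.Extract()  # one list of point indices per cluster
for j, indices in enumerate(cluster_indices):
    print("cluster", j, "has", len(indices), "points")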
Code example #24
def cal_grasp(msg, cam_pos_):
    points_ = pointclouds.pointcloud2_to_xyz_array(msg)
    points_ = points_.astype(np.float32)
    remove_white = False
    if remove_white:
        points_ = remove_white_pixel(msg, points_, vis=True)
    # begin voxelizing the points
    n = n_voxel  # parameter controlling the voxel method
    # gpg improvement: a flexible n parameter for voxelizing
    # liang: the Kinect2 is not well calibrated, so offset the points as a workaround
    points_[:, 0] += 0.025
    points_[:, 2] = points_[:, 2]  # + 0.018  # second calibration offset, disabled

    points_voxel_ = get_voxel_fun(points_, n)
    if len(points_) < 2000:  # should be a parameter
        while len(points_voxel_) < len(points_) - 15:
            points_voxel_ = get_voxel_fun(points_, n)
            n = n + 100
            rospy.loginfo(
                "the voxel cloud has {} points; we want {} points".format(
                    len(points_voxel_), len(points_)))

    rospy.loginfo("the voxel cloud has {} points; we want {} points".format(
        len(points_voxel_), len(points_)))
    points_ = points_voxel_
    remove_points = False
    if remove_points:
        points_ = remove_table_points(points_, vis=True)
    point_cloud = pcl.PointCloud(points_)
    norm = point_cloud.make_NormalEstimation()
    norm.set_KSearch(30)  # critical parameter for normal estimation
    normals = norm.compute()
    surface_normal = normals.to_array()
    surface_normal = surface_normal[:, 0:3]  # keep nx, ny, nz; drop curvature
    # flip normals so they point toward the camera: if the angle between the
    # point-to-camera vector and the normal exceeds 90 degrees, negate the normal
    vector_p2cam = cam_pos_ - points_
    vector_p2cam = vector_p2cam / np.linalg.norm(vector_p2cam, axis=1).reshape(
        -1, 1)
    tmp = np.dot(vector_p2cam, surface_normal.T).diagonal()
    angle = np.arccos(np.clip(tmp, -1.0, 1.0))
    wrong_dir_norm = np.where(angle > np.pi * 0.5)[0]
    tmp = np.ones([len(angle), 3])
    tmp[wrong_dir_norm, :] = -1
    surface_normal = surface_normal * tmp
    # gpg modification: make this a parameter, to avoid selecting points near the table
    select_point_above_table = 0.010
    points_for_sample = points_[np.where(
        points_[:, 2] > select_point_above_table)[0]]
    if len(points_for_sample) == 0:
        rospy.loginfo(
            "Cannot select any points; maybe the point cloud is too low?")
        return [], points_, surface_normal
    yaml_config['metrics']['robust_ferrari_canny']['friction_coef'] = value_fc
    if not using_mp:
        rospy.loginfo("Begin cal grasps using single thread, slow!")
        grasps_together_ = ags.sample_grasps(point_cloud,
                                             points_for_sample,
                                             surface_normal,
                                             num_grasps,
                                             max_num_samples=max_num_samples,
                                             show_final_grasp=show_final_grasp)
    else:
        # begin parallel grasp sampling
        rospy.loginfo("Begin calculating grasps in parallel!")

        def grasp_task(num_grasps_, ags_, queue_):
            ret = ags_.sample_grasps(point_cloud,
                                     points_for_sample,
                                     surface_normal,
                                     num_grasps_,
                                     max_num_samples=max_num_samples,
                                     show_final_grasp=show_final_grasp)
            queue_.put(ret)

        queue = mp.Queue()
        num_grasps_p_worker = int(num_grasps / num_workers)
        workers = [
            mp.Process(target=grasp_task,
                       args=(num_grasps_p_worker, ags, queue))
            for _ in range(num_workers)
        ]
        for worker_ in workers:
            worker_.start()

        grasps_together_ = []
        for _ in range(num_workers):
            grasps_together_ += queue.get()
        for worker_ in workers:
            worker_.join()
        rospy.loginfo("Finished multiprocessing!")
    rospy.loginfo("Grasp sampler finish, generated {} grasps.".format(
        len(grasps_together_)))
    return grasps_together_, points_, surface_normal
Code example #25
File: 3dexam.py Project: Igniscode/UsingPCL
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import pcl

cloud = pcl.PointCloud()
cloud._from_ply_file("E:/CloudMethod/Changed/000.ply")

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

clouds = np.array(cloud.to_array(), dtype=np.float32)

# subsample every 20th point so the scatter plot stays responsive

XT = clouds[:,0]
YT = clouds[:,1]
ZT = clouds[:,2]
X = XT[1::20]
Y = YT[1::20]
Z = ZT[1::20]
print(X, Y, Z)
ax.scatter(X, Y, Z , s=0.5 ,c='r')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')

plt.show()
Code example #26
File: gen_label_graph.py Project: lyk125/SG_PR
    def gen_labels(self, FLAGS, scan_name, label_name, label_output_dir):
        # start = time.time()
        # open scan
        # TODO(yxm): downsampling
        scan = np.fromfile(scan_name, dtype=np.float32)
        scan = scan.reshape((-1, 4))
        # put in attribute
        points = scan[:, 0:4]  # get xyzr
        remissions = scan[:, 3]  # get remission

        label = np.fromfile(label_name, dtype=np.uint32)
        label = label.reshape((-1))

        # optionally drop a random angular sector of the scan ("demolition")
        if FLAGS.demolition:
            start_angle = np.random.random()
            start_angle *= 360
            end_angle = (start_angle + drop_angle) % 360

            angle = np.arctan2(points[:, 1], points[:, 0])
            angle = angle * 180 / np.pi
            angle += 180
            # print("angle:", angle)
            if end_angle > start_angle:
                remain_id = np.argwhere(angle < start_angle).reshape(-1)
                remain_id = np.append(
                    remain_id,
                    np.argwhere(angle > end_angle).reshape(-1))
            else:
                remain_id = np.argwhere((angle > end_angle)
                                        & (angle < start_angle)).reshape(-1)

            points = points[remain_id, :]
            label = label[remain_id]

        if label.shape[0] == points.shape[0]:
            sem_label = label & 0xFFFF  # semantic label in lower half
            inst_label = label >> 16  # instance id in upper half
            assert ((sem_label + (inst_label << 16) == label).all())
        else:
            print("Points shape: ", points.shape)
            print("Label shape: ", label.shape)
            raise ValueError(
                "Scan and label don't contain the same number of points")

        sem_label = remap_lut[sem_label]
        sem_label_set = list(set(sem_label))
        sem_label_set.sort()

        # Start clustering
        cluster = []
        inst_id = 0
        for id_i, label_i in enumerate(sem_label_set):
            # print('sem_label:', label_i)
            index = np.argwhere(sem_label == label_i)
            index = index.reshape(index.shape[0])
            sem_cluster = points[index, :]
            # print("sem_cluster_shape:",sem_cluster.shape[0])

            tmp_inst_label = inst_label[index]
            tmp_inst_set = list(set(tmp_inst_label))
            tmp_inst_set.sort()
            # print(tmp_inst_set)

            if label_i in [9, 10]:  # road/parking, no clustering needed
                inst_cluster = sem_cluster
                inst_cluster = np.concatenate(
                    (inst_cluster,
                     np.full(
                         (inst_cluster.shape[0], 1), label_i,
                         dtype=np.uint32)),
                    axis=1)
                # inst_cluster = np.insert(inst_cluster, 4, label_i, axis=1)
                inst_cluster = np.concatenate(
                    (inst_cluster,
                     np.full(
                         (inst_cluster.shape[0], 1), inst_id,
                         dtype=np.uint32)),
                    axis=1)
                inst_id = inst_id + 1
                cluster.extend(inst_cluster)  # Nx6
                continue

            elif label_i in [0, 2, 3, 6, 7, 8]:  # discard
                continue

            elif len(tmp_inst_set) > 1 or (
                    len(tmp_inst_set) == 1
                    and tmp_inst_set[0] != 0):  # have instance labels
                for id_j, label_j in enumerate(tmp_inst_set):
                    points_index = np.argwhere(tmp_inst_label == label_j)
                    points_index = points_index.reshape(points_index.shape[0])
                    # print(id_j, 'inst_size:', len(points_index))
                    if len(points_index) <= 20:
                        continue
                    inst_cluster = sem_cluster[points_index, :]
                    inst_cluster = np.concatenate(
                        (inst_cluster,
                         np.full((inst_cluster.shape[0], 1),
                                 label_i,
                                 dtype=np.uint32)),
                        axis=1)
                    # inst_cluster = np.insert(inst_cluster, 4, label_i, axis=1)
                    inst_cluster = np.concatenate(
                        (inst_cluster,
                         np.full((inst_cluster.shape[0], 1),
                                 inst_id,
                                 dtype=np.uint32)),
                        axis=1)
                    inst_id = inst_id + 1
                    cluster.extend(inst_cluster)
            else:  # Euclidean cluster
                # time_start = time.time()
                if label_i in [1, 4, 5, 14]:  # car truck other-vehicle fence
                    cluster_tolerance = 0.5
                elif label_i in [
                        11, 12, 13, 15, 17
                ]:  # sidewalk other-ground building vegetation terrain
                    cluster_tolerance = 2
                else:
                    cluster_tolerance = 0.2

                if label_i in [16, 19]:  # trunk traffic-sign
                    min_size = 50
                elif label_i == 15:  # vegetation
                    min_size = 200
                elif label_i in [11, 12, 13,
                                 17]:  # sidewalk other-ground building terrain
                    min_size = 300
                else:
                    min_size = 100

                # print(cluster_tolerance, min_size)
                cloud = pcl.PointCloud(sem_cluster[:, 0:3])
                tree = cloud.make_kdtree()
                ec = cloud.make_EuclideanClusterExtraction()
                ec.set_ClusterTolerance(cluster_tolerance)
                ec.set_MinClusterSize(min_size)
                ec.set_MaxClusterSize(50000)
                ec.set_SearchMethod(tree)
                cluster_indices = ec.Extract()
                # time_end = time.time()
                # print(time_end - time_start)
                for j, indices in enumerate(cluster_indices):
                    # print('j = ', j, ', indices = ' + str(len(indices)))
                    inst_cluster = sem_cluster[np.array(indices), 0:4]
                    inst_cluster = np.concatenate(
                        (inst_cluster,
                         np.full((inst_cluster.shape[0], 1),
                                 label_i,
                                 dtype=np.uint32)),
                        axis=1)
                    # inst_cluster = np.insert(inst_cluster, 4, label_i, axis=1)
                    inst_cluster = np.concatenate(
                        (inst_cluster,
                         np.full((inst_cluster.shape[0], 1),
                                 inst_id,
                                 dtype=np.uint32)),
                        axis=1)
                    inst_id = inst_id + 1
                    cluster.extend(inst_cluster)  # Nx6

        # print(time.time()-start)
        # print('*'*80)
        cluster = np.array(cluster)
        if 'path' in FLAGS.pub_or_path:
            np.save(
                label_output_dir + '/' +
                label_name.split('/')[-1].split('.')[0] + ".npy", cluster)
        if 'pub' in FLAGS.pub_or_path:
            # print(cluster[11100:11110])
            msg_points = pc2.create_cloud(header=self.header1,
                                          fields=_make_point_field(
                                              cluster.shape[1]),
                                          points=cluster)
            self._labels_pub.publish(msg_points)

        return cluster
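
The label unpacking in gen_labels relies on the 32-bit layout used by SemanticKITTI-style labels: the lower 16 bits hold the semantic class and the upper 16 bits the instance id. A tiny self-contained check of that round trip:

import numpy as np

label = np.uint32((7 << 16) | 42)  # instance id 7, semantic class 42
sem_label = label & 0xFFFF         # -> 42
inst_label = label >> 16           # -> 7
assert sem_label + (inst_label << 16) == label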
Code example #27
import numpy as np
from math import cos, sin

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pcl
from pcl.registration import icp, gicp, icp_nl

theta = [0., 0., np.pi]
rot_x = [[1, 0, 0], [0, cos(theta[0]), -sin(theta[0])],
         [0, sin(theta[0]), cos(theta[0])]]
rot_y = [[cos(theta[1]), 0, sin(theta[1])], [0, 1, 0],
         [-sin(theta[1]), 0, cos(theta[1])]]
rot_z = [[cos(theta[2]), -sin(theta[2]), 0], [sin(theta[2]),
                                              cos(theta[2]), 0], [0, 0, 1]]
transform = np.dot(rot_x, np.dot(rot_y, rot_z))

source = np.random.randn(100, 3)
source_pc = pcl.PointCloud(source.astype(np.float32))
target = np.dot(transform, source.T).T
target_pc = pcl.PointCloud(target.astype(np.float32))

converged, transf, estimate, fitness = icp(source_pc,
                                           target_pc,
                                           max_iter=10000)

print(converged, transf, estimate, fitness)
result = estimate.to_array()

fig = plt.figure()
ax = fig.add_subplot(221, projection='3d')
ax.scatter(source[:, 0], source[:, 1], source[:, 2])
ax = fig.add_subplot(222, projection='3d')
ax.scatter(target[:, 0], target[:, 1], target[:, 2])
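
The figure above fills only two of its four subplot slots and never calls plt.show(). A plausible continuation (an assumption, not part of the original) plots the ICP estimate in the next slot and displays the figure:

ax = fig.add_subplot(223, projection='3d')
ax.scatter(result[:, 0], result[:, 1], result[:, 2])
plt.show()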
Code example #28
def write_pcd_file(pointcloud, output_path):
    # wrap an Nx3 float array in a pcl.PointCloud and write it to disk as a .pcd file
    output_pointcloud = pcl.PointCloud()
    output_pointcloud.from_array(np.float32(pointcloud))
    output_pointcloud.to_file(output_path)
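
A hypothetical usage of the helper above (the file name is a placeholder; note that some python-pcl builds expect a bytes path for to_file on Python 3):

import numpy as np

points = np.random.rand(1000, 3)
write_pcd_file(points, b"cloud.pcd")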
Code example #29
def add_pose_estimation(bag):
	v_z = Quaternion(0, 0, 0, 1)
	# orientation of the imu frame
	imu_frame = Quaternion(-0.0198769629216, 0.974536168168, -0.218396560265, -0.0467665023439)
	x_vec = np.array([1, 0, 0])
	time = []
	previous_pcl = pcl.PointCloud()
	current_pcl = pcl.PointCloud()
	est_pose_bool = False
	pose_msg = PoseWithCovarianceStamped()
	pose_msg.header.frame_id = 'base_footprint'
	previous_pose = np.array([])
	previous_ori = []
	x = []
	y = []
	prev_tr = np.array([])
	cov = [0] * 36
	scan_time = []
	ros_time = []
	f_pose = 20 # Hz
	queued_pcl_list = []
	dt = 0
	print('Extracting poses using ICP')
	for topic, msg, t in bag.read_messages(topics=['/imu/data', '/scan'],
										   start_time=rospy.Time(bag.get_start_time())+rospy.Duration(0)):
		#
		if topic == '/imu/data':
			q = Quaternion(msg.orientation.w, msg.orientation.x, msg.orientation.y, msg.orientation.z)
			v_z_ = q * imu_frame * v_z * imu_frame.conjugate * q.conjugate
			imu_rot = q

			if np.arccos(v_z_[3])-math.pi > -.05:
				est_pose_bool = True
			else:
				est_pose_bool = False

			# EDIT
			est_pose_bool = True

		elif topic == "/scan" and est_pose_bool:
			previous_pcl.from_array(current_pcl.to_array())
			current_pcl.from_array(to_pcl_array(msg))
			if dt != 0:
				if dt > 1:
					queued_pcl_list = []
				else:
					queued_pcl_list.append(previous_pcl)
					if len(queued_pcl_list) > 1:  # cap the queue at two scans (fewer than 1521*2*3 points)
						queued_pcl_list.pop(0)

					previous_pcl_array = np.concatenate(([queue_.to_array() for queue_ in queued_pcl_list]), axis=0)
					previous_pcl.from_array(previous_pcl_array)

			if not time:
				time.append(msg.header.stamp.to_sec())
				prev_tr = np.zeros((1, 3))
				scan_time.append(msg.header.stamp.to_sec())
				ros_time.append(t.to_sec())
				yaw = [0]
			if previous_pcl.size != 0:
				if len(scan_time) == 1:
					init_q = get_yaw_quaternion(imu_rot)
					prev_q = Quaternion(1, 0, 0, 0)
					q_array = [prev_q]
				icp = current_pcl.make_IterativeClosestPoint()
				# Final = icp.align()
				converged, transf, transf_current_pcl, fitness = icp.icp(current_pcl, previous_pcl)

				# check if the rotation matrix is valid (i.e. det==1)
				if not np.isclose(np.linalg.det(transf[0:3, 0:3]), 1.0):
					converged = False

				if converged:

					time.append(msg.header.stamp.to_sec())
					dt = time[-1] - time[-2]

					if 0 < dt <= 0.5:  # skip the pose if more than 10 scans (i.e. 0.5 s) were dropped
						# get covariance matrix
						pose_orientation, pose_translation = matrix4_to_rot_trans(imu_rot, transf, mode='2d')
						#pose_orientation, pose_translation, cov = get_cov(transf, current_pcl, transf_current_pcl)

						if np.linalg.norm(pose_translation)/dt*3.6 < 40: # the speed between two scans should not be greater than 40 kph
							prev_tr = np.append(prev_tr, [prev_q.rotate(pose_translation) + prev_tr[-1]],
												axis=0)
							prev_q = init_q.conjugate*pose_orientation
							q_array.append(prev_q)

							scan_time.append(msg.header.stamp.to_sec())
							ros_time.append(t.to_sec())
				else:
					dt = 0

	print('%i poses were calculated' % len(scan_time))
	print('Interpolating the poses (orientation, position)')
	interp_time = np.linspace(scan_time[0], scan_time[-1], np.round((scan_time[-1] - scan_time[0])*f_pose))
	tr = np.zeros((interp_time.size, 3))
	interp_ros_time = np.linspace(ros_time[0], ros_time[-1], interp_time.size)
	ori = get_interpolated_quat(interp_time, scan_time, q_array)
	tr[:, 0] = np.interp(interp_time, scan_time, prev_tr[:, 0])
	tr[:, 1] = np.interp(interp_time, scan_time, prev_tr[:, 1])

	print('Changing coordinates to the footprint frame')
	# rotate from laser_frame to footprint frame
	laser_rd = -(90+9)*math.pi/180
	X = np.cos(laser_rd) * tr[:, 1] - np.sin(laser_rd) * tr[:, 0]
	Y = np.sin(laser_rd) * tr[:, 1] + np.cos(laser_rd) * tr[:, 0]
	tr[:, 1] = X
	tr[:, 0] = Y
	# rotate the orientation
	'''rot_ = Quaternion(axis=[0, 0, 1], radians=laser_rd)
	ori = [rot_*o for o in ori]'''
	print(ori)
	plt.plot(X, Y)
	plt.axis('equal')
	plt.show()
	print('Writing the poses to the /scan_pose topic')
	write_pose_to_bag(bag, interp_time, interp_ros_time, ori, tr)
	print('Re-indexing the bag')
	for done in bag.reindex():
		pass
	bag.close()
	print('Done')
Code example #30
def save_pcl(pcd, name):
    # wrap an Nx3 float array in a pcl.PointCloud and write it to the given file
    pc = pcl.PointCloud()
    pc.from_array(pcd.astype(np.float32))
    pc.to_file(name)