Code Example #1
 def set_cmd_vel(self, msg):
     rospy.loginfo("Calculating the points...")
     # Initialize the centroid coordinate sums and the point count
     x = y = n = 0
     z = float('inf')  # tracks the depth of the closest qualifying point
     
     # Read in the x, y, z coordinates of all points in the cloud
     for point in point_cloud2.read_points(msg, skip_nans=True):
         pt_x = point[0]
         pt_y = point[1]
         pt_z = point[2]
         
         if pt_z < self.goal_z:
             x += pt_x
             y += pt_y
             z = min(pt_z, z)
             n += 1
             
     # Stop the robot by default
     move_cmd = Twist()
     
     rospy.loginfo(n)
     
     # Stop if the number of points is large enough
     if n > self.point_cloud_threshold:
         self.stop = True
         move_cmd.linear.x = 0
         move_cmd.angular.z = 0
                     
     # Publish the movement command
     self.cmd_vel_pub.publish(move_cmd)
     # Allow up to 2 seconds for the action server to come up
     self.move_base.wait_for_server(rospy.Duration(2))
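For context, a minimal wiring sketch for a callback like this: the node name, topic names, and parameter defaults below are assumptions for illustration, not taken from the project above (the move_base action client used at the end is omitted).

    # Hypothetical wiring: subscribe set_cmd_vel to a depth camera's
    # PointCloud2 topic and publish Twist commands on /cmd_vel.
    import rospy
    from geometry_msgs.msg import Twist
    from sensor_msgs.msg import PointCloud2
    from sensor_msgs import point_cloud2

    class Follower:
        def __init__(self):
            rospy.init_node('follower')
            self.goal_z = rospy.get_param('~goal_z', 0.6)  # assumed default
            self.point_cloud_threshold = rospy.get_param('~point_cloud_threshold', 4000)
            self.stop = False
            self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
            rospy.Subscriber('/camera/depth/points', PointCloud2, self.set_cmd_vel)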
Code Example #2
    def _cloud_cb(self, cloud):
        points = np.array(list(read_points(cloud, skip_nans=True)))
        if points.shape[0] == 0:
            return

        # Get 4x4 matrix which transforms point cloud co-ords to odometry frame
        try:
            points_to_map = self.tf.asMatrix('/odom', cloud.header)
        except tf.ExtrapolationException:
            return

        # Append a row of ones so the 4x4 transform applies in a single product
        homogeneous = np.vstack((points.T, np.ones((1, points.shape[0]))))
        transformed_points = points_to_map.dot(homogeneous)[:3, :].T

        if self.fused is None:
            self.fused = transformed_points
        else:
            self.fused = np.vstack((self.fused, transformed_points))

        self.seq += 1
        header = Header()
        header.seq = self.seq
        header.stamp = rospy.Time.now()
        header.frame_id = '/odom'

        output_cloud = create_cloud(header, cloud.fields, self.fused)

        rospy.loginfo('Publishing new cloud')
        self.cloud_pub.publish(output_cloud)
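The homogeneous-coordinate step above is the core of the fusion: appending a row of ones lets a single 4x4 matrix apply rotation and translation in one product. A self-contained numpy sketch of the same operation on toy data:

    import numpy as np

    points = np.array([[1.0, 0.0, 0.0],
                       [0.0, 2.0, 0.5]])       # toy (N, 3) cloud
    T = np.eye(4)
    T[:3, 3] = [10.0, 0.0, 0.0]                # pure translation for the demo

    # Same pattern as _cloud_cb: stack to (4, N), transform, slice back to (N, 3)
    homogeneous = np.vstack((points.T, np.ones((1, points.shape[0]))))
    transformed = T.dot(homogeneous)[:3, :].T
    print(transformed)                         # each point shifted by +10 in x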
Code Example #3
    def set_cmd_vel(self, msg):
        # Initialize the centroid coordinate sums and the point count
        x = y = z = n = 0

        # Read in the x, y, z coordinates of all points in the cloud
        for point in point_cloud2.read_points(msg, skip_nans=True):
            pt_x = point[0]
            pt_y = point[1]
            pt_z = point[2]

            # Keep only those points within our designated boundaries and sum them up
            if (
                self.min_y < -pt_y < self.max_y
                and self.min_x < pt_x < self.max_x
                and pt_z < self.max_z
            ):
                x += pt_x
                y += pt_y
                z += pt_z
                n += 1

        # Stop the robot by default
        move_cmd = Twist()

        # If we have points, compute the centroid coordinates
        if n:
            x /= n
            y /= n
            z /= n

            # Check our movement thresholds
            if (abs(z - self.goal_z) > self.z_threshold) or (abs(x) > self.x_threshold):
                # Compute the linear and angular components of the movement
                linear_speed = (z - self.goal_z) * self.z_scale
                angular_speed = -x * self.x_scale

                # Make sure we meet our min/max specifications
                linear_speed = copysign(
                    max(
                        self.min_linear_speed,
                        min(self.max_linear_speed, abs(linear_speed)),
                    ),
                    linear_speed,
                )
                angular_speed = copysign(
                    max(
                        self.min_angular_speed,
                        min(self.max_angular_speed, abs(angular_speed)),
                    ),
                    angular_speed,
                )

                move_cmd.linear.x = linear_speed
                move_cmd.angular.z = angular_speed

        # Publish the movement command
        self.cmd_vel_pub.publish(move_cmd)
Code Example #4
 def getPointCloud(self):
     print('getting point cloud...')
     pc_message = rospy.wait_for_message("/multisense/camera/points2",
                                         PointCloud2)
     print('got point cloud.')
     self.point_cloud = pc2.read_points(pc_message,
                                        field_names=("x", "y", "z"),
                                        skip_nans=True)
     return self.point_cloud
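Note that read_points returns a generator, so the value cached in self.point_cloud is exhausted after one pass. Where the cloud must be reused or processed numerically, a sketch (assuming the same pc_message) that materializes it instead:

    import numpy as np
    from sensor_msgs import point_cloud2 as pc2

    # Materialize the generator once; the resulting (N, 3) array can be
    # iterated repeatedly and used for vectorized math.
    points = np.array(list(pc2.read_points(pc_message,
                                           field_names=("x", "y", "z"),
                                           skip_nans=True)))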
Code Example #5
    def run(self):
        # Run until the node shuts down, so comms.stop() below is reachable
        while not rospy.is_shutdown():
            ## Main loop

            # Finals task
            # /srcsim/finals/task

            # Joint state
            joint_state_msg = rospy.wait_for_message(
                "/ihmc_ros/valkyrie/output/joint_states", JointState)
            self.comms.transmit([
                '/ihmc_ros/valkyrie/output/joint_states', {
                    'name': joint_state_msg.name,
                    'position': joint_state_msg.position
                }
            ])

            # Transforms
            #transforms_msg = rospy.wait_for_message("/tf",TFMessage)
            #comms.transmit( ['/tf',{'transforms':transforms_msg.transforms}] )

            # Left camera image
            # TODO: use grey scale instead of color to save bits?
            # TODO: or send Canny edge detection (presumably binary) to save bits
            left_camera_msg = rospy.wait_for_message(
                '/multisense/camera/left/image_rect_color', Image)
            self.comms.transmit([
                '/multisense/camera/left/image_rect_color', {
                    'height': left_camera_msg.height,
                    'width': left_camera_msg.width,
                    'encoding': left_camera_msg.encoding,
                    'is_bigendian': left_camera_msg.is_bigendian,
                    'step': left_camera_msg.step,
                    'data': left_camera_msg.data
                }
            ])

            # Point Cloud
            # We don't send full point cloud, but the reduced colorless 3d points
            pcl_msg = rospy.wait_for_message('/multisense/camera/points2',
                                             PointCloud2)
            #comms.transmit(['/multisense/camera/points2',{'height':pcl_msg.height, 'width':pcl_msg.width, 'is_bigendian':pcl_msg.is_bigendian, 'point_step':pcl_msg.point_step, 'row_step':pcl_msg.row_step, 'data':pcl_msg.data, 'is_dense':pcl_msg.is_dense}])
            pcl = pc2.read_points(pcl_msg,
                                  field_names=("x", "y", "z"),
                                  skip_nans=True)
            pcl_data = []
            for point in pcl:
                pcl_data.append([point[2], point[0], point[1]])
            self.comms.transmit(['/multisense/camera/points2', pcl_data])

            rospy.sleep(0.5)

        self.comms.stop()
Code Example #6
File: depth_cam.py  Project: apache8080/BluetoothBot
def get_min_depth(point, pcl_data):
    global min_depths
    # point is a (u, v) pixel: point[0] is the column, point[1] the row
    if point[1] >= pcl_data.height or point[0] >= pcl_data.width:
        return -1
    data_out = pc2.read_points(pcl_data, field_names=('x', 'y', 'z'),
                               skip_nans=False, uvs=[point])
    d = next(data_out)
    # print(d)
    if not math.isnan(d[2]):
        if len(min_depths) == 0:
            min_depths = d
        elif d[2] < min_depths[2]:
            min_depths = d
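The uvs argument restricts read_points to specific pixels, each given as a (u, v) = (column, row) pair. A hypothetical usage sketch, assuming a PointCloud2 message named cloud_msg alongside the global min_depths list above:

    # Probe a few neighbouring pixels and keep the closest valid depth.
    min_depths = []
    for uv in [(320, 240), (321, 240), (320, 241)]:  # (column, row) pairs
        get_min_depth(uv, cloud_msg)
    print(min_depths)  # (x, y, z) of the nearest non-NaN point seen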
Code Example #7
def onPointCloud(data):
    global isStitching
    if isStitching:
        points = pc2.read_points(data)
        orientation = (lastIMU.orientation.x, lastIMU.orientation.y,
                       lastIMU.orientation.z, lastIMU.orientation.w)
        angle = tf.transformations.euler_from_quaternion(orientation)

        for p in points:
            correctedPoint = rotateVector(
                p, 22.5, 0)  # to correct clouds pointing up...
            rotatedPoint = rotateVector(correctedPoint, -angle[2], 2)
            pointCloud.append(rotatedPoint)
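rotateVector is a project helper that none of these snippets define; judging from the call sites, it rotates a 3-vector about one coordinate axis. A plausible sketch under assumptions: angles in radians and axis index 0/1/2 selecting x/y/z (the literal 22.5 above suggests degrees, so the unit convention is a guess):

    import math

    # Hypothetical helper: rotate 3-vector v by 'angle' about axis 0/1/2 (x/y/z).
    def rotateVector(v, angle, axis):
        c, s = math.cos(angle), math.sin(angle)
        i, j = [(1, 2), (0, 2), (0, 1)][axis]   # the two rotated components
        out = list(v)
        out[i] = c * v[i] - s * v[j]
        out[j] = s * v[i] + c * v[j]
        return out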
Code Example #8
    def update_obstacles(self, msg):
        # Initialize the centroid coordinate sums and the point count
        x = y = z = n = 0

        # Read in the x, y, z coordinates of all points in the cloud
        for point in point_cloud2.read_points(msg, skip_nans=True):
            pt_x = point[0]
            pt_y = point[1]
            pt_z = point[2]

            # Keep only those points within our designated boundaries and sum them up
            if (
                self.min_y < -pt_y < self.max_y
                and self.min_x < pt_x < self.max_x
                and pt_z < self.max_z
            ):
                x += pt_x
                y += pt_y
                z += pt_z
                n += 1

        # Stop the robot by default
        move_cmd = Twist()

        # If we have points, compute the centroid coordinates
        if n:
            x /= n
            y /= n
            z /= n

            # Check our movement thresholds
            if (abs(z - self.goal_z) > self.z_threshold) or (abs(x) > self.x_threshold):
                # Compute the linear and angular components of the movement
                linear_speed = (z - self.goal_z) * self.z_scale
                angular_speed = -x * self.x_scale

                # Make sure we meet our min/max specifications
                linear_speed = copysign(
                    max(self.min_linear_speed, min(self.max_linear_speed, abs(linear_speed))), linear_speed
                )
                angular_speed = copysign(
                    max(self.min_angular_speed, min(self.max_angular_speed, abs(angular_speed))), angular_speed
                )

                move_cmd.linear.x = linear_speed
                move_cmd.angular.z = angular_speed

        # Publish the movement command
        self.cmd_vel_pub.publish(move_cmd)
Code Example #9
 def publish_object_markers(self):
     marker_array = visualization_msgs.msg.MarkerArray()                    
     for model in self.model_list:
         marker = visualization_msgs.msg.Marker()
         marker.pose = model.get_world_pose()
         marker.header.frame_id='/world'
         marker.type = marker.POINTS
         marker.scale.x = .01
         marker.scale.y = .01
         marker.scale.z = .01
         marker.lifetime = rospy.Duration()
         point_generator = point_cloud2.read_points(model.point_cloud_data, None, True)
         marker.points = [geometry_msgs.msg.Point(point[0], point[1], point[2]) for point in point_generator]
         marker.colors = [std_msgs.msg.ColorRGBA(1,1,1,1) for point in marker.points]
         marker_array.markers.append(marker)
     # The publisher is created per call, so subscribers may not be connected
     # yet; latching keeps the last message available to late joiners.
     pub = rospy.Publisher('/object_marker_array',
                           visualization_msgs.msg.MarkerArray,
                           queue_size=1, latch=True)
     pub.publish(marker_array)
Code Example #10
def onPointCloud(data):
    global isStitching, probablityMap, pointCloud, useFilter
    if isStitching:
        points = pc2.read_points(data)
        orientation = (lastIMU.orientation.x, lastIMU.orientation.y,
                       lastIMU.orientation.z, lastIMU.orientation.w)
        angle = tf.transformations.euler_from_quaternion(orientation)

        for p in points:
            correctedPoint = rotateVector(
                p, 22.5, 0)  # to correct clouds pointing up...
            rotatedPoint = rotateVector(correctedPoint, -angle[2], 2)
            if useFilter:
                probablityMap.addPointHit(rotatedPoint[0], rotatedPoint[1],
                                          rotatedPoint[2])
            else:
                pointCloud.append(rotatedPoint)
Code Example #11
 def set_cmd_vel(self, msg):
     # Initialize the centroid coordinate sums and the point count
     x = y = z = n = 0
     
     # Read in the x, y, z coordinates of all points in the cloud
     for point in point_cloud2.read_points(msg, skip_nans=True):
         pt_x = point[0]
         pt_y = point[1]
         pt_z = point[2]
         
         x += pt_x
         y += pt_y
         z += pt_z
         n += 1
             
     # Stop the robot by default
     move_cmd = Twist()
     
     rospy.loginfo(n)
     
     # If we have points, compute the centroid coordinates
     if n:
         x /= n 
         y /= n 
         z /= n
         
         #rospy.loginfo(z)
                     
         # Check our movement thresholds
         if (abs(z - self.goal_z) > self.z_threshold) or (abs(x) > self.x_threshold):     
             # Compute the linear and angular components of the movement
             linear_speed = (z - self.goal_z) * self.z_scale
             angular_speed = -x * self.x_scale
             
             # Make sure we meet our min/max specifications
             linear_speed = copysign(max(self.min_linear_speed, 
                                         min(self.max_linear_speed, abs(linear_speed))), linear_speed)
             angular_speed = copysign(max(self.min_angular_speed, 
                                          min(self.max_angular_speed, abs(angular_speed))), angular_speed)
 
             move_cmd.linear.x = linear_speed
             move_cmd.angular.z = angular_speed
                     
     # Publish the movement command
     self.cmd_vel_pub.publish(move_cmd)
Code Example #12
def onPointCloud(data):
    # convert pointcloud2 to an array and get rotation in radians
    points = pc2.read_points(data)
    orientation = (lastIMU.orientation.x, lastIMU.orientation.y,
                   lastIMU.orientation.z, lastIMU.orientation.w)
    angle = tf.transformations.euler_from_quaternion(orientation)
    # initialize point cloud
    pointCloud = []
    # rotate every point in point cloud
    for p in points:
        correctedPoint = rotateVector(p, 22.5,
                                      0)  # to correct clouds pointing up...
        rotatedPoint = rotateVector(correctedPoint, -angle[2], 2)
        pointCloud.append(rotatedPoint)
    # create pointcloud2 message and publish for BLAM
    header = Header()
    header.stamp = rospy.Time.now()
    header.frame_id = "lidar_link"
    blamPublisher.publish(pc2.create_cloud_xyz32(header, pointCloud))
Code Example #13
 def send_pointlist_to_graspit(self, pointcloud_msg, downsample_factor = 1, num_points = -1 , tran = np.eye(4)):
     uvs = []
     if downsample_factor != 1:
         # read_points expects (u, v) = (column, row), so u spans the width
         uvs = [(u, v) for v in range(0, pointcloud_msg.height, downsample_factor)
                for u in range(0, pointcloud_msg.width, downsample_factor)]
     point_generator = point_cloud2.read_points(pointcloud_msg, None, True, uvs)
     point_list = [point for point in point_generator]
     color_list = [struct.unpack('BBBx', struct.pack('f',point[3])) for point in point_list]
     total_str = ""
     point_list2 = point_list[:num_points]
     for point_ind, point in enumerate(point_list2):
         color = color_list[point_ind]
         point_vec = tran*np.mat([point[0],point[1],point[2], 1]).transpose()
         point_str_list = " %f %f %f %i %i %i"%(point_vec[0], point_vec[1], point_vec[2],
                                               color[2], color[1], color[0])
         total_str += point_str_list
     command_str = "addPointCloud %i%s \n"%(len(point_list2), total_str)
     self.socket.sendall(command_str)
     #success = bool(self.socket.recv(100))
     return command_str
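The struct round trip above reflects the common PointCloud2 convention of packing three colour bytes into the bit pattern of a float32 'rgb' field: pack('f') recovers the raw four bytes and unpack('BBBx') splits them into blue, green, red plus a padding byte. A minimal demonstration:

    import struct

    b, g, r = 10, 20, 30                       # example byte values
    rgb_float = struct.unpack('f', struct.pack('BBBx', b, g, r))[0]
    print(struct.unpack('BBBx', struct.pack('f', rgb_float)))   # (10, 20, 30)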
Code Example #14
 def publish_object_markers(self):
     marker_array = visualization_msgs.msg.MarkerArray()
     for model in self.model_list:
         marker = visualization_msgs.msg.Marker()
         marker.pose = model.get_world_pose()
         marker.header.frame_id = '/world'
         marker.type = marker.POINTS
         marker.scale.x = .01
         marker.scale.y = .01
         marker.scale.z = .01
         marker.lifetime = rospy.Duration()
         point_generator = point_cloud2.read_points(model.point_cloud_data,
                                                    None, True)
         marker.points = [
             geometry_msgs.msg.Point(point[0], point[1], point[2])
             for point in point_generator
         ]
         marker.colors = [
             std_msgs.msg.ColorRGBA(1, 1, 1, 1) for point in marker.points
         ]
         marker_array.markers.append(marker)
     # Latch so subscribers that connect after publish still get the markers
     pub = rospy.Publisher('/object_marker_array',
                           visualization_msgs.msg.MarkerArray,
                           queue_size=1, latch=True)
     pub.publish(marker_array)
Code Example #15
 def getPointCloudImage(self):
     # Not the typical data structure (generator) for a point cloud
     pc_message = rospy.wait_for_message("/multisense/camera/points2",
                                         PointCloud2)
     pc = pc2.read_points(pc_message,
                          field_names=("x", "y", "z"),
                          skip_nans=False)
     depth_image = np.zeros((544, 1024, 3), np.float32)
     row = 0
     col = 0
     for index in range(544 * 1024):
         point = next(pc)
         if not isnan(point[2]):
             distance = sqrt(point[0]**2 + point[1]**2 + point[2]**2)
             depth_image[row, col][1] = 1 - min(
                 1.0, distance / 15.0)  # Green only
             #depth_image[row,col][0] = point[0]
             #depth_image[row,col][1] = point[1]
             #depth_image[row,col][2] = point[2]
         col += 1
         if col >= 1024:
             col = 0
             row += 1
     return cv2.flip(cv2.flip(depth_image, 0), 1)
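Because this Multisense cloud is organized (544 rows by 1024 columns, with NaNs kept), the per-pixel loop can be replaced by vectorized numpy. A sketch under the same assumptions, minus the final flips:

    import numpy as np
    from sensor_msgs import point_cloud2 as pc2

    pts = np.array(list(pc2.read_points(pc_message,
                                        field_names=("x", "y", "z"),
                                        skip_nans=False)),
                   dtype=np.float32).reshape(544, 1024, 3)
    distance = np.sqrt((pts ** 2).sum(axis=2))           # per-pixel range
    depth_image = np.zeros((544, 1024, 3), np.float32)
    valid = ~np.isnan(distance)
    depth_image[valid, 1] = 1 - np.minimum(1.0, distance[valid] / 15.0)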
Code Example #16
 def send_pointcloud_to_graspit(self, pointcloud_msg, downsample_factor = 1):
     # read_points expects (u, v) = (column, row), so u spans the width
     uvs = [(u, v) for v in range(0, pointcloud_msg.height, downsample_factor)
            for u in range(0, pointcloud_msg.width, downsample_factor)]
     point_generator = point_cloud2.read_points(pointcloud_msg, None, True, uvs)
     point_list = [point for point in point_generator]
     return point_list
Code Example #17
def onPointCloudStream(pointCloud):  # not in use
    print("streaming point_cloud")
    points_list = []
    for data in pc2.read_points(pointCloud, skip_nans=True):
        points_list.append([data[0], data[1], data[2]])
    sio.emit('ON_POINT_CLOUD_STREAM', points_list)
Code Example #18
    def detect_cubes_cb(self, req):
        if req.color not in set(self.available_colors):
            err_msg = 'Unknown color: %s, available colors: %s. Make sure ..._min_max_hsv.yaml contains corresponding values for color %s' % \
                        (req.color, str(self.available_colors), req.color)
            rospy.logerr(err_msg)
            raise Exception(err_msg)
        if req.image_fname != '':
            self.img = cv2.imread(req.image_fname)
            if not isinstance(self.img, np.ndarray):
                err_msg = 'Cannot read file %s' % req.image_fname
                rospy.logerr(err_msg)
                raise Exception(err_msg)
        elif self.rgb_only_camera:
            vcap = cv2.VideoCapture(0)
            ret, frame = vcap.read()
            if not ret:
                rospy.logerr('Cannot capture RGB frame from VideoCapture')
            else:
                self.img = frame
        resp = DetectCubeResponse()
        rospy.loginfo('detect_cubes service called\n' + str(req))
        if self.img is None:
            rospy.logerr('ERROR: no RGB data available')
            resp = self.err_resp()
            return resp

        if req.color not in self.known_histograms:
            rospy.logerr('ERROR: unknown color: %s' % req.color)
            resp = self.err_resp()
            return resp

        if DETECT_METHOD == DETECT_METHOD_CONTOUR:
            conts, conts_img, back, back_filt = self.get_contours(self.img, req)
            cube_rects = self.detect_cubes_contour(conts_img, conts, back_filt, req)
        elif DETECT_METHOD == DETECT_METHOD_TEMPLATE:
            # Assumed: img_hsv was undefined in this scope, so convert here
            img_hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)
            hsv_planes = cv2.split(img_hsv)
            cube_rects = self.detect_cubes_template(hsv_planes, req)
        else:
            raise Exception('Unknown detection method requested: %s' % DETECT_METHOD)

        # Now fill all cube info into the ROS message array
        if len(cube_rects) > 0:
            for c in cube_rects:
                u = c[0] + c[2] // 2
                v = c[1] + c[3] // 2
                pose_wrt_camera = PoseStamped()
                if self.rgb_only_camera or req.image_fname != '':
                    pose_wrt_camera.pose.position.x = u
                    pose_wrt_camera.pose.position.y = v
                    pose_wrt_camera.pose.position.z = 0
                    pose_final = pose_wrt_camera
                else:
                    pp = []
                    try:
                        #UMAX2 = min(5, abs(self.img.shape[1]))
                        #VMAX2 = min(5, abs(self.img.shape[0]))
                        #us = np.range(u - UMAX2, u + UMAX2)
                        #vs = np.range(v - VMAX2, v + VMAX2)
                        #uvs = np.transpose(np.tile(us, len(vs)), np.repeat(vs, len(us)))
                        uvs = [[u, v]]
                        print(uvs)
                        for i in read_points(self.cloud, uvs=uvs):
                            pp.append(i)
                    except Exception as e:
                        print('ERROR while iterating over read_points: %s' % e)
                    p = [np.nan, np.nan, np.nan]
                    for _p in pp:
                        if not np.isnan(_p[0]):
                            p = _p
                            break
                    if DETECT_USE_EUC_DIST_FILTERING:
                        dist = np.linalg.norm(p)
                        if dist > DETECT_DIST_MAX:
                            continue
                    pose_wrt_camera.pose.position.x = p[0]
                    pose_wrt_camera.pose.position.y = p[1]
                    pose_wrt_camera.pose.position.z = p[2]
                    pose_wrt_camera.header.frame_id = CAMERA_FRAME
                    if self.if_do_transform:
                        pose_final = self.transform_to_base_frame(pose_wrt_camera)
                    else:
                        pose_final = pose_wrt_camera
                    pose_final.pose.orientation.x = 0.0
                    pose_final.pose.orientation.y = 0.0
                    pose_final.pose.orientation.z = 0.0
                if not np.isnan(pose_final.pose.position.x):
                    resp.sizes.append(max(c[2], c[3]))
                    resp.poses.append(pose_final)
            if DEBUG:
                pass