def __init__(self):
    super(ProjectionNode, self).__init__()

    # Init the node
    rospy.init_node('projection_node', anonymous=False)

    self.listener = tf.TransformListener()
    self._bridge = CvBridge()

    (depth_topic, face_topic, output_topic, f, cx, cy,
     id_increment, id_offset, output_spencer) = self.get_parameters()

    # Subscribe to the face positions and the depth image
    sub_obj = message_filters.Subscriber(face_topic, BoundingBoxes)
    sub_depth = message_filters.Subscriber(depth_topic, Image)

    # Advertise the result of the face-depth projection
    self.pub = rospy.Publisher(output_topic, DetectionArray, queue_size=1)
    self.pub_pic = rospy.Publisher("/darknet_projection/image", Image,
                                   queue_size=1)
    self.pub_spencer = rospy.Publisher("/test_spencer", DetectedPersons,
                                       queue_size=1)
    # self.pub_spencer = rospy.Publisher("", DetectedPersons, queue_size=1)
    self.pub_spencer_odom = rospy.Publisher(output_spencer,
                                            DetectedPersonsOdom, queue_size=1)

    # Create the message filter: queue size 2, slop 0.9 s
    ts = message_filters.ApproximateTimeSynchronizer([sub_obj, sub_depth],
                                                     2, 0.9)
    ts.registerCallback(self.detection_callback)

    self.timer = rospy.Timer(rospy.Duration(0.166), self.timer_cb)        # ~6 Hz
    self.timer_pros = rospy.Timer(rospy.Duration(0.02), self.timer_cb_pros)  # 50 Hz

    self.new_msgs = False
    self.f = f
    self.cx = cx
    self.cy = cy
    self.detection_id_increment = id_increment
    self.detection_id_offset = id_offset
    self.current_detection_id = self.detection_id_offset
    self.spencer_detect = DetectedPersons()
    self.last_time = rospy.get_rostime().secs
    self.bridge = CvBridge()
    self.new_bounding = False
    self.message = []
    self.lock = threading.RLock()

    # Spin
    rospy.spin()
def timer_cb(self, timer):
    """Timer callback: publish SPENCER messages."""
    # .secs is integer seconds, so this triggers once no new detections
    # have arrived for at least one full second
    if (rospy.get_rostime().secs - self.last_time) > 0.05:
        self.new_msgs = False
    if self.new_msgs:
        # self.pub_spencer.publish(self.spencer_detect)
        pass
    else:
        # Build an empty DetectedPersons message as a keep-alive;
        # note that it is constructed but never published here
        spencer_detect_temp = DetectedPersons()
        spencer_detect_temp.header.stamp = rospy.get_rostime()
        spencer_detect_temp.header.frame_id = "cam_front_color_optical_frame"
def newMessageReceived(positionMeasurementArray):
    detectedPersons = DetectedPersons()
    if positionMeasurementArray.people:
        detectedPersons.header = positionMeasurementArray.people[0].header
    else:
        detectedPersons.header = positionMeasurementArray.header
        detectedPersons.header.frame_id = "odom"  # hack, since frame_id is not known

    global detectionId, detectionIdIncrement
    for positionMeasurement in positionMeasurementArray.people:
        # We assume that all detections have the same frame ID
        assert detectedPersons.header.frame_id == positionMeasurement.header.frame_id

        # Construct DetectedPerson
        detectedPerson = DetectedPerson()
        detectedPerson.modality = positionMeasurement.name
        detectedPerson.confidence = positionMeasurement.reliability
        detectedPerson.pose.pose.position = positionMeasurement.pos

        # Copy the 3x3 position covariance into the upper-left block of the
        # row-major 6x6 pose covariance; mark orientation as unknown
        for x in xrange(0, 3):
            for y in xrange(0, 3):
                detectedPerson.pose.covariance[y * 6 + x] = \
                    positionMeasurement.covariance[y * 3 + x] * covScale
        for i in xrange(3, 6):
            detectedPerson.pose.covariance[i * 6 + i] = 99999.0

        # Detection ID
        if useObjectId:
            match = re.search("[0-9]+", positionMeasurement.object_id)
            if match:
                detectedPerson.detection_id = int(match.group(0))
        else:
            detectedPerson.detection_id = detectionId
            detectionId += detectionIdIncrement

        detectedPersons.detections.append(detectedPerson)

    pub.publish(detectedPersons)
def newCompositeDetectedPersonsAvailable(compositeDetectedPersons):
    detectedPersons = DetectedPersons()
    detectedPersons.header = compositeDetectedPersons.header

    for compositeDetectedPerson in compositeDetectedPersons.elements:
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = compositeDetectedPerson.composite_detection_id
        detectedPerson.confidence = compositeDetectedPerson.max_confidence
        detectedPerson.pose = compositeDetectedPerson.pose

        # Merge the modalities of all original detections into one
        # comma-separated, alphabetically sorted string
        involvedModalities = list(set(
            originalDetection.modality
            for originalDetection in compositeDetectedPerson.original_detections))
        detectedPerson.modality = ",".join(sorted(involvedModalities))

        detectedPersons.detections.append(detectedPerson)

    detectedPersonsPublisher.publish(detectedPersons)
def CallBack(self, data):
    detectedPersons = DetectedPersons()
    detectedPersons.header = data.header

    for Person in data.people:
        detectedPerson = DetectedPerson()
        detectedPerson.modality = self.modality
        detectedPerson.confidence = self.confidence
        detectedPerson.detection_id = self.detectionId
        detectedPerson.pose.pose = Person.pose

        # Fill the diagonal of the row-major 6x6 covariance: position
        # variance for x/y/z, rotation variance for roll/pitch/yaw
        for i in xrange(0, 6):
            detectedPerson.pose.covariance[i * 6 + i] = \
                self.posVariance if i < 3 else self.rotVariance

        detectedPersons.detections.append(detectedPerson)
        self.detectionId += self.detectionIdIncrement

    self.pub.publish(detectedPersons)
def newMessageReceived(poseArray):
    detectedPersons = DetectedPersons()
    detectedPersons.header = poseArray.header

    global detectionId, detectionIdIncrement
    for pose in poseArray.poses:
        detectedPerson = DetectedPerson()
        detectedPerson.modality = modality
        detectedPerson.confidence = confidence
        detectedPerson.detection_id = detectionId
        detectedPerson.pose.pose = pose

        for i in xrange(0, 6):
            detectedPerson.pose.covariance[i * 6 + i] = \
                posVariance if i < 3 else rotVariance

        detectedPersons.detections.append(detectedPerson)
        detectionId += detectionIdIncrement

    pub.publish(detectedPersons)
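# Hedged aside (illustrative sketch, not part of the original nodes): the
# converters above all fill the row-major 6x6 pose covariance the same way --
# position variance on the first three diagonal entries, rotation variance on
# the last three, i.e. diag(pos, pos, pos, rot, rot, rot). The default values
# below stand in for the nodes' ROS parameters.
def make_diagonal_pose_covariance(posVariance=0.05, rotVariance=99999.0):
    """Return a row-major 6x6 covariance with the diagonal pattern used above."""
    covariance = [0.0] * 36
    for i in range(6):
        covariance[i * 6 + i] = posVariance if i < 3 else rotVariance
    return covariance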
def detection_callback(self, msg, depth):
    """
    Callback for RGB detections: the main projection logic is applied here.

    Args:
        msg (cob_perception_msgs/DetectionArray): detections array
        depth (sensor_msgs/Image): depth image from the camera
    """
    cv_depth = self._bridge.imgmsg_to_cv2(depth, "passthrough")

    # get the number of detections
    no_of_detections = len(msg.detections)

    spencer_detected_persons = DetectedPersons()
    spencer_detected_persons.header = msg.header

    # Check if there is a detection
    if no_of_detections > 0:
        for i, detection in enumerate(msg.detections):
            x = detection.mask.roi.x
            y = detection.mask.roi.y
            width = detection.mask.roi.width
            height = detection.mask.roi.height

            # Sample the depth from a central crop of the bounding box
            # to avoid background pixels near the borders
            scale = 0.3
            height_delta = (height - height * scale) / 2
            width_delta = (width - width * scale) / 2
            cv_depth_bounding_box = cv_depth[
                int(y + height_delta):int(y + height_delta + height * scale),
                int(x + width_delta):int(x + width_delta + width * scale)]

            try:
                # Median of the non-zero depth values is robust to holes
                depth_mean = np.nanmedian(
                    cv_depth_bounding_box[np.nonzero(cv_depth_bounding_box)])

                # Back-project the box center through the pinhole model
                real_x = (x + width / 2 - self.cx) * depth_mean / self.f
                real_y = (y + height / 2 - self.cy) * depth_mean / self.f

                msg.detections[i].pose.pose.position.x = real_x
                msg.detections[i].pose.pose.position.y = real_y
                msg.detections[i].pose.pose.position.z = depth_mean

                if msg.detections[i].label == 'person':
                    spencer_detected_person = DetectedPerson()
                    spencer_detected_person.modality = \
                        spencer_detected_person.MODALITY_GENERIC_RGBD
                    spencer_detected_person.confidence = 1
                    spencer_detected_person.pose.pose = msg.detections[i].pose.pose

                    # Known position, unknown orientation
                    LARGE_VARIANCE = 999999999
                    pose_variance = 0.05
                    spencer_detected_person.pose.covariance[0 * 6 + 0] = pose_variance
                    spencer_detected_person.pose.covariance[1 * 6 + 1] = pose_variance
                    spencer_detected_person.pose.covariance[2 * 6 + 2] = pose_variance
                    spencer_detected_person.pose.covariance[3 * 6 + 3] = LARGE_VARIANCE
                    spencer_detected_person.pose.covariance[4 * 6 + 4] = LARGE_VARIANCE
                    spencer_detected_person.pose.covariance[5 * 6 + 5] = LARGE_VARIANCE

                    spencer_detected_person.detection_id = self.current_detection_id
                    self.current_detection_id += self.detection_id_increment

                    spencer_detected_persons.detections.append(
                        spencer_detected_person)
            except Exception as e:
                print e

    self.pub.publish(msg)
    self.pub_spencer.publish(spencer_detected_persons)
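# Hedged aside (illustrative helper, not part of the node): the projection in
# detection_callback is the standard pinhole back-projection. Given focal
# length f and principal point (cx, cy) in pixels, and a depth Z in metric
# units, a pixel (u, v) maps into the camera frame as follows.
def back_project_pixel(u, v, depth, f, cx, cy):
    """Back-project pixel (u, v) at the given depth into camera coordinates."""
    # Pinhole model: u = f * X / Z + cx and v = f * Y / Z + cy, solved for X, Y
    x = (u - cx) * depth / f
    y = (v - cy) * depth / f
    return x, y, depth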
def newSegmentationReceived(self, laserscan, laserscanSegmentation):
    currentStamp = laserscanSegmentation.header.stamp
    pointCount = len(laserscan.ranges)
    cartesianCoordinates = []  # required for velocity calculations

    if self._lastDataStamp is None:
        self._lastDataStamp = laserscanSegmentation.header.stamp

    # Build lookup of cartesian coordinates per laser point
    for pointIndex in xrange(0, pointCount):
        cartesianCoordinates.append(
            self.calculateCartesianCoordinates(laserscan, pointIndex))

    # For each labelled segment, create and append one TrackedPerson and
    # DetectedPerson message
    trackedPersons = TrackedPersons(header=laserscanSegmentation.header)
    detectedPersons = DetectedPersons(header=laserscanSegmentation.header)

    for segment in laserscanSegmentation.segments:
        # Calculate centroid of tracked person
        centroid = numpy.array([0.0, 0.0, 0.0])
        for pointIndex in segment.measurement_indices:
            centroid += cartesianCoordinates[pointIndex]
        centroid /= float(len(segment.measurement_indices))

        # Lookup previous centroid (for velocity/twist calculation),
        # assume zero velocity at track initialization
        if not segment.label in self._previousCentroidLookup:
            self._previousCentroidLookup[segment.label] = collections.deque()

        # Maintain centroid history
        centroidHistory = self._previousCentroidLookup[segment.label]
        while len(centroidHistory) > 20:
            centroidHistory.popleft()

        # Calculate average velocity over past few frames
        dt = 0
        velocity = accumulatedVelocity = numpy.array([0.0, 0.0, 0.0])
        if centroidHistory:
            previousCentroid = centroid
            previousStamp = currentStamp
            for historyStamp, historyCentroid in reversed(centroidHistory):
                accumulatedVelocity += previousCentroid - historyCentroid
                dt += abs((previousStamp - historyStamp).to_sec())
                previousCentroid = historyCentroid
                previousStamp = historyStamp
            velocity = accumulatedVelocity / dt
        centroidHistory.append((currentStamp, centroid))

        # Remember age of track
        if not segment.label in self._firstTrackEncounterLookup:
            self._firstTrackEncounterLookup[segment.label] = currentStamp

        # Initialize TrackedPerson message
        trackedPerson = TrackedPerson()
        trackedPerson.track_id = segment.label
        trackedPerson.age = currentStamp - self._firstTrackEncounterLookup[segment.label]
        trackedPerson.detection_id = self._detectionIdCounter
        trackedPerson.is_occluded = False

        # Set position
        LARGE_VARIANCE = 99999999
        trackedPerson.pose.pose.position.x = centroid[0]
        trackedPerson.pose.pose.position.y = centroid[1]
        trackedPerson.pose.pose.position.z = centroid[2]

        # Set orientation
        if dt > 0:
            yaw = math.atan2(velocity[1], velocity[0])
            quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)
            trackedPerson.pose.pose.orientation = Quaternion(
                x=quaternion[0], y=quaternion[1],
                z=quaternion[2], w=quaternion[3])

        trackedPerson.pose.covariance[2 * 6 + 2] = \
            trackedPerson.pose.covariance[3 * 6 + 3] = \
            trackedPerson.pose.covariance[4 * 6 + 4] = LARGE_VARIANCE  # z pos, roll, pitch

        # Set velocity
        if dt > 0:
            trackedPerson.twist.twist.linear.x = velocity[0]
            trackedPerson.twist.twist.linear.y = velocity[1]
            trackedPerson.twist.twist.linear.z = velocity[2]

        trackedPerson.twist.covariance[2 * 6 + 2] = \
            trackedPerson.twist.covariance[3 * 6 + 3] = \
            trackedPerson.twist.covariance[4 * 6 + 4] = \
            trackedPerson.twist.covariance[5 * 6 + 5] = LARGE_VARIANCE  # linear z, angular x, y, z

        # Append to list of tracked persons
        trackedPersons.tracks.append(trackedPerson)

        # Initialize DetectedPerson message by copying data from TrackedPerson
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = trackedPerson.detection_id
        detectedPerson.confidence = 1.0
        detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
        detectedPerson.pose.pose.orientation = Quaternion()
        for i in xrange(0, 2):
            detectedPerson.pose.covariance[i * 6 + i] = 0.17 * 0.17
        detectedPerson.pose.covariance[5 * 6 + 5] = LARGE_VARIANCE  # yaw
        detectedPersons.detections.append(detectedPerson)

        self._detectionIdCounter += 1

    # Publish tracked and detected persons
    self.trackedPersonsPublisher.publish(trackedPersons)
    self.detectedPersonsPublisher.publish(detectedPersons)

    self._lastDataStamp = laserscanSegmentation.header.stamp
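# Hedged aside (quick standalone check with illustrative values, not part of
# the node): the displacement sum in newSegmentationReceived telescopes, so
# the averaged velocity equals the net displacement divided by the elapsed
# time between the newest and oldest entries considered.
def _check_velocity_telescopes():
    import numpy
    history = [(0.0, numpy.array([0.0, 0.0, 0.0])),
               (0.5, numpy.array([0.4, 0.1, 0.0])),
               (1.0, numpy.array([0.9, 0.2, 0.0]))]  # (stamp in s, centroid)
    accumulated = numpy.zeros(3)
    dt = 0.0
    previousStamp, previousCentroid = history[-1]
    for stamp, centroid in reversed(history[:-1]):
        accumulated += previousCentroid - centroid
        dt += abs(previousStamp - stamp)
        previousStamp, previousCentroid = stamp, centroid
    # Telescoping: same result as (newest - oldest) / total elapsed time
    assert numpy.allclose(
        accumulated / dt,
        (history[-1][1] - history[0][1]) / (history[-1][0] - history[0][0]))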
updateRateHz = 10

# Test coordinate frame for checking if mapping into the fixed frame works correctly
frameOffset = (0, 0, 0)
frameOrientation = tf.transformations.quaternion_from_euler(0, 0, 0)  # 90.0 / 180.0 * pi

print("Sending test messages on " + observationTopic + " and " + trackTopic)

rate = rospy.Rate(updateRateHz)
while not rospy.is_shutdown():
    br.sendTransform(frameOffset, frameOrientation, rospy.Time.now(),
                     "test_tf_frame", "odom")

    trackedPersons = TrackedPersons()
    trackedPersons.header.frame_id = "/test_tf_frame"
    trackedPersons.header.stamp = rospy.Time.now()

    detectedPersons = DetectedPersons()
    detectedPersons.header = trackedPersons.header

    tracks = trackedPersons.tracks
    detections = detectedPersons.detections

    createTrackAndDetection(tracks, detections, idShift + 0, 3, currentAngle, 2.0)
    createTrackAndDetection(tracks, detections, idShift + 1, 7, currentAngle + pi / 2, 2.5)
    createTrackAndDetection(tracks, detections, idShift + 2, -1, currentAngle + pi / 1, 3.0)
    createTrackAndDetection(tracks, detections, idShift + 3, -1, currentAngle + pi * 1.5,
                            cos(currentAngle) * 3.5 + 7.0)
    createTrackAndDetection(tracks, detections, idShift + 4, 88, 0.0, 0.0)

    trackPublisher.publish(trackedPersons)
    observationPublisher.publish(detectedPersons)

    currentAngle += angleStep
    rate.sleep()
def infer(self, velodyne_points, cluster_array, marker_array):
    global fout
    global is_save
    global frame_count

    start_time = time.time()

    # Convert the PointCloud2 message to an array
    static_cloud = self.convertPLmsg2array(velodyne_points, mode='XYZI')

    clusters = cluster_array.clusters
    Cloud = []
    for cluster in clusters:
        cloud = self.convertPLmsg2array(cluster)
        cloud_np = np.asarray(cloud)
        # Normalise the cluster (zero-mean coordinates) and the intensity
        cloud_centroid = np.mean(cloud_np[:, :3], axis=0)
        cloud_np[:, :3] -= cloud_centroid
        cloud_np[:, -1] = cloud_np[:, -1] / 255.
        Cloud.append(cloud_np)
        # print cloud_np

    Image = utility.convert2images(Cloud, self.saved_args.input_dim[0])
    feed = {self.model.input_data: Image}
    output, feature, pred = self.sess.run(
        [self.model.output, self.model.feature, self.model.pred], feed)

    # index = np.nonzero(np.max(np.array(output, dtype=np.float32), axis=1) > 0.7)
    # pred = np.array(pred, dtype=np.float32)
    # pred = pred[index[0]]
    # pred = pred.tolist()

    # Transform detections to /map
    done = False
    while not done and self.is_save_bbox:
        try:
            transform = self.tf_buffer.lookup_transform(
                self.target_frame, self.sensor_frame_id,
                rospy.Time(), rospy.Duration(10))
            done = True
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            print "transform2 exception"
            continue
    print "transform done!"

    objs_markers = MarkerArray()
    detected_persons = DetectedPersons()
    detected_persons.header = velodyne_points.header

    for p, m, o, f in zip(pred, marker_array.markers, output, feature):
        r = 0
        g = 0
        b = 0
        if p == 1:  # person
            r = 1.0
            bbox_min = m.points[10]
            bbox_max = m.points[0]
            detectedPerson = DetectedPerson()
            detectedPerson.pose.pose.position.x = (bbox_min.x + bbox_max.x) / 2.0
            detectedPerson.pose.pose.position.y = (bbox_min.y + bbox_max.y) / 2.0
            detectedPerson.pose.pose.position.z = (bbox_min.z + bbox_max.z) / 2.0
            detectedPerson.modality = DetectedPerson.MODALITY_GENERIC_LASER_3D
            detectedPerson.confidence = 1.0
            detectedPerson.detection_id = 0
            detected_persons.detections.append(detectedPerson)
        elif p == 2:  # truck
            r = 1.0
            g = 1.0
        else:
            r = 0.
            continue

        m.color.r = r
        m.color.g = g
        m.color.b = b
        objs_markers.markers.append(m)

        if p >= 1:
            # static_cloud = self.crop_box(static_cloud, m)
            if self.is_save_bbox:
                bbox_min = m.points[10]
                bbox_max = m.points[0]
                bbox_min_pt = self.transform_3d_pt(
                    [bbox_min.x, bbox_min.y, bbox_min.z], transform)
                bbox_max_pt = self.transform_3d_pt(
                    [bbox_max.x, bbox_max.y, bbox_max.z], transform)
                output_line = [frame_count] + bbox_min_pt + bbox_max_pt + \
                    o.tolist() + f.tolist()
                for i in output_line:
                    fout.write("%f " % i)
                fout.write("\n")

    frame_count += 1
    self.marker_pub.publish(objs_markers)
    self.detected_persons_pub.publish(detected_persons)

    if self.velodyne_pub.get_num_connections():
        # Republish the cloud as XYZI float32 fields
        header = velodyne_points.header
        fields = [PointField('x', 0, 7, 1),
                  PointField('y', 4, 7, 1),
                  PointField('z', 8, 7, 1),
                  PointField('intensity', 12, 7, 1)]
        static_velodyne = point_cloud2.create_cloud(header, fields, static_cloud)
        self.velodyne_pub.publish(static_velodyne)

    print("[inference_node]: running time = " + str(time.time() - start_time))
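# Hedged aside (equivalent rewrite for readability, not a behavior change):
# the datatype literal 7 in the PointField definitions above is
# sensor_msgs/PointField.FLOAT32, so the same XYZI layout can be built with
# the named constant instead.
def make_xyzi_fields():
    """Return the XYZI float32 field layout used by the publisher above."""
    from sensor_msgs.msg import PointField
    return [PointField('x', 0, PointField.FLOAT32, 1),
            PointField('y', 4, PointField.FLOAT32, 1),
            PointField('z', 8, PointField.FLOAT32, 1),
            PointField('intensity', 12, PointField.FLOAT32, 1)]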
def timer_cb_pros(self, timer):
    if self.new_bounding:
        self.lock.acquire()
        self.new_bounding = False
        msg = self.message[0]
        depth = self.message[1]
        self.lock.release()

        # Look up the latest transform from the depth frame into odom
        t = TransformStamped()
        try:
            (trans, rot) = self.listener.lookupTransform(
                "odom", depth.header.frame_id, rospy.Time(0))
            t.header.stamp = rospy.Time.now()
            t.header.frame_id = "odom"
            t.child_frame_id = depth.header.frame_id
            t.transform.translation.x = trans[0]
            t.transform.translation.y = trans[1]
            t.transform.translation.z = trans[2]
            t.transform.rotation.x = rot[0]
            t.transform.rotation.y = rot[1]
            t.transform.rotation.z = rot[2]
            t.transform.rotation.w = rot[3]
        except tf.LookupException:
            print("LookupException")
        except tf.ConnectivityException:
            print("ConnectivityException")
        except tf.ExtrapolationException:
            print("ExtrapolationException")

        cv_depth = self._bridge.imgmsg_to_cv2(depth, "passthrough")
        # cv_color = self._bridge.imgmsg_to_cv2(color, "rgb8")

        # get the number of detections
        no_of_detections = len(msg.bounding_boxes)

        spencer_detected_persons = DetectedPersons()
        spencer_detected_persons.header = msg.image_header

        # Check if there is a detection
        if no_of_detections > 0:
            for i, boxes in enumerate(msg.bounding_boxes):
                x = boxes.xmin
                y = boxes.ymin
                xmax = boxes.xmax
                ymax = boxes.ymax
                width = xmax - x
                height = ymax - y

                # Sample the depth from a central crop of the bounding box
                scale = 0.6
                height_delta = (height - height * scale) / 2
                width_delta = (width - width * scale) / 2
                cv_depth_bounding_box = cv_depth[
                    int(y + height_delta):int(y + height_delta + height * scale),
                    int(x + width_delta):int(x + width_delta + width * scale)]
                # cv_color_debug = cv_color[int(y+height_delta):int(y+height_delta+height*scale),
                #                           int(x+width_delta):int(x+width_delta+width*scale)]

                try:
                    depth_mean = np.nanmedian(
                        cv_depth_bounding_box[np.nonzero(cv_depth_bounding_box)])
                    real_x = (x + width / 2 - self.cx) * depth_mean / self.f
                    real_y = (y + height / 2 - self.cy) * depth_mean / self.f

                    if boxes.Class == 'person':
                        spencer_detected_person = DetectedPerson()
                        spencer_detected_person.modality = \
                            spencer_detected_person.MODALITY_GENERIC_RGBD
                        spencer_detected_person.confidence = 1
                        spencer_detected_person.pose.pose.position.x = real_x
                        spencer_detected_person.pose.pose.position.y = real_y
                        spencer_detected_person.pose.pose.position.z = depth_mean
                        spencer_detected_person.pose.pose.orientation.w = 1.0

                        # Known position, unknown orientation
                        LARGE_VARIANCE = 999999999
                        pose_variance = 0.05
                        spencer_detected_person.pose.covariance[0 * 6 + 0] = pose_variance
                        spencer_detected_person.pose.covariance[1 * 6 + 1] = pose_variance
                        spencer_detected_person.pose.covariance[2 * 6 + 2] = pose_variance
                        spencer_detected_person.pose.covariance[3 * 6 + 3] = LARGE_VARIANCE
                        spencer_detected_person.pose.covariance[4 * 6 + 4] = LARGE_VARIANCE
                        spencer_detected_person.pose.covariance[5 * 6 + 5] = LARGE_VARIANCE

                        spencer_detected_person.detection_id = self.current_detection_id
                        self.current_detection_id += self.detection_id_increment

                        spencer_detected_persons.detections.append(
                            spencer_detected_person)
                    # self.pub_pic.publish(self.bridge.cv2_to_imgmsg(cv_color_debug, "rgb8"))
                except Exception as e:
                    print e

        self.new_msgs = True
        self.last_time = rospy.get_rostime().secs
        self.spencer_detect = spencer_detected_persons

        # Pair the detections with the odom transform valid at this instant
        spencer_detect_odom = DetectedPersonsOdom()
        spencer_detect_odom.DetectedPersons = spencer_detected_persons
        spencer_detect_odom.Transform = t
        # self.pub_spencer.publish(spencer_detected_persons)
        self.pub_spencer_odom.publish(spencer_detect_odom)
def __init__(self):
    rospy.init_node('yolo_detection_to_depth')
    rospy.on_shutdown(self.shutdown)

    # Displacement of the depth camera back from the front of the robot, in meters
    self.camera_displacement = 0.612

    self.display_depth_image = False

    self.base_link = rospy.get_param('~base_link', 'sibot/base_link')
    self.depth_frame = rospy.get_param('~depth_frame', 'sibot/camera_rgb_optical_frame')

    self.tfBuffer = tf2_ros.Buffer()
    tf_listener = tf2_ros.TransformListener(self.tfBuffer)

    # Keep a list of people as a People message
    self.people = People()
    self.people.header.frame_id = self.base_link

    # Publish results as people_msgs/People messages
    self.people_pub = rospy.Publisher("people", People, queue_size=5)

    # Keep a list of people poses as a PoseArray message
    self.people_poses = PoseArray()
    self.people_poses.header.frame_id = self.base_link

    # Publish detections as a pose array
    self.people_poses_pub = rospy.Publisher("people_poses", PoseArray, queue_size=5)

    # Keep a list of people 3D bounding boxes as a BoundingBoxArray message
    self.people_bounding_boxes = BoundingBoxArray()
    self.people_bounding_boxes.header.frame_id = self.depth_frame

    # Publish detections as a JSK BoundingBoxArray for viewing in RViz
    self.people_bounding_boxes_pub = rospy.Publisher(
        "people_bounding_boxes", BoundingBoxArray, queue_size=5)

    # Keep a list of detected people using the Spencer DetectedPersons
    # message type so we can display nice graphics in RViz
    self.detected_persons = DetectedPersons()
    self.detected_persons.header.frame_id = self.base_link

    # Publish detections as a DetectedPersons array for viewing in RViz
    self.detected_persons_pub = rospy.Publisher("detected_persons",
                                                DetectedPersons, queue_size=5)

    # Publish person pointclouds
    # self.people_cloud_pub = rospy.Publisher("people_clouds", PointCloud2, queue_size=5)

    # Use message filters to time-synchronize the bboxes and the pointcloud
    self.bbox_sub = message_filters.Subscriber("yolo_bboxes", bbox_array_stamped,
                                               queue_size=10)
    self.pointcloud_sub = message_filters.Subscriber('point_cloud', PointCloud2,
                                                     queue_size=10)

    # Register the synchronized callback (queue size 10, slop 2 s)
    self.time_sync = message_filters.ApproximateTimeSynchronizer(
        [self.bbox_sub, self.pointcloud_sub], 10, 2)
    self.time_sync.registerCallback(self.get_bbox_cog)

    rospy.loginfo("Getting detections from YOLO...")