def createTrackAndDetection(tracks, detections, track_id, detection_id, angle, radius):
    trackedPerson = TrackedPerson()
    trackedPerson.track_id = track_id

    if detection_id >= 0:
        trackedPerson.detection_id = detection_id
        trackedPerson.is_occluded = False
    else:
        trackedPerson.is_occluded = True

    trackedPerson.age = rospy.Time.now() - startTime

    setPoseAndTwistFromAngle(trackedPerson.pose, trackedPerson.twist, angle, radius)
    tracks.append(trackedPerson)

    if detection_id >= 0:
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = detection_id
        detectedPerson.confidence = random.random()
        detectedPerson.pose = copy.deepcopy(trackedPerson.pose)

        # Introduce some noise on the observed position
        detectedPerson.pose.pose.position.x += random.random() * 0.5 - 0.25
        detectedPerson.pose.pose.position.y += random.random() * 0.5 - 0.25

        detections.append(detectedPerson)
    return
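# NOT part of the original source: a minimal sketch of the setPoseAndTwistFromAngle()
# helper that the mock generators above and below rely on, assuming tracks move on a
# circle of the given radius around the origin, heading along the tangent. The speed
# parameter and its 1 m/s default are hypothetical.
import math
import tf
from geometry_msgs.msg import Quaternion

def setPoseAndTwistFromAngle(pose, twist, angle, radius, moving=True, speed=1.0):
    # Place the person on a circle around the origin
    pose.pose.position.x = radius * math.cos(angle)
    pose.pose.position.y = radius * math.sin(angle)

    # Heading is tangential to the circle, i.e. 90 degrees ahead of the radial angle
    yaw = angle + math.pi / 2.0
    q = tf.transformations.quaternion_from_euler(0, 0, yaw)
    pose.pose.orientation = Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])

    # Tangential velocity; zero if the person is standing still
    v = speed if moving else 0.0
    twist.twist.linear.x = v * math.cos(yaw)
    twist.twist.linear.y = v * math.sin(yaw)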
def bayes2spencer(self, bayes_msg):
    spencer_msg = TrackedPersons()
    spencer_msg.header = bayes_msg.header

    for i, pose in enumerate(bayes_msg.poses):
        track = TrackedPerson()
        track.track_id = self.string2uint64(bayes_msg.uuids[i])

        # PoseWithCovariance
        track.pose.pose.position = pose.position
        track.pose.pose.orientation = pose.orientation

        # TwistWithCovariance
        track.twist.twist.linear = bayes_msg.velocities[i]

        # Covariances are not present in the bayes message; fill in plausible random values
        track.pose.covariance = (np.random.normal(0.3, 0.1) * np.identity(6)).flatten().tolist()
        track.twist.covariance = (np.random.normal(0.3, 0.1) * np.identity(6)).flatten().tolist()

        # We assume zero angular velocity here
        # track.twist.twist.angular

        track.is_occluded = False
        track.is_matched = False
        track.detection_id = self.string2uint64(bayes_msg.uuids[i])
        track.age = rospy.Duration(0)  # age is a duration field, not an int

        spencer_msg.tracks.append(track)
    return spencer_msg
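# NOT part of the original source: a minimal sketch of what string2uint64() might do,
# assuming the Bayes tracker publishes string UUIDs that must be mapped onto the
# uint64 track_id/detection_id fields of spencer_tracking_msgs. The lookup-table
# approach (stable, collision-free IDs for the node's lifetime) is one possible choice.
def string2uint64(self, uuid_string):
    if not hasattr(self, '_uuid_to_id'):
        self._uuid_to_id = {}
    if uuid_string not in self._uuid_to_id:
        # Assign consecutive IDs, remembering previously seen UUIDs
        self._uuid_to_id[uuid_string] = len(self._uuid_to_id) + 1
    return self._uuid_to_id[uuid_string]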
def newSegmentationReceived(self, laserscan, laserscanSegmentation):
    currentStamp = laserscanSegmentation.header.stamp
    pointCount = len(laserscan.ranges)
    cartesianCoordinates = []  # required for velocity calculations

    if self._lastDataStamp is None:
        self._lastDataStamp = laserscanSegmentation.header.stamp

    # Build lookup of Cartesian coordinates per laser point
    for pointIndex in xrange(0, pointCount):
        cartesianCoordinates.append(self.calculateCartesianCoordinates(laserscan, pointIndex))

    # For each labelled segment, create and append one TrackedPerson and DetectedPerson message
    trackedPersons = TrackedPersons(header=laserscanSegmentation.header)
    detectedPersons = DetectedPersons(header=laserscanSegmentation.header)

    for segment in laserscanSegmentation.segments:
        # Calculate centroid of tracked person
        centroid = numpy.array([0.0, 0.0, 0.0])
        for pointIndex in segment.measurement_indices:
            centroid += cartesianCoordinates[pointIndex]
        centroid /= float(len(segment.measurement_indices))

        # Look up previous centroids (for velocity/twist calculation); assume zero velocity at track initialization
        if segment.label not in self._previousCentroidLookup:
            self._previousCentroidLookup[segment.label] = collections.deque()

        # Maintain centroid history
        centroidHistory = self._previousCentroidLookup[segment.label]
        while len(centroidHistory) > 20:
            centroidHistory.popleft()

        # Calculate average velocity over the past few frames
        dt = 0
        velocity = numpy.array([0.0, 0.0, 0.0])
        accumulatedVelocity = numpy.array([0.0, 0.0, 0.0])
        if centroidHistory:
            previousCentroid = centroid
            previousStamp = currentStamp
            for historyStamp, historyCentroid in reversed(centroidHistory):
                accumulatedVelocity += previousCentroid - historyCentroid
                dt += abs((previousStamp - historyStamp).to_sec())
                previousCentroid = historyCentroid
                previousStamp = historyStamp
            if dt > 0:  # guard against identical timestamps in the history
                velocity = accumulatedVelocity / dt

        centroidHistory.append((currentStamp, centroid))

        # Remember age of track
        if segment.label not in self._firstTrackEncounterLookup:
            self._firstTrackEncounterLookup[segment.label] = currentStamp

        # Initialize TrackedPerson message
        trackedPerson = TrackedPerson()
        trackedPerson.track_id = segment.label
        trackedPerson.age = currentStamp - self._firstTrackEncounterLookup[segment.label]
        trackedPerson.detection_id = self._detectionIdCounter
        trackedPerson.is_occluded = False

        # Set position
        LARGE_VARIANCE = 99999999
        trackedPerson.pose.pose.position.x = centroid[0]
        trackedPerson.pose.pose.position.y = centroid[1]
        trackedPerson.pose.pose.position.z = centroid[2]

        # Set orientation
        if dt > 0:
            yaw = math.atan2(velocity[1], velocity[0])
            quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)
            trackedPerson.pose.pose.orientation = Quaternion(
                x=quaternion[0], y=quaternion[1], z=quaternion[2], w=quaternion[3])

        # z position, roll and pitch are unobserved
        trackedPerson.pose.covariance[2 * 6 + 2] = \
            trackedPerson.pose.covariance[3 * 6 + 3] = \
            trackedPerson.pose.covariance[4 * 6 + 4] = LARGE_VARIANCE

        # Set velocity
        if dt > 0:
            trackedPerson.twist.twist.linear.x = velocity[0]
            trackedPerson.twist.twist.linear.y = velocity[1]
            trackedPerson.twist.twist.linear.z = velocity[2]

        # linear z and angular x, y, z are unobserved
        trackedPerson.twist.covariance[2 * 6 + 2] = \
            trackedPerson.twist.covariance[3 * 6 + 3] = \
            trackedPerson.twist.covariance[4 * 6 + 4] = \
            trackedPerson.twist.covariance[5 * 6 + 5] = LARGE_VARIANCE

        # Append to list of tracked persons
        trackedPersons.tracks.append(trackedPerson)

        # Initialize DetectedPerson message by copying data from TrackedPerson
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = trackedPerson.detection_id
        detectedPerson.confidence = 1.0
        detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
        detectedPerson.pose.pose.orientation = Quaternion()
        for i in xrange(0, 2):
            detectedPerson.pose.covariance[i * 6 + i] = 0.17 * 0.17  # x, y
        detectedPerson.pose.covariance[5 * 6 + 5] = LARGE_VARIANCE  # yaw
        detectedPersons.detections.append(detectedPerson)

        self._detectionIdCounter += 1

    # Publish tracked and detected persons
    self.trackedPersonsPublisher.publish(trackedPersons)
    self.detectedPersonsPublisher.publish(detectedPersons)
    self._lastDataStamp = laserscanSegmentation.header.stamp
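# NOT part of the original source: a minimal sketch of calculateCartesianCoordinates(),
# assuming standard sensor_msgs/LaserScan conventions (bearing = angle_min +
# index * angle_increment, ranges in metres) and a fixed z of zero.
import math
import numpy

def calculateCartesianCoordinates(self, laserscan, pointIndex):
    phi = laserscan.angle_min + pointIndex * laserscan.angle_increment
    rho = laserscan.ranges[pointIndex]
    return numpy.array([rho * math.cos(phi), rho * math.sin(phi), 0.0])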
def createTrackAndDetection(tracks, detections, track_id, angle, radius, moving=True):
    trackedPerson = TrackedPerson()

    global trackDictionary, detectionCounter
    if track_id not in trackDictionary:
        trackDictionaryEntry = TrackDictionaryEntry()
        trackDictionary[track_id] = trackDictionaryEntry
    else:
        trackDictionaryEntry = trackDictionary[track_id]

    # Generate detection ID
    detection_id = detectionCounter
    detectionCounter += 1

    #
    # Simulate occlusions
    #
    occlusionProbability = 0.02
    occlusionMinDuration = 1.0 / updateRateHz   # at least 1 frame
    occlusionMaxDuration = 15.0 / updateRateHz  # at most 15 frames

    # Currently not occluded? Then maybe start a new occlusion
    if trackDictionaryEntry.remainingOcclusionTime <= 0:
        if random.random() < occlusionProbability:
            trackDictionaryEntry.remainingOcclusionTime = \
                random.random() * (occlusionMaxDuration - occlusionMinDuration) + occlusionMinDuration

    # Is the track occluded?
    if trackDictionaryEntry.remainingOcclusionTime <= 0:
        trackedPerson.detection_id = detection_id
        trackedPerson.is_occluded = False
    else:
        trackedPerson.is_occluded = True
        trackDictionaryEntry.remainingOcclusionTime -= 1.0 / updateRateHz

    #
    # Simulate track ID switches
    #
    idSwitchProbability = 0.001
    if random.random() < idSwitchProbability:
        trackDictionaryEntry.idShift += 1
        trackDictionaryEntry.lastIdShiftAt = rospy.Time.now()

    # Better don't change this, as other mock components might rely upon it
    # to consistently fake their data across ID switches
    idShiftAmount = 66
    trackedPerson.track_id = track_id + trackDictionaryEntry.idShift * idShiftAmount
    trackedPerson.age = rospy.Time.now() - trackDictionaryEntry.lastIdShiftAt

    # Determine track position
    setPoseAndTwistFromAngle(trackedPerson.pose, trackedPerson.twist, angle, radius, moving)

    # Track position noise
    trackedPerson.pose.pose.position.x += random.random() * 0.1 - 0.05
    trackedPerson.pose.pose.position.y += random.random() * 0.1 - 0.05

    tracks.append(trackedPerson)

    if not trackedPerson.is_occluded:
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = detection_id
        detectedPerson.confidence = random.random() * 0.5 + 0.5
        detectedPerson.pose = copy.deepcopy(trackedPerson.pose)

        # Introduce some noise on the detected position
        detectedPerson.pose.pose.position.x += random.random() * 0.5 - 0.25
        detectedPerson.pose.pose.position.y += random.random() * 0.5 - 0.25

        detections.append(detectedPerson)
    return
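# NOT part of the original source: a minimal sketch of the TrackDictionaryEntry
# state record used above, containing exactly the fields the mock generator relies on.
class TrackDictionaryEntry(object):
    def __init__(self):
        self.remainingOcclusionTime = 0.0      # seconds left in the current simulated occlusion
        self.idShift = 0                       # number of simulated ID switches so far
        self.lastIdShiftAt = rospy.Time.now()  # track age is measured from the last ID switch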
def createTrackedPersons(self, timestamp):
    trackedPersons = TrackedPersons()
    trackedPersons.header.stamp = timestamp
    trackedPersons.header.frame_id = self.trackingFrame

    for trackKey, track in self.database.getTracks().iteritems():
        waypoints = self.database.getWaypoints(track)
        if not waypoints:
            continue

        firstWaypoint = waypoints[0]
        lastWaypoint = waypoints[-1]

        # Skip this track if we are before its first or past its last waypoint
        beforeFirstWaypoint = timestamp.to_sec() < firstWaypoint["timestamp"]
        behindLastWaypoint = timestamp.to_sec() > lastWaypoint["timestamp"]
        if beforeFirstWaypoint or behindLastWaypoint:
            continue

        trackedPerson = TrackedPerson()
        trackedPerson.track_id = self.database.getTrackId(trackKey)
        trackedPerson.is_occluded = False
        # Age is the time elapsed since the track's first waypoint
        trackedPerson.age = timestamp - rospy.Time(firstWaypoint["timestamp"])

        # Find adjacent waypoints for linear interpolation of the tracked person's position
        previousWaypoint = nextWaypoint = None
        for waypoint in waypoints:
            previousWaypoint = nextWaypoint
            nextWaypoint = waypoint
            if nextWaypoint["timestamp"] > timestamp.to_sec():
                break

        try:
            # If there is only one waypoint for this track, previousWaypoint will be None
            nextPosition = self.transformer.toCommonFrame(nextWaypoint, self.trackingFrame)
            nextTimestamp = nextWaypoint["timestamp"]

            # Determine waypoint segment duration and distance
            if previousWaypoint is not None:
                previousPosition = self.transformer.toCommonFrame(previousWaypoint, self.trackingFrame)
                previousTimestamp = previousWaypoint["timestamp"]
                segmentDuration = nextTimestamp - previousTimestamp
                alpha = (timestamp.to_sec() - previousTimestamp) / segmentDuration
                diffVector = nextPosition - previousPosition
            else:
                previousPosition = nextPosition
                segmentDuration = 1.0
                alpha = 0.0
                diffVector = numpy.array([0, 0])
        except tf.Exception:
            continue

        # Linear interpolation between the adjacent waypoints
        interpolatedPosition = previousPosition + diffVector * alpha
        velocity = diffVector / segmentDuration

        trackedPerson.pose.pose.position.x, trackedPerson.pose.pose.position.y = interpolatedPosition

        # z position, roll and pitch are unobserved
        trackedPerson.pose.covariance[2 * 6 + 2] = \
            trackedPerson.pose.covariance[3 * 6 + 3] = \
            trackedPerson.pose.covariance[4 * 6 + 4] = 99999999

        yaw = math.atan2(velocity[1], velocity[0])
        quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)
        trackedPerson.pose.pose.orientation = Quaternion(
            x=quaternion[0], y=quaternion[1], z=quaternion[2], w=quaternion[3])

        trackedPerson.twist.twist.linear.x, trackedPerson.twist.twist.linear.y = velocity

        trackedPersons.tracks.append(trackedPerson)

    return trackedPersons
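# NOT part of the original source: a minimal sketch of how createTrackedPersons()
# might be driven. The topic name, queue size and 25 Hz update rate are assumptions.
def run(self):
    publisher = rospy.Publisher('/spencer/perception/tracked_persons', TrackedPersons, queue_size=3)
    rate = rospy.Rate(25.0)
    while not rospy.is_shutdown():
        trackedPersons = self.createTrackedPersons(rospy.Time.now())
        publisher.publish(trackedPersons)
        rate.sleep()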
def publish_obstacles(self):
    with self.lock:
        if self.transport_data is None:
            return
        timestamp, xx, yy, clusters, is_legs, cogs, radii, tf_rob_in_fix = self.transport_data

        # Collect per-track state
        track_ids = []
        tracks_latest_pos, tracks_color = [], []
        tracks_in_frame, tracks_velocities = [], []
        tracks_radii = []
        for trackid in self.tracker.active_tracks:
            track = self.tracker.active_tracks[trackid]
            xy = np.array(track.pos_history[-1])
            is_track_in_frame = True
            if trackid in self.tracker.latest_matches:
                color = (0., 1., 0., 1.)  # green: matched in the latest frame
            elif trackid in self.tracker.new_tracks:
                color = (0., 0., 1., 1.)  # blue: newly created track
            else:
                color = (0.7, 0.7, 0.7, 1.)  # grey: not matched in the latest frame
                is_track_in_frame = False
            track_ids.append(trackid)
            tracks_latest_pos.append(xy)
            tracks_color.append(color)
            tracks_in_frame.append(is_track_in_frame)
            tracks_velocities.append(track.estimate_velocity())
            tracks_radii.append(track.avg_radius())

        # Publish TrackedPersons
        from spencer_tracking_msgs.msg import TrackedPersons, TrackedPerson
        from geometry_msgs.msg import Quaternion
        pub = rospy.Publisher("/tracked_persons", TrackedPersons, queue_size=1)
        tp_msg = TrackedPersons()
        tp_msg.header.frame_id = self.kFixedFrame
        tp_msg.header.stamp = timestamp
        for trackid, xy, in_frame, vel, radius in zip(
                track_ids, tracks_latest_pos, tracks_in_frame, tracks_velocities, tracks_radii):
            # if not in_frame:
            #     continue
            tp = TrackedPerson()
            tp.track_id = trackid
            tp.is_occluded = False
            tp.is_matched = in_frame
            tp.detection_id = trackid
            tp.pose.pose.position.x = xy[0]
            tp.pose.pose.position.y = xy[1]
            heading_angle = np.arctan2(vel[1], vel[0])  # guess heading from velocity
            tp.pose.pose.orientation = Quaternion(
                *tf.transformations.quaternion_from_euler(0, 0, heading_angle))
            tp.twist.twist.linear.x = vel[0]
            tp.twist.twist.linear.y = vel[1]
            tp.twist.twist.angular.z = 0  # unknown
            tp_msg.tracks.append(tp)
        pub.publish(tp_msg)

        # Publish dynamic (tracked) obstacles
        pub = rospy.Publisher('/obstacles', ObstacleArrayMsg, queue_size=1)
        obstacles_msg = ObstacleArrayMsg()
        obstacles_msg.header.stamp = timestamp
        obstacles_msg.header.frame_id = self.kFixedFrame
        for trackid, xy, in_frame, vel, radius in zip(
                track_ids, tracks_latest_pos, tracks_in_frame, tracks_velocities, tracks_radii):
            if not in_frame:
                continue
            # Add point obstacle
            obst = ObstacleMsg()
            obst.id = trackid
            obst.polygon.points = [Point32()]
            obst.polygon.points[0].x = xy[0]
            obst.polygon.points[0].y = xy[1]
            obst.polygon.points[0].z = 0
            obst.radius = radius
            yaw = np.arctan2(vel[1], vel[0])
            q = tf.transformations.quaternion_from_euler(0, 0, yaw)
            obst.orientation = Quaternion(*q)
            obst.velocities.twist.linear.x = vel[0]
            obst.velocities.twist.linear.y = vel[1]
            obst.velocities.twist.linear.z = 0
            obst.velocities.twist.angular.x = 0
            obst.velocities.twist.angular.y = 0
            obst.velocities.twist.angular.z = 0
            obstacles_msg.obstacles.append(obst)
        pub.publish(obstacles_msg)

        # Publish close static (non-leg) obstacles
        pub = rospy.Publisher('/close_nonleg_obstacles', ObstacleArrayMsg, queue_size=1)
        MAX_DIST_STATIC_CLUSTERS_M = 3.0
        cogs_in_fix = []
        for i in range(len(cogs)):
            cogs_in_fix.append(apply_tf(cogs[i], Pose2D(tf_rob_in_fix)))
        obstacles_msg = ObstacleArrayMsg()
        obstacles_msg.header.stamp = timestamp
        obstacles_msg.header.frame_id = self.kFixedFrame
        for c, cog_in_fix, cog_in_rob, r, is_leg in zip(clusters, cogs_in_fix, cogs, radii, is_legs):
            if np.linalg.norm(cog_in_rob) < self.kRobotRadius:
                continue
            if len(c) < self.kMinClusterSize:
                continue
            # leg obstacles are already published in the tracked obstacles topic
            if is_leg:
                continue
            # close obstacles only
            if np.linalg.norm(cog_in_rob) > MAX_DIST_STATIC_CLUSTERS_M:
                continue
            # Add point obstacle
            obst = ObstacleMsg()
            obst.id = 0
            obst.polygon.points = [Point32()]
            obst.polygon.points[0].x = cog_in_fix[0]
            obst.polygon.points[0].y = cog_in_fix[1]
            obst.polygon.points[0].z = 0
            obst.radius = r
            yaw = 0
            q = tf.transformations.quaternion_from_euler(0, 0, yaw)
            obst.orientation = Quaternion(*q)
            obst.velocities.twist.linear.x = 0
            obst.velocities.twist.linear.y = 0
            obst.velocities.twist.linear.z = 0
            obst.velocities.twist.angular.x = 0
            obst.velocities.twist.angular.y = 0
            obst.velocities.twist.angular.z = 0
            obstacles_msg.obstacles.append(obst)
        pub.publish(obstacles_msg)

        # Publish velocity arrow markers for the tracks
        pub = rospy.Publisher('/obstacle_markers', MarkerArray, queue_size=1)
        # delete all previous markers
        ma = MarkerArray()
        mk = Marker()
        mk.header.frame_id = self.kFixedFrame
        mk.ns = "obstacles"
        mk.id = 0
        mk.type = Marker.ARROW
        mk.action = 3  # DELETEALL
        ma.markers.append(mk)
        pub.publish(ma)

        ma = MarkerArray()
        for trackid, xy, in_frame, vel, color in zip(
                track_ids, tracks_latest_pos, tracks_in_frame, tracks_velocities, tracks_color):
            if not in_frame:
                continue
            normvel = np.linalg.norm(vel)
            if normvel == 0:
                continue
            mk = Marker()
            mk.header.frame_id = self.kFixedFrame
            mk.ns = "tracks"
            mk.id = trackid
            mk.type = Marker.ARROW
            mk.action = Marker.ADD
            mk.scale.x = normvel
            mk.scale.y = 0.1
            mk.scale.z = 0.1
            mk.color.r = color[0]
            mk.color.g = color[1]
            mk.color.b = color[2]
            mk.color.a = color[3]
            mk.frame_locked = True
            mk.pose.position.x = xy[0]
            mk.pose.position.y = xy[1]
            mk.pose.position.z = 0.03
            yaw = np.arctan2(vel[1], vel[0])
            q = tf.transformations.quaternion_from_euler(0, 0, yaw)
            mk.pose.orientation = Quaternion(*q)
            ma.markers.append(mk)
        pub.publish(ma)
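# NOT part of the original source: publish_obstacles() above re-creates each
# rospy.Publisher on every call, which is slow and can silently drop the first
# messages while subscribers reconnect. A sketch of the usual pattern, creating
# the publishers once (e.g. from the node's constructor); attribute names are
# hypothetical:
def init_publishers(self):
    from spencer_tracking_msgs.msg import TrackedPersons
    self.tracked_persons_pub = rospy.Publisher("/tracked_persons", TrackedPersons, queue_size=1)
    self.obstacles_pub = rospy.Publisher('/obstacles', ObstacleArrayMsg, queue_size=1)
    self.close_obstacles_pub = rospy.Publisher('/close_nonleg_obstacles', ObstacleArrayMsg, queue_size=1)
    self.obstacle_markers_pub = rospy.Publisher('/obstacle_markers', MarkerArray, queue_size=1)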