def createTrackedPerson(track_id, x, y, theta):
    trackedPerson = TrackedPerson()

    theta = radians(theta) + pi / 2.0

    trackedPerson.track_id = track_id
    quaternion = tf.transformations.quaternion_from_euler(0, 0, theta)

    trackedPerson.pose.pose.position.x = x
    trackedPerson.pose.pose.position.y = y

    trackedPerson.pose.pose.orientation.x = quaternion[0]
    trackedPerson.pose.pose.orientation.y = quaternion[1]
    trackedPerson.pose.pose.orientation.z = quaternion[2]
    trackedPerson.pose.pose.orientation.w = quaternion[3]

    trackedPerson.pose.covariance[0 + 0 * 6] = 0.001   # x
    trackedPerson.pose.covariance[1 + 1 * 6] = 0.001   # y
    trackedPerson.pose.covariance[2 + 2 * 6] = 999999  # z
    trackedPerson.pose.covariance[3 + 3 * 6] = 999999  # x rotation
    trackedPerson.pose.covariance[4 + 4 * 6] = 999999  # y rotation (original wrote 4 + 5 * 6 twice)
    trackedPerson.pose.covariance[5 + 5 * 6] = 999999  # z rotation

    trackedPerson.twist.twist.linear.x = cos(theta)
    trackedPerson.twist.twist.linear.y = sin(theta)

    for i in range(0, 3):
        trackedPerson.twist.covariance[i + i * 6] = 1.0           # linear velocity
    for i in range(3, 6):
        trackedPerson.twist.covariance[i + i * 6] = float("inf")  # rotational velocity

    return trackedPerson
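# Usage sketch (hedged): a minimal mock-track publisher built on
# createTrackedPerson(). The topic name, frame_id and rate are illustrative
# assumptions, not mandated by the function above.
import rospy
from spencer_tracking_msgs.msg import TrackedPersons

def publishMockTracks():
    pub = rospy.Publisher("/spencer/perception/tracked_persons", TrackedPersons, queue_size=1)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        msg = TrackedPersons()
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = "odom"  # assumed fixed frame
        msg.tracks.append(createTrackedPerson(track_id=1, x=2.0, y=0.0, theta=90.0))
        pub.publish(msg)
        rate.sleep()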
def detectedPersonsCallback(trackAssociation, detectedPersons, syncronized_person_pub):
    age = rospy.Time.now() - detectedPersons.header.stamp
    output = "New detections with track association available (age of detections = %.2f sec)! Detection to track association: " % age.to_sec()

    syncronizedPersons = TrackedPersons()
    syncronizedPersons.header = detectedPersons.header

    if detectedPersons.detections:
        for detectedPerson in detectedPersons.detections:
            # The TrackSynchronizer invoking this callback guarantees that the detectedPersons message is buffered
            # until a track association is available for these detections (by comparing message timestamps of
            # tracks and detections).
            detectionId = detectedPerson.detection_id
            trackId = trackAssociation.lookupTrackId(detectionId)  # <-- this is what this is all about
            output += "\n[det %d --> track %s]" % (detectionId, str(trackId))

            if trackId is not None:
                syncronizedPerson = TrackedPerson()
                syncronizedPerson.track_id = trackId
                syncronizedPerson.detection_id = detectionId
                syncronizedPersons.tracks.append(syncronizedPerson)
    else:
        output += "Empty set of detections!"

    # Log after both branches, so the empty-set case is also reported
    # (the original logged before appending the empty-set note).
    rospy.loginfo(output)

    if len(syncronizedPersons.tracks) > 0:
        syncronized_person_pub.publish(syncronizedPersons)
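# Hookup sketch (hedged): the callback above needs the publisher bound as its
# third argument before it can be registered with a two-argument synchronizer
# interface. `registerCallback` is an assumed method name, not a confirmed API
# of the TrackSynchronizer mentioned in the comments above.
def setupSynchronization(trackSynchronizer):
    pub = rospy.Publisher("/synchronized_tracked_persons", TrackedPersons, queue_size=10)
    trackSynchronizer.registerCallback(
        lambda trackAssociation, detectedPersons: detectedPersonsCallback(
            trackAssociation, detectedPersons, pub))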
def bayes2spencer(self, bayes_msg):
    spencer_msg = TrackedPersons()
    spencer_msg.header = bayes_msg.header

    for i, pose in enumerate(bayes_msg.poses):
        track = TrackedPerson()
        track.track_id = self.string2uint64(bayes_msg.uuids[i])
        # PoseWithCovariance
        track.pose.pose.position = pose.position
        track.pose.pose.orientation = pose.orientation
        # TwistWithCovariance
        track.twist.twist.linear = bayes_msg.velocities[i]
        # Data not in bayes. Not sure about these ...
        track.pose.covariance = (np.random.normal(0.3, 0.1) * np.identity(6)).flatten().tolist()
        track.twist.covariance = (np.random.normal(0.3, 0.1) * np.identity(6)).flatten().tolist()
        # we assume 0 here
        # track.twist.twist.angular
        track.is_occluded = False
        track.is_matched = False
        track.detection_id = self.string2uint64(bayes_msg.uuids[i])
        track.age = 0

        spencer_msg.tracks.append(track)
    return spencer_msg
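# string2uint64() is called above but not shown. A plausible sketch (hypothetical,
# not the confirmed implementation): hash the UUID string and fold the first
# 8 bytes into a stable unsigned 64-bit track id, so the same UUID always maps
# to the same track_id.
import hashlib
import struct

def string2uint64(self, uuid_string):
    digest = hashlib.md5(uuid_string.encode("utf-8")).digest()
    return struct.unpack("<Q", digest[:8])[0]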
def createTrackAndDetection(tracks, detections, track_id, detection_id, angle, radius):
    trackedPerson = TrackedPerson()
    trackedPerson.track_id = track_id

    if detection_id >= 0:
        trackedPerson.detection_id = detection_id
        trackedPerson.is_occluded = False
    else:
        trackedPerson.is_occluded = True

    trackedPerson.age = rospy.Time.now() - startTime

    setPoseAndTwistFromAngle(trackedPerson.pose, trackedPerson.twist, angle, radius)
    tracks.append(trackedPerson)

    if detection_id >= 0:
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = detection_id
        detectedPerson.confidence = random.random()
        detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
        detectedPerson.pose.pose.position.x += random.random() * 0.5 - 0.25  # introduce some noise on observation position
        detectedPerson.pose.pose.position.y += random.random() * 0.5 - 0.25

        detections.append(detectedPerson)
    return
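# setPoseAndTwistFromAngle() is called above (and in the occlusion-simulating
# variant further below) but not shown. A hedged sketch of what it plausibly
# does: place the person on a circle of the given radius and align pose and
# velocity with the tangent. The signature (including the optional `moving`
# flag used by the later variant) and the unit speed are assumptions.
from math import cos, sin, pi
from geometry_msgs.msg import Quaternion

def setPoseAndTwistFromAngle(pose, twist, angle, radius, moving=True):
    pose.pose.position.x = radius * cos(angle)
    pose.pose.position.y = radius * sin(angle)
    yaw = angle + pi / 2.0  # tangent direction of counter-clockwise motion
    q = tf.transformations.quaternion_from_euler(0, 0, yaw)
    pose.pose.orientation = Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])
    speed = 1.0 if moving else 0.0  # assumed walking speed of 1 m/s
    twist.twist.linear.x = speed * cos(yaw)
    twist.twist.linear.y = speed * sin(yaw)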
def do_3d_tracking(self, detections_2d, detections_3d):
    start = time.time()
    # rospy.loginfo("Tracking frame")

    # convert_detections
    boxes_2d = []
    boxes_3d = []
    valid_3d = []
    features_2d = []
    features_3d = []
    dets_2d = sorted(detections_2d.detection2d_with_features, key=lambda x: x.frame_det_id)
    dets_3d = sorted(detections_3d.detection3d_with_features, key=lambda x: x.frame_det_id)

    # Pair 2D and 3D detections by frame_det_id (two-pointer walk over both sorted lists)
    i, j = 0, 0
    while i < len(dets_2d) and j < len(dets_3d):
        det_2d = dets_2d[i]
        det_3d = dets_3d[j]
        if det_2d.frame_det_id == det_3d.frame_det_id:
            i += 1
            j += 1
            valid_3d.append(det_3d.valid)
            boxes_2d.append(np.array([det_2d.x1, det_2d.y1, det_2d.x2, det_2d.y2, 1, -1, -1]))
            features_2d.append(torch.Tensor(det_2d.feature).to('cuda:0'))
            if det_3d.valid:
                boxes_3d.append(np.array([det_3d.x, det_3d.y, det_3d.z,
                                          det_3d.l, det_3d.h, det_3d.w, det_3d.theta]))
                features_3d.append(torch.Tensor(det_3d.feature).to('cuda:0'))
            else:
                boxes_3d.append(None)
                features_3d.append(None)
        elif det_2d.frame_det_id < det_3d.frame_det_id:
            i += 1
        else:
            j += 1

    if not boxes_3d:
        boxes_3d = None

    features_3d, features_2d = combine_features(
        features_2d, features_3d, valid_3d, self.combination_model,
        depth_weight=self.depth_weight)
    detections = convert_detections(boxes_2d, features_3d, features_2d, boxes_3d)

    self.tracker.predict()
    self.tracker.update(None, detections)

    tracked_array = TrackedPersons()
    tracked_array.header.stamp = detections_3d.header.stamp
    tracked_array.header.frame_id = 'occam'

    for track in self.tracker.tracks:
        if not track.is_confirmed():
            continue
        # print('Confirmed track!')
        pose_msg = Pose()
        tracked_person_msg = TrackedPerson()
        tracked_person_msg.header.stamp = detections_3d.header.stamp
        tracked_person_msg.header.frame_id = 'occam'
        tracked_person_msg.track_id = track.track_id
        # A track counts as matched if it was updated within the last two frames
        tracked_person_msg.is_matched = track.time_since_update < 2

        bbox = track.to_tlwh3d()
        covariance = track.get_cov().reshape(-1).tolist()
        pose_msg.position.x = bbox[0]
        pose_msg.position.y = bbox[1] - bbox[4] / 2  # shift y by half the box height
        pose_msg.position.z = bbox[2]
        pose_msg = PoseWithCovariance(pose=pose_msg, covariance=covariance)
        tracked_person_msg.pose = pose_msg
        tracked_array.tracks.append(tracked_person_msg)

    self.tracker_output_pub.publish(tracked_array)
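# The pairing loop in do_3d_tracking() is a two-pointer intersection over two
# lists sorted by frame_det_id. A standalone sketch of the same pattern, on
# hypothetical inputs, for clarity:
def match_by_id(sorted_a, sorted_b, key=lambda d: d.frame_det_id):
    # Yields (a, b) pairs with equal ids in O(len(a) + len(b)).
    i, j = 0, 0
    while i < len(sorted_a) and j < len(sorted_b):
        ka, kb = key(sorted_a[i]), key(sorted_b[j])
        if ka == kb:
            yield sorted_a[i], sorted_b[j]
            i += 1
            j += 1
        elif ka < kb:
            i += 1
        else:
            j += 1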
def newSegmentationReceived(self, laserscan, laserscanSegmentation):
    currentStamp = laserscanSegmentation.header.stamp
    pointCount = len(laserscan.ranges)
    cartesianCoordinates = []

    # Required for velocity calculations
    if self._lastDataStamp is None:
        self._lastDataStamp = laserscanSegmentation.header.stamp

    # Build lookup of cartesian coordinates per laser point
    for pointIndex in xrange(0, pointCount):
        cartesianCoordinates.append(self.calculateCartesianCoordinates(laserscan, pointIndex))

    # For each labelled segment, create and append one TrackedPerson and DetectedPerson message
    trackedPersons = TrackedPersons(header=laserscanSegmentation.header)
    detectedPersons = DetectedPersons(header=laserscanSegmentation.header)

    for segment in laserscanSegmentation.segments:
        # Calculate centroid of tracked person
        centroid = numpy.array([0.0, 0.0, 0.0])
        for pointIndex in segment.measurement_indices:
            centroid += cartesianCoordinates[pointIndex]
        centroid /= float(len(segment.measurement_indices))

        # Lookup previous centroid (for velocity/twist calculation), assume zero velocity at track initialization
        if not segment.label in self._previousCentroidLookup:
            self._previousCentroidLookup[segment.label] = collections.deque()

        # Maintain centroid history
        centroidHistory = self._previousCentroidLookup[segment.label]
        while len(centroidHistory) > 20:
            centroidHistory.popleft()

        # Calculate average velocity over past few frames
        dt = 0
        velocity = accumulatedVelocity = numpy.array([0.0, 0.0, 0.0])
        if centroidHistory:
            previousCentroid = centroid
            previousStamp = currentStamp
            for historyStamp, historyCentroid in reversed(centroidHistory):
                accumulatedVelocity += previousCentroid - historyCentroid
                dt += abs((previousStamp - historyStamp).to_sec())
                previousCentroid = historyCentroid
                previousStamp = historyStamp
            if dt > 0:  # guard against identical timestamps
                velocity = accumulatedVelocity / dt

        centroidHistory.append((currentStamp, centroid))

        # Remember age of track
        if not segment.label in self._firstTrackEncounterLookup:
            self._firstTrackEncounterLookup[segment.label] = currentStamp

        # Initialize TrackedPerson message
        trackedPerson = TrackedPerson()
        trackedPerson.track_id = segment.label
        trackedPerson.age = currentStamp - self._firstTrackEncounterLookup[segment.label]
        trackedPerson.detection_id = self._detectionIdCounter
        trackedPerson.is_occluded = False

        # Set position
        LARGE_VARIANCE = 99999999
        trackedPerson.pose.pose.position.x = centroid[0]
        trackedPerson.pose.pose.position.y = centroid[1]
        trackedPerson.pose.pose.position.z = centroid[2]

        # Set orientation
        if dt > 0:
            yaw = math.atan2(velocity[1], velocity[0])
            quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)
            trackedPerson.pose.pose.orientation = Quaternion(
                x=quaternion[0], y=quaternion[1], z=quaternion[2], w=quaternion[3])

        trackedPerson.pose.covariance[2 * 6 + 2] = trackedPerson.pose.covariance[3 * 6 + 3] \
            = trackedPerson.pose.covariance[4 * 6 + 4] = LARGE_VARIANCE  # z pos, roll, pitch

        # Set velocity
        if dt > 0:
            trackedPerson.twist.twist.linear.x = velocity[0]
            trackedPerson.twist.twist.linear.y = velocity[1]
            trackedPerson.twist.twist.linear.z = velocity[2]

        trackedPerson.twist.covariance[2 * 6 + 2] = trackedPerson.twist.covariance[3 * 6 + 3] \
            = trackedPerson.twist.covariance[4 * 6 + 4] = trackedPerson.twist.covariance[5 * 6 + 5] \
            = LARGE_VARIANCE  # linear z, angular x, y, z

        # Append to list of tracked persons
        trackedPersons.tracks.append(trackedPerson)

        # Initialize DetectedPerson message by copying data from TrackedPerson
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = trackedPerson.detection_id
        detectedPerson.confidence = 1.0
        detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
        detectedPerson.pose.pose.orientation = Quaternion()

        for i in xrange(0, 2):
            detectedPerson.pose.covariance[i * 6 + i] = 0.17 * 0.17
        detectedPerson.pose.covariance[5 * 6 + 5] = LARGE_VARIANCE  # yaw

        detectedPersons.detections.append(detectedPerson)
        self._detectionIdCounter += 1

    # Publish tracked persons
    self.trackedPersonsPublisher.publish(trackedPersons)
    self.detectedPersonsPublisher.publish(detectedPersons)

    self._lastDataStamp = laserscanSegmentation.header.stamp
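# calculateCartesianCoordinates() is referenced above but not shown. A hedged
# sketch of the usual polar-to-cartesian conversion for a sensor_msgs/LaserScan,
# assuming it returns a 3-element numpy array in the scanner frame:
def calculateCartesianCoordinates(self, laserscan, pointIndex):
    angle = laserscan.angle_min + pointIndex * laserscan.angle_increment
    r = laserscan.ranges[pointIndex]
    return numpy.array([r * math.cos(angle), r * math.sin(angle), 0.0])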
def createTrackAndDetection(tracks, detections, track_id, angle, radius, moving=True):
    trackedPerson = TrackedPerson()

    global trackDictionary, detectionCounter
    if not track_id in trackDictionary:
        trackDictionaryEntry = TrackDictionaryEntry()
        trackDictionary[track_id] = trackDictionaryEntry
    else:
        trackDictionaryEntry = trackDictionary[track_id]

    # Generate detection ID
    detection_id = detectionCounter
    detectionCounter += 1

    #
    # Simulate occlusions
    #
    occlusionProbability = 0.02
    occlusionMinDuration = 1.0 / updateRateHz   # at least 1 frame
    occlusionMaxDuration = 15.0 / updateRateHz  # at most 15 frames

    # Currently not occluded?
    if trackDictionaryEntry.remainingOcclusionTime <= 0:
        if random.random() < occlusionProbability:
            trackDictionaryEntry.remainingOcclusionTime = \
                random.random() * (occlusionMaxDuration - occlusionMinDuration) + occlusionMinDuration

    # Is the track occluded?
    if trackDictionaryEntry.remainingOcclusionTime <= 0:
        trackedPerson.detection_id = detection_id
        trackedPerson.is_occluded = False
    else:
        trackedPerson.is_occluded = True
        trackDictionaryEntry.remainingOcclusionTime -= 1.0 / updateRateHz

    #
    # Simulate track ID switches
    #
    idSwitchProbability = 0.001
    if random.random() < idSwitchProbability:
        trackDictionaryEntry.idShift += 1
        trackDictionaryEntry.lastIdShiftAt = rospy.Time.now()

    idShiftAmount = 66  # better don't change this as other mock components might rely upon this to consistently fake their data across ID switches
    trackedPerson.track_id = track_id + trackDictionaryEntry.idShift * idShiftAmount
    trackedPerson.age = rospy.Time.now() - trackDictionaryEntry.lastIdShiftAt

    # Determine track position
    setPoseAndTwistFromAngle(trackedPerson.pose, trackedPerson.twist, angle, radius, moving)

    # Track position noise
    trackedPerson.pose.pose.position.x += random.random() * 0.1 - 0.05
    trackedPerson.pose.pose.position.y += random.random() * 0.1 - 0.05

    tracks.append(trackedPerson)

    if not trackedPerson.is_occluded:
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = detection_id
        detectedPerson.confidence = random.random() * 0.5 + 0.5
        detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
        detectedPerson.pose.pose.position.x += random.random() * 0.5 - 0.25  # introduce some noise on detected position
        detectedPerson.pose.pose.position.y += random.random() * 0.5 - 0.25

        detections.append(detectedPerson)
    return
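# TrackDictionaryEntry is instantiated above but not shown; a minimal sketch of
# the per-track state, with field names inferred directly from the usage above:
class TrackDictionaryEntry(object):
    def __init__(self):
        self.remainingOcclusionTime = 0.0      # seconds of simulated occlusion left
        self.idShift = 0                       # number of simulated ID switches so far
        self.lastIdShiftAt = rospy.Time.now()  # track birth counts as the last ID shift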
def createTrackedPersons(self, timestamp):
    trackedPersons = TrackedPersons()
    trackedPersons.header.stamp = timestamp
    trackedPersons.header.frame_id = self.trackingFrame

    for trackKey, track in self.database.getTracks().iteritems():
        waypoints = self.database.getWaypoints(track)
        if waypoints:
            firstWaypoint = waypoints[0]
            lastWaypoint = waypoints[-1]

            # Check if we are before the first or past the last waypoint
            beforeFirstWaypoint = timestamp.to_sec() < firstWaypoint["timestamp"]
            behindLastWaypoint = timestamp.to_sec() > lastWaypoint["timestamp"]

            if not beforeFirstWaypoint and not behindLastWaypoint:
                trackedPerson = TrackedPerson()
                trackedPerson.track_id = self.database.getTrackId(trackKey)
                trackedPerson.is_occluded = False
                trackedPerson.age = rospy.Time(firstWaypoint["timestamp"])

                # Find adjacent waypoints for linear interpolation of tracked person's position
                previousWaypoint = nextWaypoint = None
                for waypoint in waypoints:
                    previousWaypoint = nextWaypoint
                    nextWaypoint = waypoint
                    if nextWaypoint["timestamp"] > timestamp.to_sec():
                        break

                try:
                    # If there is only one waypoint for this track, previousWaypoint will be None
                    nextPosition = self.transformer.toCommonFrame(nextWaypoint, self.trackingFrame)
                    nextTimestamp = nextWaypoint["timestamp"]

                    # Determine waypoint segment duration and distance
                    if previousWaypoint is not None:
                        previousPosition = self.transformer.toCommonFrame(previousWaypoint, self.trackingFrame)
                        previousTimestamp = previousWaypoint["timestamp"]
                        segmentDuration = nextTimestamp - previousTimestamp
                        alpha = (timestamp.to_sec() - previousTimestamp) / segmentDuration
                        diffVector = nextPosition - previousPosition
                    else:
                        previousPosition = nextPosition
                        segmentDuration = 1.0
                        alpha = 0.0
                        diffVector = numpy.array([0, 0])
                except tf.Exception:
                    continue

                # Linear interpolation
                interpolatedPosition = previousPosition + diffVector * alpha
                velocity = diffVector / segmentDuration

                trackedPerson.pose.pose.position.x, trackedPerson.pose.pose.position.y = interpolatedPosition
                trackedPerson.pose.covariance[2 * 6 + 2] = trackedPerson.pose.covariance[3 * 6 + 3] \
                    = trackedPerson.pose.covariance[4 * 6 + 4] = 99999999  # z pos, roll, pitch

                yaw = math.atan2(velocity[1], velocity[0])
                quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)
                trackedPerson.pose.pose.orientation = Quaternion(
                    x=quaternion[0], y=quaternion[1], z=quaternion[2], w=quaternion[3])

                trackedPerson.twist.twist.linear.x, trackedPerson.twist.twist.linear.y = velocity

                trackedPersons.tracks.append(trackedPerson)

    return trackedPersons
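# Usage sketch (hedged): replaying the interpolated tracks on a fixed timer.
# The topic name and the 10 Hz rate are illustrative assumptions.
def startPlayback(self):
    self._trackedPersonsPub = rospy.Publisher("/spencer/perception/tracked_persons", TrackedPersons, queue_size=1)
    self._playbackTimer = rospy.Timer(rospy.Duration(0.1), self._onPlaybackTimer)

def _onPlaybackTimer(self, event):
    self._trackedPersonsPub.publish(self.createTrackedPersons(rospy.Time.now()))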
def publish_obstacles(self):
    with self.lock:
        if self.transport_data is None:
            return
        # non-leg obstacles
        timestamp, xx, yy, clusters, is_legs, cogs, radii, tf_rob_in_fix = self.transport_data

        from spencer_tracking_msgs.msg import TrackedPersons, TrackedPerson
        from geometry_msgs.msg import Quaternion

        # tracks
        track_ids = []
        tracks_latest_pos, tracks_color = [], []
        tracks_in_frame, tracks_velocities = [], []
        tracks_radii = []
        for trackid in self.tracker.active_tracks:
            track = self.tracker.active_tracks[trackid]
            xy = np.array(track.pos_history[-1])
            is_track_in_frame = True
            if trackid in self.tracker.latest_matches:
                color = (0., 1., 0., 1.)  # green
            elif trackid in self.tracker.new_tracks:
                color = (0., 0., 1., 1.)  # blue
            else:
                color = (0.7, 0.7, 0.7, 1.)
                is_track_in_frame = False
            track_ids.append(trackid)
            tracks_latest_pos.append(xy)
            tracks_color.append(color)
            tracks_in_frame.append(is_track_in_frame)
            tracks_velocities.append(track.estimate_velocity())
            tracks_radii.append(track.avg_radius())

        # publish trackedpersons
        pub = rospy.Publisher("/tracked_persons", TrackedPersons, queue_size=1)
        tp_msg = TrackedPersons()
        tp_msg.header.frame_id = self.kFixedFrame
        tp_msg.header.stamp = timestamp
        for trackid, xy, in_frame, vel, radius in zip(
                track_ids, tracks_latest_pos, tracks_in_frame, tracks_velocities, tracks_radii):
            # if not in_frame:
            #     continue
            tp = TrackedPerson()
            tp.track_id = trackid
            tp.is_occluded = False
            tp.is_matched = in_frame
            tp.detection_id = trackid
            tp.pose.pose.position.x = xy[0]
            tp.pose.pose.position.y = xy[1]
            heading_angle = np.arctan2(vel[1], vel[0])  # guess heading from velocity
            tp.pose.pose.orientation = Quaternion(
                *tf.transformations.quaternion_from_euler(0, 0, heading_angle))
            tp.twist.twist.linear.x = vel[0]
            tp.twist.twist.linear.y = vel[1]
            tp.twist.twist.angular.z = 0  # unknown
            tp_msg.tracks.append(tp)
        pub.publish(tp_msg)

        pub = rospy.Publisher('/obstacles', ObstacleArrayMsg, queue_size=1)
        obstacles_msg = ObstacleArrayMsg()
        obstacles_msg.header.stamp = timestamp
        obstacles_msg.header.frame_id = self.kFixedFrame
        for trackid, xy, in_frame, vel, radius in zip(
                track_ids, tracks_latest_pos, tracks_in_frame, tracks_velocities, tracks_radii):
            if not in_frame:
                continue
            # Add point obstacle
            obst = ObstacleMsg()
            obst.id = trackid
            obst.polygon.points = [Point32()]
            obst.polygon.points[0].x = xy[0]
            obst.polygon.points[0].y = xy[1]
            obst.polygon.points[0].z = 0
            obst.radius = radius
            yaw = np.arctan2(vel[1], vel[0])
            q = tf.transformations.quaternion_from_euler(0, 0, yaw)
            obst.orientation = Quaternion(*q)
            obst.velocities.twist.linear.x = vel[0]
            obst.velocities.twist.linear.y = vel[1]
            obst.velocities.twist.linear.z = 0
            obst.velocities.twist.angular.x = 0
            obst.velocities.twist.angular.y = 0
            obst.velocities.twist.angular.z = 0
            obstacles_msg.obstacles.append(obst)
        pub.publish(obstacles_msg)

        pub = rospy.Publisher('/close_nonleg_obstacles', ObstacleArrayMsg, queue_size=1)
        MAX_DIST_STATIC_CLUSTERS_M = 3.0
        cogs_in_fix = []
        for i in range(len(cogs)):
            cogs_in_fix.append(apply_tf(cogs[i], Pose2D(tf_rob_in_fix)))
        obstacles_msg = ObstacleArrayMsg()
        obstacles_msg.header.stamp = timestamp
        obstacles_msg.header.frame_id = self.kFixedFrame
        for c, cog_in_fix, cog_in_rob, r, is_leg in zip(clusters, cogs_in_fix, cogs, radii, is_legs):
            if np.linalg.norm(cog_in_rob) < self.kRobotRadius:
                continue
            if len(c) < self.kMinClusterSize:
                continue
            # leg obstacles are already published in the tracked obstacles topic
            if is_leg:
                continue
            # close obstacles only
            if np.linalg.norm(cog_in_rob) > MAX_DIST_STATIC_CLUSTERS_M:
                continue
            # Add point obstacle
            obst = ObstacleMsg()
            obst.id = 0
            obst.polygon.points = [Point32()]
            obst.polygon.points[0].x = cog_in_fix[0]
            obst.polygon.points[0].y = cog_in_fix[1]
            obst.polygon.points[0].z = 0
            obst.radius = r
            yaw = 0
            q = tf.transformations.quaternion_from_euler(0, 0, yaw)
            obst.orientation = Quaternion(*q)
            obst.velocities.twist.linear.x = 0
            obst.velocities.twist.linear.y = 0
            obst.velocities.twist.linear.z = 0
            obst.velocities.twist.angular.x = 0
            obst.velocities.twist.angular.y = 0
            obst.velocities.twist.angular.z = 0
            obstacles_msg.obstacles.append(obst)
        pub.publish(obstacles_msg)

        pub = rospy.Publisher('/obstacle_markers', MarkerArray, queue_size=1)
        # delete all markers
        ma = MarkerArray()
        mk = Marker()
        mk.header.frame_id = self.kFixedFrame
        mk.ns = "obstacles"
        mk.id = 0
        mk.type = 0    # ARROW
        mk.action = 3  # deleteall
        ma.markers.append(mk)
        pub.publish(ma)

        # publish tracks
        ma = MarkerArray()
        id_ = 0
        # track endpoint (note: `color` is now taken from tracks_color per track;
        # the original reused the stale value left over from the earlier loop)
        for trackid, xy, in_frame, vel, color in zip(
                track_ids, tracks_latest_pos, tracks_in_frame, tracks_velocities, tracks_color):
            if not in_frame:
                continue
            normvel = np.linalg.norm(vel)
            if normvel == 0:
                continue
            mk = Marker()
            mk.header.frame_id = self.kFixedFrame
            mk.ns = "tracks"
            mk.id = trackid
            mk.type = 0  # ARROW
            mk.action = 0
            mk.scale.x = np.linalg.norm(vel)
            mk.scale.y = 0.1
            mk.scale.z = 0.1
            mk.color.r = color[0]
            mk.color.g = color[1]
            mk.color.b = color[2]
            mk.color.a = color[3]
            mk.frame_locked = True
            mk.pose.position.x = xy[0]
            mk.pose.position.y = xy[1]
            mk.pose.position.z = 0.03
            yaw = np.arctan2(vel[1], vel[0])
            q = tf.transformations.quaternion_from_euler(0, 0, yaw)
            mk.pose.orientation = Quaternion(*q)
            ma.markers.append(mk)
        pub.publish(ma)
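# Note (hedged): publish_obstacles() above re-creates its rospy.Publisher objects
# on every call, which re-advertises each topic every frame. The conventional
# pattern is to create them once, e.g. in the node's __init__; a sketch with the
# same topics and message types as above:
def _setup_publishers(self):
    self.tracked_persons_pub = rospy.Publisher("/tracked_persons", TrackedPersons, queue_size=1)
    self.obstacles_pub = rospy.Publisher("/obstacles", ObstacleArrayMsg, queue_size=1)
    self.close_nonleg_pub = rospy.Publisher("/close_nonleg_obstacles", ObstacleArrayMsg, queue_size=1)
    self.obstacle_markers_pub = rospy.Publisher("/obstacle_markers", MarkerArray, queue_size=1)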