Example #1
0
    def bayes2spencer(self, bayes_msg):
        """Translate a Bayes people-tracker message into a spencer TrackedPersons message.

        The header is copied verbatim.  For every tracked pose, position,
        orientation and linear velocity are copied over.  The Bayes message
        carries no covariances, occlusion/match flags or age, so covariances
        are substituted with randomly scaled identity matrices and the flags
        are set to fixed defaults.
        """
        spencer_msg = TrackedPersons()
        spencer_msg.header = bayes_msg.header

        for idx, bayes_pose in enumerate(bayes_msg.poses):
            person = TrackedPerson()
            person.track_id = self.string2uint64(bayes_msg.uuids[idx])
            # PoseWithCovariance: copy position/orientation straight over.
            person.pose.pose.position = bayes_pose.position
            person.pose.pose.orientation = bayes_pose.orientation
            # TwistWithCovariance: only the linear part is available.
            person.twist.twist.linear = bayes_msg.velocities[idx]
            # Covariances are not present in the Bayes message; fake each one
            # as a diagonal matrix scaled by a draw from N(0.3, 0.1).
            pose_cov = np.random.normal(0.3, 0.1) * np.identity(6)
            person.pose.covariance = pose_cov.flatten().tolist()
            twist_cov = np.random.normal(0.3, 0.1) * np.identity(6)
            person.twist.covariance = twist_cov.flatten().tolist()
            # Angular twist is left at its zero message default.
            person.is_occluded = False
            person.is_matched = False
            person.detection_id = self.string2uint64(bayes_msg.uuids[idx])
            person.age = 0

            spencer_msg.tracks.append(person)

        return spencer_msg
Example #2
0
def createTrackAndDetection( tracks, detections, track_id, detection_id, angle, radius ) :
    """Append one mock TrackedPerson to *tracks* and, when detected, a matching DetectedPerson to *detections*.

    A negative *detection_id* marks the person as occluded, in which case no
    detection is generated.  Pose and twist are derived from *angle* and
    *radius* by setPoseAndTwistFromAngle; the observed detection position gets
    uniform noise of +/-0.25 m added on x and y.
    """
    track = TrackedPerson()
    track.track_id = track_id

    has_detection = detection_id >= 0
    if has_detection :
        track.detection_id = detection_id
    # Occluded exactly when there is no associated detection.
    track.is_occluded = not has_detection

    # Track age is the time elapsed since the mock publisher started.
    track.age = rospy.Time.now() - startTime

    setPoseAndTwistFromAngle(track.pose, track.twist, angle, radius)
    tracks.append(track)

    if has_detection:
        detection = DetectedPerson()
        detection.detection_id = detection_id
        detection.confidence = random.random()

        # Copy the track pose and perturb it to simulate observation noise.
        detection.pose = copy.deepcopy(track.pose)
        detection.pose.pose.position.x += random.random() * 0.5 - 0.25 # introduce some noise on observation position
        detection.pose.pose.position.y += random.random() * 0.5 - 0.25

        detections.append(detection)

    return
Example #3
0
    def newSegmentationReceived(self, laserscan, laserscanSegmentation):
        """Convert one labelled laser-scan segmentation into TrackedPersons/DetectedPersons and publish both.

        For each segment, the centroid of its laser points becomes the person
        position, the segment label becomes the track ID, and velocity is
        estimated by averaging centroid displacements over the per-label
        history kept in self._previousCentroidLookup.

        :param laserscan: laser scan providing the range readings
        :param laserscanSegmentation: segmentation whose segments hold
            measurement indices into the scan plus an integer label
        """
        currentStamp = laserscanSegmentation.header.stamp
        pointCount = len(laserscan.ranges)
        cartesianCoordinates = []

        # Required for velocity calculations
        if self._lastDataStamp is None:
            self._lastDataStamp = laserscanSegmentation.header.stamp

        # Build lookup of cartesian coordinates per laser point
        for pointIndex in xrange(0, pointCount):
            cartesianCoordinates.append(
                self.calculateCartesianCoordinates(laserscan, pointIndex))

        # For each labelled segment, create and append one TrackedPerson and DetectedPerson message
        trackedPersons = TrackedPersons(header=laserscanSegmentation.header)
        detectedPersons = DetectedPersons(header=laserscanSegmentation.header)
        for segment in laserscanSegmentation.segments:
            # Calculate centroid of tracked person
            centroid = numpy.array([0.0, 0.0, 0.0])
            for pointIndex in segment.measurement_indices:
                centroid += cartesianCoordinates[pointIndex]
            centroid /= float(len(segment.measurement_indices))

            # Lookup previous centroid (for velocity/twist calculation), assume zero velocity at track initialization
            if not segment.label in self._previousCentroidLookup:
                self._previousCentroidLookup[
                    segment.label] = collections.deque()

            # Maintain centroid history: drop oldest entries beyond ~20 frames
            centroidHistory = self._previousCentroidLookup[segment.label]
            while len(centroidHistory) > 20:
                centroidHistory.popleft()

            # Calculate average velocity over past few frames
            dt = 0
            # NOTE(review): velocity and accumulatedVelocity alias the SAME
            # array; the in-place += below mutates both. velocity is rebound
            # before use, so this appears harmless — confirm intended.
            velocity = accumulatedVelocity = numpy.array([0.0, 0.0, 0.0])

            if centroidHistory:
                previousCentroid = centroid
                previousStamp = currentStamp
                for historyStamp, historyCentroid in reversed(centroidHistory):
                    accumulatedVelocity += previousCentroid - historyCentroid
                    dt += abs((previousStamp - historyStamp).to_sec())
                    previousCentroid = historyCentroid
                    previousStamp = historyStamp

                # NOTE(review): if every history stamp equals currentStamp, dt
                # stays 0 and numpy yields inf/nan here (no exception); all
                # downstream uses are guarded by `if dt > 0`.
                velocity = accumulatedVelocity / dt

            centroidHistory.append((currentStamp, centroid))

            # Remember age of track
            if not segment.label in self._firstTrackEncounterLookup:
                self._firstTrackEncounterLookup[segment.label] = currentStamp

            # Initialize TrackedPerson message
            trackedPerson = TrackedPerson()

            trackedPerson.track_id = segment.label
            trackedPerson.age = currentStamp - self._firstTrackEncounterLookup[
                segment.label]
            trackedPerson.detection_id = self._detectionIdCounter
            trackedPerson.is_occluded = False

            # Set position
            LARGE_VARIANCE = 99999999
            trackedPerson.pose.pose.position.x = centroid[0]
            trackedPerson.pose.pose.position.y = centroid[1]
            trackedPerson.pose.pose.position.z = centroid[2]

            # Set orientation: face the direction of estimated motion (only
            # meaningful when a velocity estimate exists)
            if dt > 0:
                yaw = math.atan2(velocity[1], velocity[0])
                quaternion = tf.transformations.quaternion_from_euler(
                    0, 0, yaw)
                trackedPerson.pose.pose.orientation = Quaternion(
                    x=quaternion[0],
                    y=quaternion[1],
                    z=quaternion[2],
                    w=quaternion[3])

            trackedPerson.pose.covariance[
                2 * 6 + 2] = trackedPerson.pose.covariance[
                    3 * 6 + 3] = trackedPerson.pose.covariance[
                        4 * 6 + 4] = LARGE_VARIANCE  # z pos, roll, pitch

            # Set velocity
            if dt > 0:
                trackedPerson.twist.twist.linear.x = velocity[0]
                trackedPerson.twist.twist.linear.y = velocity[1]
                trackedPerson.twist.twist.linear.z = velocity[2]

            trackedPerson.twist.covariance[
                2 * 6 + 2] = trackedPerson.twist.covariance[
                    3 * 6 + 3] = trackedPerson.twist.covariance[
                        4 * 6 + 4] = trackedPerson.twist.covariance[
                            5 * 6 +
                            5] = LARGE_VARIANCE  # linear z, angular x, y, z

            # Append to list of tracked persons
            trackedPersons.tracks.append(trackedPerson)

            # Initialize DetectedPerson message by copying data from TrackedPerson
            detectedPerson = DetectedPerson()
            detectedPerson.detection_id = trackedPerson.detection_id
            detectedPerson.confidence = 1.0
            detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
            detectedPerson.pose.pose.orientation = Quaternion()
            # Fixed 0.17 m std-dev on x and y; orientation is unobserved
            for i in xrange(0, 2):
                detectedPerson.pose.covariance[i * 6 + i] = 0.17 * 0.17
            detectedPerson.pose.covariance[5 * 6 + 5] = LARGE_VARIANCE  # yaw

            detectedPersons.detections.append(detectedPerson)
            self._detectionIdCounter += 1

        # Publish tracked persons
        self.trackedPersonsPublisher.publish(trackedPersons)
        self.detectedPersonsPublisher.publish(detectedPersons)

        self._lastDataStamp = laserscanSegmentation.header.stamp
    def newSegmentationReceived(self, laserscan, laserscanSegmentation):
        """Convert one labelled laser-scan segmentation into TrackedPersons/DetectedPersons and publish both.

        NOTE(review): this is a reformatted duplicate of an identical
        newSegmentationReceived definition earlier in this file; if both live
        in one class, this later definition silently replaces the earlier one
        — confirm which copy should be kept.

        :param laserscan: laser scan providing the range readings
        :param laserscanSegmentation: segmentation whose segments hold
            measurement indices into the scan plus an integer label
        """
        currentStamp = laserscanSegmentation.header.stamp
        pointCount = len(laserscan.ranges)
        cartesianCoordinates = []

        # Required for velocity calculations
        if self._lastDataStamp is None:
            self._lastDataStamp = laserscanSegmentation.header.stamp

        # Build lookup of cartesian coordinates per laser point
        for pointIndex in xrange(0, pointCount):
            cartesianCoordinates.append(self.calculateCartesianCoordinates(laserscan, pointIndex))

        # For each labelled segment, create and append one TrackedPerson and DetectedPerson message
        trackedPersons = TrackedPersons(header=laserscanSegmentation.header)
        detectedPersons = DetectedPersons(header=laserscanSegmentation.header)
        for segment in laserscanSegmentation.segments:
            # Calculate centroid of tracked person
            centroid = numpy.array([0.0, 0.0, 0.0])
            for pointIndex in segment.measurement_indices:
                centroid += cartesianCoordinates[pointIndex]
            centroid /= float(len(segment.measurement_indices))

            # Lookup previous centroid (for velocity/twist calculation), assume zero velocity at track initialization
            if not segment.label in self._previousCentroidLookup:
                self._previousCentroidLookup[segment.label] = collections.deque()

            # Maintain centroid history: drop oldest entries beyond ~20 frames
            centroidHistory = self._previousCentroidLookup[segment.label]
            while len(centroidHistory) > 20:
                centroidHistory.popleft()

            # Calculate average velocity over past few frames
            dt = 0
            # NOTE(review): velocity and accumulatedVelocity alias the SAME
            # array; the in-place += below mutates both. velocity is rebound
            # before use, so this appears harmless — confirm intended.
            velocity = accumulatedVelocity = numpy.array([0.0, 0.0, 0.0])

            if centroidHistory:
                previousCentroid = centroid
                previousStamp = currentStamp
                for historyStamp, historyCentroid in reversed(centroidHistory):
                    accumulatedVelocity += previousCentroid - historyCentroid
                    dt += abs((previousStamp - historyStamp).to_sec())
                    previousCentroid = historyCentroid
                    previousStamp = historyStamp

                # NOTE(review): dt can be 0 if all history stamps equal
                # currentStamp; numpy then yields inf/nan (no exception), and
                # downstream use is guarded by `if dt > 0`.
                velocity = accumulatedVelocity / dt

            centroidHistory.append((currentStamp, centroid))

            # Remember age of track
            if not segment.label in self._firstTrackEncounterLookup:
                self._firstTrackEncounterLookup[segment.label] = currentStamp

            # Initialize TrackedPerson message
            trackedPerson = TrackedPerson()

            trackedPerson.track_id = segment.label
            trackedPerson.age = currentStamp - self._firstTrackEncounterLookup[segment.label]
            trackedPerson.detection_id = self._detectionIdCounter
            trackedPerson.is_occluded = False

            # Set position
            LARGE_VARIANCE = 99999999
            trackedPerson.pose.pose.position.x = centroid[0]
            trackedPerson.pose.pose.position.y = centroid[1]
            trackedPerson.pose.pose.position.z = centroid[2]

            # Set orientation: face the direction of estimated motion
            if dt > 0:
                yaw = math.atan2(velocity[1], velocity[0])
                quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)
                trackedPerson.pose.pose.orientation = Quaternion(
                    x=quaternion[0], y=quaternion[1], z=quaternion[2], w=quaternion[3]
                )

            trackedPerson.pose.covariance[2 * 6 + 2] = trackedPerson.pose.covariance[
                3 * 6 + 3
            ] = trackedPerson.pose.covariance[
                4 * 6 + 4
            ] = LARGE_VARIANCE  # z pos, roll, pitch

            # Set velocity
            if dt > 0:
                trackedPerson.twist.twist.linear.x = velocity[0]
                trackedPerson.twist.twist.linear.y = velocity[1]
                trackedPerson.twist.twist.linear.z = velocity[2]

            trackedPerson.twist.covariance[2 * 6 + 2] = trackedPerson.twist.covariance[
                3 * 6 + 3
            ] = trackedPerson.twist.covariance[4 * 6 + 4] = trackedPerson.twist.covariance[
                5 * 6 + 5
            ] = LARGE_VARIANCE  # linear z, angular x, y, z

            # Append to list of tracked persons
            trackedPersons.tracks.append(trackedPerson)

            # Initialize DetectedPerson message by copying data from TrackedPerson
            detectedPerson = DetectedPerson()
            detectedPerson.detection_id = trackedPerson.detection_id
            detectedPerson.confidence = 1.0
            detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
            detectedPerson.pose.pose.orientation = Quaternion()
            # Fixed 0.17 m std-dev on x and y; orientation is unobserved
            for i in xrange(0, 2):
                detectedPerson.pose.covariance[i * 6 + i] = 0.17 * 0.17
            detectedPerson.pose.covariance[5 * 6 + 5] = LARGE_VARIANCE  # yaw

            detectedPersons.detections.append(detectedPerson)
            self._detectionIdCounter += 1

        # Publish tracked persons
        self.trackedPersonsPublisher.publish(trackedPersons)
        self.detectedPersonsPublisher.publish(detectedPersons)

        self._lastDataStamp = laserscanSegmentation.header.stamp
def createTrackAndDetection( tracks, detections, track_id, angle, radius, moving = True) :
    """Append one mock TrackedPerson to *tracks*, simulating occlusions and track ID switches.

    A per-track state entry is kept in the module-global trackDictionary;
    detection IDs are drawn from the global detectionCounter.  When the person
    is not occluded, a matching noisy DetectedPerson is appended to
    *detections*.

    NOTE(review): assumes TrackDictionaryEntry initializes
    remainingOcclusionTime, idShift and lastIdShiftAt — confirm in its
    definition (not visible here).
    """
    trackedPerson = TrackedPerson()

    global trackDictionary, detectionCounter
    trackDictionaryEntry = None

    # Fetch or lazily create the persistent per-track state.
    if not track_id in trackDictionary:
        trackDictionaryEntry = TrackDictionaryEntry()
        trackDictionary[track_id] = trackDictionaryEntry
    else:
        trackDictionaryEntry = trackDictionary[track_id]

    # Generate detection ID
    detection_id = detectionCounter
    detectionCounter += 1

    #
    # Simulate occlusions
    #
    occlusionProbability = 0.02
    occlusionMinDuration =  1.0 / updateRateHz # at least 1 frame
    occlusionMaxDuration = 15.0 / updateRateHz # at most 15 frames

    # Currently not occluded? Then maybe start a new occlusion of random duration.
    if trackDictionaryEntry.remainingOcclusionTime <= 0:
        if random.random() < occlusionProbability:
            trackDictionaryEntry.remainingOcclusionTime = random.random() * (occlusionMaxDuration - occlusionMinDuration) + occlusionMinDuration

    # Is the track occluded? (an occlusion started above takes effect immediately)
    if trackDictionaryEntry.remainingOcclusionTime <= 0:
        trackedPerson.detection_id = detection_id
        trackedPerson.is_occluded = False
    else :
        trackedPerson.is_occluded = True
        # Count down by one frame period per call.
        trackDictionaryEntry.remainingOcclusionTime -= 1.0 / updateRateHz

    #
    # Simulate track ID switches
    #

    idSwitchProbability = 0.001
    if random.random() < idSwitchProbability:
        trackDictionaryEntry.idShift += 1
        trackDictionaryEntry.lastIdShiftAt = rospy.Time.now()

    idShiftAmount = 66 # better don't change this as other mock components might rely upon this to consistently fake their data across ID switches
    trackedPerson.track_id = track_id + trackDictionaryEntry.idShift * idShiftAmount
    # Age restarts whenever the ID switches, as a real tracker would report.
    trackedPerson.age = rospy.Time.now() - trackDictionaryEntry.lastIdShiftAt

    # Determine track position
    setPoseAndTwistFromAngle(trackedPerson.pose, trackedPerson.twist, angle, radius, moving)

    # Track position noise
    trackedPerson.pose.pose.position.x += random.random() * 0.1 - 0.05
    trackedPerson.pose.pose.position.y += random.random() * 0.1 - 0.05

    tracks.append(trackedPerson)

    # Non-occluded tracks additionally produce a noisy detection.
    if not trackedPerson.is_occluded:
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = detection_id
        detectedPerson.confidence = random.random() * 0.5 + 0.5

        detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
        detectedPerson.pose.pose.position.x += random.random() * 0.5 - 0.25 # introduce some noise on detected position
        detectedPerson.pose.pose.position.y += random.random() * 0.5 - 0.25

        detections.append(detectedPerson)

    return
Example #6
0
    def createTrackedPersons(self, timestamp):
        """Build a TrackedPersons message by interpolating every database track at *timestamp*.

        For each track whose waypoint time span contains *timestamp*, the
        position is linearly interpolated between the two adjacent waypoints
        (transformed into self.trackingFrame) and the velocity/heading is
        derived from that waypoint segment.  Tracks outside their time span,
        or whose transform fails, are skipped.

        :param timestamp: rospy time at which to sample the tracks
        :returns: TrackedPersons message stamped with *timestamp*
        """
        trackedPersons = TrackedPersons()
        trackedPersons.header.stamp = timestamp
        trackedPersons.header.frame_id = self.trackingFrame

        for trackKey, track in self.database.getTracks().iteritems():
            waypoints = self.database.getWaypoints(track)

            if waypoints:
                firstWaypoint = waypoints[0]
                lastWaypoint = waypoints[-1]

                # Check if we are before the first or past the last waypoint
                beforeFirstWaypoint = timestamp.to_sec(
                ) < firstWaypoint["timestamp"]
                behindLastWaypoint = timestamp.to_sec(
                ) > lastWaypoint["timestamp"]

                if not beforeFirstWaypoint and not behindLastWaypoint:
                    trackedPerson = TrackedPerson()
                    trackedPerson.track_id = self.database.getTrackId(trackKey)
                    trackedPerson.is_occluded = False
                    # NOTE(review): age is set to the ABSOLUTE first-waypoint
                    # time, not the elapsed duration (timestamp - first);
                    # confirm this is intended.
                    trackedPerson.age = rospy.Time(firstWaypoint["timestamp"])

                    # Find adjacent waypoints for linear interpolation of tracked person's position
                    previousWaypoint = nextWaypoint = None
                    for waypoint in waypoints:
                        previousWaypoint = nextWaypoint
                        nextWaypoint = waypoint
                        if nextWaypoint["timestamp"] > timestamp.to_sec():
                            break

                    try:
                        # If there is only one waypoint for this track, previousWaypoint will be None
                        nextPosition = self.transformer.toCommonFrame(
                            nextWaypoint, self.trackingFrame)
                        nextTimestamp = nextWaypoint["timestamp"]

                        # Determine waypoint segment duration and distance
                        if previousWaypoint is not None:
                            previousPosition = self.transformer.toCommonFrame(
                                previousWaypoint, self.trackingFrame)
                            previousTimestamp = previousWaypoint["timestamp"]
                            segmentDuration = nextTimestamp - previousTimestamp
                            alpha = (timestamp.to_sec() -
                                     previousTimestamp) / segmentDuration
                            diffVector = nextPosition - previousPosition
                        else:
                            # Single waypoint: stand still at that position.
                            previousPosition = nextPosition
                            segmentDuration = 1.0
                            alpha = 0.0
                            diffVector = numpy.array([0, 0])

                    except tf.Exception:
                        # Transform unavailable for this track; skip it.
                        continue

                    # Linear interpolation
                    interpolatedPosition = previousPosition + diffVector * alpha
                    velocity = diffVector / segmentDuration

                    trackedPerson.pose.pose.position.x, trackedPerson.pose.pose.position.y = interpolatedPosition
                    # Unobserved dimensions (z, roll, pitch) get a huge variance
                    trackedPerson.pose.covariance[
                        2 * 6 + 2] = trackedPerson.pose.covariance[
                            3 * 6 +
                            3] = trackedPerson.pose.covariance[4 * 6 +
                                                               4] = 99999999

                    # Face the direction of travel (yaw 0 for a stationary track)
                    yaw = math.atan2(velocity[1], velocity[0])
                    quaternion = tf.transformations.quaternion_from_euler(
                        0, 0, yaw)
                    trackedPerson.pose.pose.orientation = Quaternion(
                        x=quaternion[0],
                        y=quaternion[1],
                        z=quaternion[2],
                        w=quaternion[3])

                    trackedPerson.twist.twist.linear.x, trackedPerson.twist.twist.linear.y = velocity

                    trackedPersons.tracks.append(trackedPerson)

        return trackedPersons