def createTrackAndDetection(tracks, detections, track_id, detection_id, angle, radius):
    trackedPerson = TrackedPerson()
    trackedPerson.track_id = track_id

    if detection_id >= 0 :
        trackedPerson.detection_id = detection_id
        trackedPerson.is_occluded = False
    else:
        trackedPerson.is_occluded = True

    trackedPerson.age = rospy.Time.now() - startTime

    setPoseAndTwistFromAngle(trackedPerson.pose, trackedPerson.twist, angle, radius)
    tracks.append(trackedPerson)

    if detection_id >= 0:
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = detection_id
        detectedPerson.confidence = random.random()

        detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
        detectedPerson.pose.pose.position.x += random.random() * 0.5 - 0.25 # introduce some noise on observation position
        detectedPerson.pose.pose.position.y += random.random() * 0.5 - 0.25

        detections.append(detectedPerson)

    return
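# Both createTrackAndDetection() variants in this listing call a
# setPoseAndTwistFromAngle() helper that is defined elsewhere in the original
# mock node. A minimal sketch of what such a helper might look like, assuming
# the mock persons walk on a circle of the given radius (the circular-motion
# model and the unit speed are assumptions, not the original implementation):
import math
import tf
from geometry_msgs.msg import Quaternion

def setPoseAndTwistFromAngle(pose, twist, angle, radius, moving=True):
    # Position on the circle
    pose.pose.position.x = radius * math.cos(angle)
    pose.pose.position.y = radius * math.sin(angle)

    # Face along the direction of travel (tangent to the circle)
    yaw = angle + math.pi / 2.0
    q = tf.transformations.quaternion_from_euler(0, 0, yaw)
    pose.pose.orientation = Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])

    if moving:
        # Unit tangential velocity, purely illustrative
        twist.twist.linear.x = -math.sin(angle)
        twist.twist.linear.y = math.cos(angle)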
def newCompositeDetectedPersonsAvailable(compositeDetectedPersons):
    detectedPersons = DetectedPersons()
    detectedPersons.header = compositeDetectedPersons.header

    for compositeDetectedPerson in compositeDetectedPersons.elements:
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = compositeDetectedPerson.composite_detection_id
        detectedPerson.confidence = compositeDetectedPerson.max_confidence
        detectedPerson.pose = compositeDetectedPerson.pose

        involvedModalities = list(set([originalDetection.modality for originalDetection in compositeDetectedPerson.original_detections]))
        detectedPerson.modality = ",".join(sorted(involvedModalities))

        detectedPersons.detections.append(detectedPerson)

    detectedPersonsPublisher.publish(detectedPersons)
def newMessageReceived(poseArray):
    detectedPersons = DetectedPersons()
    detectedPersons.header = poseArray.header

    global detectionId, detectionIdIncrement

    for pose in poseArray.poses:
        detectedPerson = DetectedPerson()
        detectedPerson.modality = modality
        detectedPerson.confidence = confidence
        detectedPerson.detection_id = detectionId
        detectedPerson.pose.pose = pose

        # Diagonal covariance: position variance for x/y/z, rotation variance for roll/pitch/yaw
        for i in xrange(0, 6):
            detectedPerson.pose.covariance[i*6 + i] = posVariance if i < 3 else rotVariance

        detectedPersons.detections.append(detectedPerson)
        detectionId += detectionIdIncrement

    pub.publish(detectedPersons)
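# newMessageReceived() above relies on module-level state (modality,
# confidence, variances, detection ID counter, publisher) that is set up
# elsewhere in the original node. A hedged sketch of that setup; the topic
# names and parameter defaults below are assumptions:
import rospy
from geometry_msgs.msg import PoseArray
from spencer_tracking_msgs.msg import DetectedPerson, DetectedPersons

if __name__ == '__main__':
    rospy.init_node('pose_array_to_detected_persons')

    modality = rospy.get_param('~modality', 'unspecified')
    confidence = rospy.get_param('~confidence', 1.0)
    posVariance = rospy.get_param('~pos_variance', 0.2)
    rotVariance = rospy.get_param('~rot_variance', 99999.0)
    detectionId = rospy.get_param('~detection_id_offset', 0)
    detectionIdIncrement = rospy.get_param('~detection_id_increment', 1)

    pub = rospy.Publisher('detected_persons', DetectedPersons, queue_size=1)
    rospy.Subscriber('pose_array', PoseArray, newMessageReceived)
    rospy.spin()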
    def CallBack(self, data):
        detectedPersons = DetectedPersons()
        detectedPersons.header = data.header

        for person in data.people:
            detectedPerson = DetectedPerson()
            detectedPerson.modality = self.modality
            detectedPerson.confidence = self.confidence
            detectedPerson.detection_id = self.detectionId
            detectedPerson.pose.pose = person.pose

            for i in xrange(0, 6):
                detectedPerson.pose.covariance[i * 6 + i] = \
                    self.posVariance if i < 3 else self.rotVariance

            detectedPersons.detections.append(detectedPerson)
            self.detectionId += self.detectionIdIncrement

        self.pub.publish(detectedPersons)
def newMessageReceived(positionMeasurementArray):
    detectedPersons = DetectedPersons()

    if positionMeasurementArray.people:
        detectedPersons.header = positionMeasurementArray.people[0].header
    else:
        detectedPersons.header = positionMeasurementArray.header
        detectedPersons.header.frame_id = "odom"  # hack since frame_id is not known

    global detectionId, detectionIdIncrement
    for positionMeasurement in positionMeasurementArray.people:
        # We assume that all detections have the same frame ID
        assert(detectedPersons.header.frame_id == positionMeasurement.header.frame_id)

        # Construct DetectedPerson
        detectedPerson = DetectedPerson()
        detectedPerson.modality = positionMeasurement.name
        detectedPerson.confidence = positionMeasurement.reliability
        detectedPerson.pose.pose.position = positionMeasurement.pos
        
        # Covariance
        for x in xrange(0, 3):
            for y in xrange(0, 3):
                detectedPerson.pose.covariance[y*6 + x] = positionMeasurement.covariance[y*3 + x] * covScale

        for i in xrange(3, 6):
            detectedPerson.pose.covariance[i*6 + i] = 99999.0

        # Detection ID
        if useObjectId:
            match = re.search("[0-9]+", positionMeasurement.object_id)
            if match:
                detectedPerson.detection_id = int(match.group(0))
        else:
            detectedPerson.detection_id = detectionId
            detectionId += detectionIdIncrement

        detectedPersons.detections.append(detectedPerson)
        
    pub.publish(detectedPersons)
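# As above, this converter assumes globals defined in the node's main section.
# A hedged sketch of that setup (topic and parameter names are assumptions);
# PositionMeasurementArray is the output type of the wg-perception leg/face
# detectors from the people_msgs package:
import re
import rospy
from people_msgs.msg import PositionMeasurementArray
from spencer_tracking_msgs.msg import DetectedPerson, DetectedPersons

if __name__ == '__main__':
    rospy.init_node('position_measurements_to_detected_persons')

    covScale = rospy.get_param('~covariance_scale', 1.0)
    useObjectId = rospy.get_param('~use_object_id', False)  # derive detection_id from the detector's object_id
    detectionId = rospy.get_param('~detection_id_offset', 0)
    detectionIdIncrement = rospy.get_param('~detection_id_increment', 1)

    pub = rospy.Publisher('detected_persons', DetectedPersons, queue_size=1)
    rospy.Subscriber('position_measurements', PositionMeasurementArray, newMessageReceived)
    rospy.spin()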
    def detection_callback(self, msg, depth):
        """
        Callback for RGB images: The main logic is applied here

        Args:
        msg (cob_perception_msgs/DetectionArray): detections array
        depth (sensor_msgs/PointCloud2): depth image from camera

        """

        cv_depth = self._bridge.imgmsg_to_cv2(depth, "passthrough")

        # get the number of detections
        no_of_detections = len(msg.detections)
        spencer_detected_persons = DetectedPersons()
        spencer_detected_persons.header = msg.header
        # Check if there is a detection
        if no_of_detections > 0:
            for i, detection in enumerate(msg.detections):

                x = detection.mask.roi.x
                y = detection.mask.roi.y
                width = detection.mask.roi.width
                height = detection.mask.roi.height
                scale = 0.3
                # Crop the central (scale x scale) region of the bounding box
                # before sampling depth, to avoid background pixels at the edges
                height_delta = (height - height * scale) / 2
                width_delta = (width - width * scale) / 2

                cv_depth_bounding_box = cv_depth[
                    int(y + height_delta):int(y + height_delta +
                                              height * scale),
                    int(x + width_delta):int(x + width_delta + width * scale)]

                try:
                    # Median of the non-zero depth values inside the cropped box
                    depth_mean = np.nanmedian(
                        cv_depth_bounding_box[np.nonzero(cv_depth_bounding_box)])

                    # Pinhole back-projection using the camera intrinsics
                    real_x = (x + width / 2 - self.cx) * depth_mean / self.f
                    real_y = (y + height / 2 - self.cy) * depth_mean / self.f

                    msg.detections[i].pose.pose.position.x = real_x
                    msg.detections[i].pose.pose.position.y = real_y
                    msg.detections[i].pose.pose.position.z = depth_mean
                    if (msg.detections[i].label == 'person'):
                        spencer_detected_person = DetectedPerson()
                        spencer_detected_person.modality = spencer_detected_person.MODALITY_GENERIC_RGBD
                        spencer_detected_person.confidence = 1
                        spencer_detected_person.pose.pose = msg.detections[
                            i].pose.pose
                        LARGE_VARIANCE = 999999999
                        pose_variance = 0.05
                        # x/y/z position variance known, orientation unknown
                        spencer_detected_person.pose.covariance[0 * 6 + 0] = pose_variance
                        spencer_detected_person.pose.covariance[1 * 6 + 1] = pose_variance
                        spencer_detected_person.pose.covariance[2 * 6 + 2] = pose_variance
                        spencer_detected_person.pose.covariance[3 * 6 + 3] = LARGE_VARIANCE
                        spencer_detected_person.pose.covariance[4 * 6 + 4] = LARGE_VARIANCE
                        spencer_detected_person.pose.covariance[5 * 6 + 5] = LARGE_VARIANCE
                        spencer_detected_person.detection_id = self.current_detection_id
                        self.current_detection_id = self.current_detection_id + self.detection_id_increment
                        spencer_detected_persons.detections.append(
                            spencer_detected_person)

                except Exception as e:
                    print e

        self.pub.publish(msg)
        self.pub_spencer.publish(spencer_detected_persons)
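# The depth lookup above is a standard pinhole back-projection: given the
# camera intrinsics (focal length f, principal point cx/cy), a pixel (u, v)
# observed at depth z maps to X = (u - cx) * z / f and Y = (v - cy) * z / f.
# Restated as a standalone helper for clarity (a sketch, not part of the
# original node; assumes equal focal length in x and y, as the code above does):
def pixel_to_camera_frame(u, v, depth, f, cx, cy):
    """Back-project an image pixel at a given depth into camera coordinates."""
    real_x = (u - cx) * depth / f
    real_y = (v - cy) * depth / f
    return real_x, real_y, depth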
    def newSegmentationReceived(self, laserscan, laserscanSegmentation):
        currentStamp = laserscanSegmentation.header.stamp
        pointCount = len(laserscan.ranges)
        cartesianCoordinates = []

        # Required for velocity calculations
        if self._lastDataStamp is None:
            self._lastDataStamp = laserscanSegmentation.header.stamp

        # Build lookup of cartesian coordinates per laser point
        for pointIndex in xrange(0, pointCount):
            cartesianCoordinates.append(
                self.calculateCartesianCoordinates(laserscan, pointIndex))

        # For each labelled segment, create and append one TrackedPerson and DetectedPerson message
        trackedPersons = TrackedPersons(header=laserscanSegmentation.header)
        detectedPersons = DetectedPersons(header=laserscanSegmentation.header)
        for segment in laserscanSegmentation.segments:
            # Calculate centroid of tracked person
            centroid = numpy.array([0.0, 0.0, 0.0])
            for pointIndex in segment.measurement_indices:
                centroid += cartesianCoordinates[pointIndex]
            centroid /= float(len(segment.measurement_indices))

            # Lookup previous centroid (for velocity/twist calculation), assume zero velocity at track initialization
            if segment.label not in self._previousCentroidLookup:
                self._previousCentroidLookup[segment.label] = collections.deque()

            # Maintain centroid history
            centroidHistory = self._previousCentroidLookup[segment.label]
            while len(centroidHistory) > 20:
                centroidHistory.popleft()

            # Calculate average velocity over past few frames
            dt = 0
            velocity = numpy.array([0.0, 0.0, 0.0])
            accumulatedVelocity = numpy.array([0.0, 0.0, 0.0])  # separate arrays to avoid aliasing

            if centroidHistory:
                previousCentroid = centroid
                previousStamp = currentStamp
                for historyStamp, historyCentroid in reversed(centroidHistory):
                    accumulatedVelocity += previousCentroid - historyCentroid
                    dt += abs((previousStamp - historyStamp).to_sec())
                    previousCentroid = historyCentroid
                    previousStamp = historyStamp

                if dt > 0:  # guard against identical timestamps
                    velocity = accumulatedVelocity / dt

            centroidHistory.append((currentStamp, centroid))

            # Remember age of track
            if segment.label not in self._firstTrackEncounterLookup:
                self._firstTrackEncounterLookup[segment.label] = currentStamp

            # Initialize TrackedPerson message
            trackedPerson = TrackedPerson()

            trackedPerson.track_id = segment.label
            trackedPerson.age = currentStamp - self._firstTrackEncounterLookup[
                segment.label]
            trackedPerson.detection_id = self._detectionIdCounter
            trackedPerson.is_occluded = False

            # Set position
            LARGE_VARIANCE = 99999999
            trackedPerson.pose.pose.position.x = centroid[0]
            trackedPerson.pose.pose.position.y = centroid[1]
            trackedPerson.pose.pose.position.z = centroid[2]

            # Set orientation
            if dt > 0:
                yaw = math.atan2(velocity[1], velocity[0])
                quaternion = tf.transformations.quaternion_from_euler(
                    0, 0, yaw)
                trackedPerson.pose.pose.orientation = Quaternion(
                    x=quaternion[0],
                    y=quaternion[1],
                    z=quaternion[2],
                    w=quaternion[3])

            # z position, roll and pitch are not observed
            trackedPerson.pose.covariance[2 * 6 + 2] = LARGE_VARIANCE
            trackedPerson.pose.covariance[3 * 6 + 3] = LARGE_VARIANCE
            trackedPerson.pose.covariance[4 * 6 + 4] = LARGE_VARIANCE

            # Set velocity
            if dt > 0:
                trackedPerson.twist.twist.linear.x = velocity[0]
                trackedPerson.twist.twist.linear.y = velocity[1]
                trackedPerson.twist.twist.linear.z = velocity[2]

            # linear z and angular x, y, z are not observed
            trackedPerson.twist.covariance[2 * 6 + 2] = LARGE_VARIANCE
            trackedPerson.twist.covariance[3 * 6 + 3] = LARGE_VARIANCE
            trackedPerson.twist.covariance[4 * 6 + 4] = LARGE_VARIANCE
            trackedPerson.twist.covariance[5 * 6 + 5] = LARGE_VARIANCE

            # Append to list of tracked persons
            trackedPersons.tracks.append(trackedPerson)

            # Initialize DetectedPerson message by copying data from TrackedPerson
            detectedPerson = DetectedPerson()
            detectedPerson.detection_id = trackedPerson.detection_id
            detectedPerson.confidence = 1.0
            detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
            detectedPerson.pose.pose.orientation = Quaternion()
            for i in xrange(0, 2):
                detectedPerson.pose.covariance[i * 6 + i] = 0.17 * 0.17
            detectedPerson.pose.covariance[5 * 6 + 5] = LARGE_VARIANCE  # yaw

            detectedPersons.detections.append(detectedPerson)
            self._detectionIdCounter += 1

        # Publish tracked persons
        self.trackedPersonsPublisher.publish(trackedPersons)
        self.detectedPersonsPublisher.publish(detectedPersons)

        self._lastDataStamp = laserscanSegmentation.header.stamp
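    # newSegmentationReceived() calls a calculateCartesianCoordinates() method
    # that is not part of this listing. A minimal sketch under the usual
    # sensor_msgs/LaserScan conventions (an assumption, not the original
    # implementation): convert one scan point from range/bearing to a 3D point
    # in the scanner frame.
    def calculateCartesianCoordinates(self, laserscan, pointIndex):
        angle = laserscan.angle_min + pointIndex * laserscan.angle_increment
        distance = laserscan.ranges[pointIndex]
        return numpy.array([math.cos(angle) * distance,
                            math.sin(angle) * distance,
                            0.0])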
	def infer(self, velodyne_points, cluster_array, marker_array):

		global fout
		global is_save
		global frame_count

		start_time = time.time()

		# convert PointCloud2 message to PCL
		static_cloud = self.convertPLmsg2array(velodyne_points, mode='XYZI')

		clusters = cluster_array.clusters

		Cloud = []
		for cluster in clusters:
			cloud = self.convertPLmsg2array(cluster)
			cloud_np = np.asarray(cloud)
			# normalise the cluster position and scale intensity into [0, 1]
			cloud_centroid = np.mean(cloud_np[:, :3], axis=0)
			cloud_np[:, :3] -= cloud_centroid
			cloud_np[:, -1] = cloud_np[:, -1] / 255.
			Cloud.append(cloud_np)
			# print cloud_np

		Image = utility.convert2images(Cloud, self.saved_args.input_dim[0])

		feed = {self.model.input_data: Image}
		output, feature, pred = self.sess.run([self.model.output, self.model.feature, self.model.pred], feed)

		# index = np.nonzero(np.max(np.array(output, dtype=np.float32), axis=1) > 0.7)
		# pred = np.array(pred, dtype=np.float32)
		# pred = pred[index[0]]
		# pred = pred.tolist()

		# transform detection to /map
		done = False

		while not done and self.is_save_bbox:
			try:
				transform = self.tf_buffer.lookup_transform(self.target_frame, self.sensor_frame_id, rospy.Time(), rospy.Duration(10))
				done = True
			except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
				print "transform2 exception"
				continue

		print "transform done!"

		objs_markers = MarkerArray()
		detected_persons = DetectedPersons()
		detected_persons.header = velodyne_points.header

		for p, m, o, f in zip(pred, marker_array.markers, output, feature):

			r = 0
			g = 0
			b = 0

			if p == 1:
				r = 1.0
				bbox_min = m.points[10]
				bbox_max = m.points[0]

				detectedPerson = DetectedPerson()
				detectedPerson.pose.pose.position.x = (bbox_min.x + bbox_max.x) / 2.0
				detectedPerson.pose.pose.position.y = (bbox_min.y + bbox_max.y) / 2.0
				detectedPerson.pose.pose.position.z = (bbox_min.z + bbox_max.z) / 2.0
				detectedPerson.modality = DetectedPerson.MODALITY_GENERIC_LASER_3D
				detectedPerson.confidence = 1.0
				detectedPerson.detection_id = 0
				detected_persons.detections.append(detectedPerson)
			elif p == 2:
				r = 1.0
				g = 1.0  # truck
			else:
				continue

			m.color.r = r
			m.color.g = g
			m.color.b = b

			objs_markers.markers.append(m)

			if p >= 1:
				static_cloud = self.crop_box(static_cloud, m)

			if self.is_save_bbox:
				bbox_min = m.points[10]
				bbox_max = m.points[0]

				bbox_min_pt = self.transform_3d_pt([bbox_min.x, bbox_min.y, bbox_min.z], transform)
				bbox_max_pt = self.transform_3d_pt([bbox_max.x, bbox_max.y, bbox_max.z], transform)

				output_line = [frame_count] + bbox_min_pt + bbox_max_pt + o.tolist() + f.tolist()

				for i in output_line:
					fout.write("%f "% i)

				fout.write("\n")

		frame_count += 1

		self.marker_pub.publish(objs_markers)
		self.detected_persons_pub.publish(detected_persons)

		if self.velodyne_pub.get_num_connections():

			header = velodyne_points.header

			# datatype 7 == PointField.FLOAT32
			pointFieldx = PointField('x', 0, 7, 1)
			pointFieldy = PointField('y', 4, 7, 1)
			pointFieldz = PointField('z', 8, 7, 1)
			pointFieldi = PointField('intensity', 12, 7, 1)
			pointFields = [pointFieldx, pointFieldy, pointFieldz, pointFieldi]

			static_velodyne = point_cloud2.create_cloud(header, pointFields, static_cloud)

			self.velodyne_pub.publish(static_velodyne)

		print("[inference_node]: runing time = " + str(time.time()-start_time))
def createTrackAndDetection(tracks, detections, track_id, angle, radius, moving=True):
    trackedPerson = TrackedPerson()
    
    global trackDictionary, detectionCounter
    trackDictionaryEntry = None

    if track_id not in trackDictionary:
        trackDictionaryEntry = TrackDictionaryEntry()
        trackDictionary[track_id] = trackDictionaryEntry
    else:
        trackDictionaryEntry = trackDictionary[track_id]

    # Generate detection ID
    detection_id = detectionCounter
    detectionCounter += 1

    #
    # Simulate occlusions
    #
    occlusionProbability = 0.02
    occlusionMinDuration =  1.0 / updateRateHz # at least 1 frame
    occlusionMaxDuration = 15.0 / updateRateHz # at most 15 frames

    # Currently not occluded?
    if trackDictionaryEntry.remainingOcclusionTime <= 0:
        if random.random() < occlusionProbability:
            trackDictionaryEntry.remainingOcclusionTime = random.random() * (occlusionMaxDuration - occlusionMinDuration) + occlusionMinDuration

    # Is the track occluded?
    if trackDictionaryEntry.remainingOcclusionTime <= 0:
        trackedPerson.detection_id = detection_id
        trackedPerson.is_occluded = False
    else:
        trackedPerson.is_occluded = True
        trackDictionaryEntry.remainingOcclusionTime -= 1.0 / updateRateHz

    #
    # Simulate track ID switches
    #

    idSwitchProbability = 0.001
    if random.random() < idSwitchProbability:
        trackDictionaryEntry.idShift += 1
        trackDictionaryEntry.lastIdShiftAt = rospy.Time.now()

    idShiftAmount = 66 # better not change this, as other mock components may rely on it to fake their data consistently across ID switches
    trackedPerson.track_id = track_id + trackDictionaryEntry.idShift * idShiftAmount
    trackedPerson.age = rospy.Time.now() - trackDictionaryEntry.lastIdShiftAt

    # Determine track position
    setPoseAndTwistFromAngle(trackedPerson.pose, trackedPerson.twist, angle, radius, moving)

    # Track position noise
    trackedPerson.pose.pose.position.x += random.random() * 0.1 - 0.05
    trackedPerson.pose.pose.position.y += random.random() * 0.1 - 0.05

    tracks.append(trackedPerson)

    if not trackedPerson.is_occluded:
        detectedPerson = DetectedPerson()
        detectedPerson.detection_id = detection_id
        detectedPerson.confidence = random.random() * 0.5 + 0.5

        detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
        detectedPerson.pose.pose.position.x += random.random() * 0.5 - 0.25 # introduce some noise on detected position
        detectedPerson.pose.pose.position.y += random.random() * 0.5 - 0.25

        detections.append(detectedPerson)

    return
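# createTrackAndDetection() above keeps per-track state in TrackDictionaryEntry
# objects. A hedged reconstruction of that class from the fields used above
# (the original definition is not part of this listing):
import rospy

class TrackDictionaryEntry(object):
    def __init__(self):
        self.remainingOcclusionTime = 0.0      # seconds the track remains occluded
        self.idShift = 0                       # number of simulated ID switches so far
        self.lastIdShiftAt = rospy.Time.now()  # track age is measured from the last switch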
    def timer_cb_pros(self, timer):
        if (self.new_bounding):
            self.lock.acquire()
            self.new_bounding = False
            msg = self.message[0]
            depth = self.message[1]
            self.lock.release()
            t = TransformStamped()
            try:
                #print(rospy.Time.now().to_sec)
                #print(depth.header.stamp)
                #time=rospy.Time.now().to_sec-depth.header.stamp.secs
                #print(time)
                (trans,
                 rot) = self.listener.lookupTransform("odom",
                                                      depth.header.frame_id,
                                                      rospy.Time(0))

                t.header.stamp = rospy.Time.now()
                t.header.frame_id = "odom"
                t.child_frame_id = depth.header.frame_id
                t.transform.translation.x = trans[0]
                t.transform.translation.y = trans[1]
                t.transform.translation.z = trans[2]
                t.transform.rotation.x = rot[0]
                t.transform.rotation.y = rot[1]
                t.transform.rotation.z = rot[2]
                t.transform.rotation.w = rot[3]
            except (tf.LookupException):
                print("LookupException")
            except (tf.ConnectivityException):
                print("ConnectivityException")
            except (tf.ExtrapolationException):
                print("ExtrapolationException")

            cv_depth = self._bridge.imgmsg_to_cv2(depth, "passthrough")
            #cv_color = self._bridge.imgmsg_to_cv2(color, "rgb8")

            # get the number of detections
            no_of_detections = len(msg.bounding_boxes)
            spencer_detected_persons = DetectedPersons()
            spencer_detected_persons.header = msg.image_header
            # Check if there is a detection
            if no_of_detections > 0:
                for i, boxes in enumerate(msg.bounding_boxes):

                    x = boxes.xmin
                    y = boxes.ymin
                    xmax = boxes.xmax
                    ymax = boxes.ymax
                    width = xmax - x
                    height = ymax - y
                    scale = 0.6
                    height_delta = (height - height * scale) / 2

                    width_delta = (width - width * scale) / 2

                    cv_depth_bounding_box = cv_depth[
                        int(y + height_delta):int(y + height_delta +
                                                  height * scale),
                        int(x + width_delta):int(x + width_delta +
                                                 width * scale)]
                    #cv_color_debug=cv_color[int(y+height_delta):int(y+height_delta+height*scale),int(x+width_delta):int(x+width_delta+width*scale)]
                    try:
                        # Median of the non-zero depth values inside the cropped box
                        depth_mean = np.nanmedian(
                            cv_depth_bounding_box[np.nonzero(cv_depth_bounding_box)])

                        real_x = (x + width / 2 - self.cx) * depth_mean / self.f
                        real_y = (y + height / 2 - self.cy) * depth_mean / self.f

                        if (boxes.Class == 'person'):
                            spencer_detected_person = DetectedPerson()
                            spencer_detected_person.modality = spencer_detected_person.MODALITY_GENERIC_RGBD
                            spencer_detected_person.confidence = 1
                            spencer_detected_person.pose.pose.position.x = real_x
                            spencer_detected_person.pose.pose.position.y = real_y
                            spencer_detected_person.pose.pose.position.z = depth_mean
                            spencer_detected_person.pose.pose.orientation.w = 1.0
                            LARGE_VARIANCE = 999999999
                            pose_variance = 0.05
                            # x/y/z position variance known, orientation unknown
                            spencer_detected_person.pose.covariance[0 * 6 + 0] = pose_variance
                            spencer_detected_person.pose.covariance[1 * 6 + 1] = pose_variance
                            spencer_detected_person.pose.covariance[2 * 6 + 2] = pose_variance
                            spencer_detected_person.pose.covariance[3 * 6 + 3] = LARGE_VARIANCE
                            spencer_detected_person.pose.covariance[4 * 6 + 4] = LARGE_VARIANCE
                            spencer_detected_person.pose.covariance[5 * 6 + 5] = LARGE_VARIANCE
                            spencer_detected_person.detection_id = self.current_detection_id
                            self.current_detection_id = self.current_detection_id + self.detection_id_increment
                            spencer_detected_persons.detections.append(
                                spencer_detected_person)
                            #self.pub_pic.publish(self.bridge.cv2_to_imgmsg(cv_color_debug,  "rgb8"))

                    except Exception as e:
                        print e
            self.new_msgs = True
            self.last_time = rospy.get_rostime().secs
            self.spencer_detect = spencer_detected_persons
            spencer_detect_odom = DetectedPersonsOdom()
            spencer_detect_odom.DetectedPersons = spencer_detected_persons
            spencer_detect_odom.Transform = t
            #self.pub_spencer.publish(spencer_detected_persons)
            self.pub_spencer_odom.publish(spencer_detect_odom)
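    # DetectedPersonsOdom is a custom message that pairs the detections with
    # the odom transform captured at detection time. A hedged sketch of its
    # .msg definition, inferred from the fields accessed above:
    #
    #   # DetectedPersonsOdom.msg
    #   spencer_tracking_msgs/DetectedPersons DetectedPersons
    #   geometry_msgs/TransformStamped Transform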
    def get_bbox_cog(self, yolo_boxes, cloud):
        # Clear the people array
        self.people.people = list()

        # Clear the people pose array
        self.people_poses.poses = list()

        # Clear the bounding box array
        self.people_bounding_boxes.boxes = list()

        # Clear the detected persons array
        self.detected_persons.detections = list()

        # Assign arbitrary IDs
        person_id = 0

        for detection in yolo_boxes.bboxes:
            if detection.Class == 'person':
                bbox = [
                    detection.xmin, detection.ymin, detection.xmax,
                    detection.ymax
                ]
                #try:
                person_pose, person_bounding_box = self.get_bbox_pose(
                    bbox, cloud)
                if person_pose is None:
                    continue
                #except:
                #    print person_pose, person_bounding_box
                #    os._exit(1)

                # Pose
                self.people_poses.poses.append(person_pose.pose)

                # Bounding box
                self.people_bounding_boxes.boxes.append(person_bounding_box)

                # Person message type for social cost map
                person = Person()
                person.position = person_pose.pose.position
                #person.position.z = 0.0
                person.name = str(len(self.people.people))
                person.reliability = 1.0

                # Spencer DetectedPerson
                detected_person = DetectedPerson()
                detected_person.modality = DetectedPerson.MODALITY_GENERIC_RGBD
                detected_person.detection_id = person_id
                person_id += 1
                detected_person.pose.pose = person_pose.pose
                self.detected_persons.detections.append(detected_person)

                self.people.people.append(person)

        now = rospy.Time.now()

        self.people.header.stamp = now
        self.people_pub.publish(self.people)

        self.people_poses.header.stamp = now
        self.people_poses_pub.publish(self.people_poses)

        self.people_bounding_boxes.header.stamp = now
        self.people_bounding_boxes_pub.publish(self.people_bounding_boxes)

        self.detected_persons.header.stamp = now
        self.detected_persons_pub.publish(self.detected_persons)