Code example #1
class FaceRecog:
	def __init__(self):
		# The OpenCog API; used to forward face-recognition results.
		self.atomo = AtomicMsgs()
		# Recognition results published by the vision pipeline.
		rospy.Subscriber('/camera/face_recognition', faces_ids, self.face_cb)

	def face_cb(self, data):
		# Pass each recognized (face id, name) pair on to OpenCog.
		for fc in data.faces:
			self.atomo.face_recognition(fc.id, fc.name)
Code example #2
class FaceRecog:
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber('/camera/face_recognition', faces_ids, self.face_cb)

    def face_cb(self, data):
        for fc in data.faces:
            self.atomo.face_recognition(fc.id, fc.name)
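
The FaceRecog class above is an excerpt; the imports and node setup are not shown. Below is a minimal launch sketch, assuming a node name and import locations that do not appear in the original sources.

# Minimal launch sketch (illustrative; node name and import paths are assumptions).
import rospy
# from atomic_msgs import AtomicMsgs      # project-local helper; exact module path not shown above
# from <vision_pkg>.msg import faces_ids  # message package not shown in the excerpt

if __name__ == '__main__':
    rospy.init_node('face_recog')   # assumed node name
    FaceRecog()                     # registers the subscriber
    rospy.spin()                    # process callbacks until shutdown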
Code example #3
class FaceTrack:

	# Control flags. Ideally, FaceTrack should publish targets using
	# ros_commo EvaControl class.
	C_EYES = 16
	C_FACE = 32
	# Face tracking will be disabled if neither of these flags is set.
	# (This allows other control processes to manually override
	# face-tracking.)
	C_FACE_TRACKING = C_FACE | C_EYES

	def __init__(self):

		# The OpenCog API. This is used to send face data to OpenCog.
		self.atomo = AtomicMsgs()

		# List of currently visible faces
		self.visible_faces = []

		# Subscribed pi_vision topics and events
		self.TOPIC_FACE_EVENT = "/camera/face_event"
		self.EVENT_NEW_FACE = "new_face"
		self.EVENT_LOST_FACE = "lost_face"
		self.EVENT_RECOGNIZED_FACE = "recognized_face"
		# Overrides current face being tracked by WebUI
		self.EVENT_TRACK_FACE = "track_face"

		self.TOPIC_FACE_LOCATIONS = "/camera/face_locations"

		# Face appearance/disappearance from pi_vision
		rospy.Subscriber(self.TOPIC_FACE_EVENT, FaceEvent, self.face_event_cb)

		# Face location information from pi_vision
		rospy.Subscriber(self.TOPIC_FACE_LOCATIONS, Faces, self.face_loc_cb)

		rospy.Subscriber("/behavior_control", Int32, self.behavior_control_cb)

		# Control Eyes and face by default
		self.control_mode = 255

	# ----------------------------------------------------------
	# Start tracking a face
	def add_face(self, faceid):
		if faceid in self.visible_faces:
			return

		self.visible_faces.append(faceid)

		logger.info("New face added to visibile faces: " +
			str(self.visible_faces))
		self.atomo.add_face_to_atomspace(faceid)


	# Stop tracking a face
	def remove_face(self, faceid):
		self.atomo.remove_face_from_atomspace(faceid)

		if faceid in self.visible_faces:
			self.visible_faces.remove(faceid)

		logger.info("Lost face; visibile faces now: " + str(self.visible_faces))

	# Force the robot to turn its attention to the given face,
	# so that it can interact with (talk with) that face.
	def track_face(self, faceid):
		if faceid in self.visible_faces:
			logger.info("Face requested interaction: " + str(faceid))
			self.atomo.add_tracked_face_to_atomspace(faceid)

	# ----------------------------------------------------------
	# pi_vision ROS callbacks

	# pi_vision ROS callback, called when a new face is detected,
	# or a face is lost.
	def face_event_cb(self, data):
		if not self.control_mode & self.C_FACE_TRACKING:
			return

		if data.face_event == self.EVENT_NEW_FACE:
			self.add_face(data.face_id)

		elif data.face_event == self.EVENT_LOST_FACE:
			self.remove_face(data.face_id)

		elif data.face_event == self.EVENT_TRACK_FACE:
			self.track_face(data.face_id)

		elif data.face_event == self.EVENT_RECOGNIZED_FACE:
			self.atomo.face_recognition(data.face_id, data.recognized_id)

	# pi_vision ROS callback, called when pi_vision has new face
	# location data for us. This happens frequently (about 10x/second)
	def face_loc_cb(self, data):
		if not self.control_mode & self.C_FACE_TRACKING:
			return

		for face in data.faces:
			# Update location of a face. The location is stored in the
			# OpenCog space server (octomap).
			if face.id in self.visible_faces:
				self.atomo.update_face_octomap(face.id,
				            face.point.x, face.point.y, face.point.z)


	# Enable/disable OpenCog face-tracking.  This is driven by the
	# master control GUI.
	def behavior_control_cb(self, data):
		# Is facetracking currently enabled?
		facetracking = self.control_mode & self.C_FACE_TRACKING
		self.control_mode = data.data
		print("New Control mode %i" % self.control_mode )

		# If face-tracking was enabled, and is now disabled ...
		if facetracking > 0 and self.control_mode & self.C_FACE_TRACKING == 0:
			self.atomo.update_ft_state_to_atomspace(False)
			# Need to clear faces:
			for face in self.visible_faces[:]:
				self.remove_face(face)

		elif self.control_mode & self.C_FACE_TRACKING > 0:
			self.atomo.update_ft_state_to_atomspace(True)
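
The /behavior_control callback above treats the incoming Int32 as a bit mask (C_EYES = 16, C_FACE = 32); face-tracking stays enabled as long as either bit is set, and the default control_mode of 255 leaves it on. The sketch below is an illustration, not part of the original file: it shows how another node might toggle face-tracking by publishing such a control word.

# Illustration only: publish control words to /behavior_control.
import rospy
from std_msgs.msg import Int32

rospy.init_node('behavior_control_demo')   # assumed node name
pub = rospy.Publisher('/behavior_control', Int32, queue_size=1)
rospy.sleep(1.0)                           # give the connection time to form

pub.publish(Int32(data=16 | 32))   # C_EYES | C_FACE set: face-tracking enabled
pub.publish(Int32(data=0))         # both bits cleared: face-tracking disabled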
Code example #4
class FaceTrack:

	# Control flags. Ideally, FaceTrack should publish targets using
	# ros_commo EvaControl class.
	C_EYES = 16
	C_FACE = 32
	# Face tracking will be disabled if neither of these flags is set.
	# (This allows other control processes to manually override
	# face-tracking.)
	C_FACE_TRACKING = C_FACE | C_EYES

	def __init__(self):

		# The OpenCog API. This is used to send face data to OpenCog.
		self.atomo = AtomicMsgs()
		self.atomo.create_face_octomap()

		# List of currently visible faces
		self.visible_faces = []

		# Subscribed pi_vision topics and events
		self.TOPIC_FACE_EVENT = "/camera/face_event"
		self.EVENT_NEW_FACE = "new_face"
		self.EVENT_LOST_FACE = "lost_face"
		self.EVENT_RECOGNIZED_FACE = "recognized_face"
		# Overrides current face being tracked by WebUI
		self.EVENT_TRACK_FACE = "track_face"

		self.TOPIC_FACE_LOCATIONS = "/camera/face_locations"

		# Face appearance/disappearance from pi_vision
		rospy.Subscriber(self.TOPIC_FACE_EVENT, FaceEvent, self.face_event_cb)

		# Face location information from pi_vision
		rospy.Subscriber(self.TOPIC_FACE_LOCATIONS, Faces, self.face_loc_cb)

		rospy.Subscriber("/behavior_control", Int32, self.behavior_control_cb)

		# Control Eyes and face by default
		self.control_mode = 255

	# ----------------------------------------------------------
	# Start tracking a face
	def add_face(self, faceid):
		if faceid in self.visible_faces:
			return

		self.visible_faces.append(faceid)

		logger.info("New face added to visibile faces: " +
			str(self.visible_faces))
		self.atomo.add_face_to_atomspace(faceid)


	# Stop tracking a face
	def remove_face(self, faceid):
		self.atomo.remove_face_from_atomspace(faceid)

		if faceid in self.visible_faces:
			self.visible_faces.remove(faceid)

		logger.info("Lost face; visibile faces now: " + str(self.visible_faces))

	# Force the robot to turn its attention to the given face,
	# so that it can interact with (talk with) that face.
	def track_face(self, faceid):
		if faceid in self.visible_faces:
			logger.info("Face requested interaction: " + str(faceid))
			self.atomo.add_tracked_face_to_atomspace(faceid)

	# ----------------------------------------------------------
	# pi_vision ROS callbacks

	# pi_vision ROS callback, called when a new face is detected,
	# or a face is lost.  Also called for recognized faces.
	#
	# This callback handles recognized faces using a special message
	# format, published on the `/camera/face_locations` topic. Note
	# that there is also a different topic for recognized faces,
	# called `/camera/face_recognition`. See the `face-recog.py` file
	# for details. I am not sure which subsystem publishes which
	# message type. XXX FIXME - figure out why there are two different
	# face-recognition subsystems, and standardize on the one we
	# should use.
	def face_event_cb(self, data):
		if not self.control_mode & self.C_FACE_TRACKING:
			return

		if data.face_event == self.EVENT_NEW_FACE:
			self.add_face(data.face_id)

		elif data.face_event == self.EVENT_LOST_FACE:
			self.remove_face(data.face_id)

		elif data.face_event == self.EVENT_TRACK_FACE:
			self.track_face(data.face_id)

		elif data.face_event == self.EVENT_RECOGNIZED_FACE:
			self.atomo.face_recognition(data.face_id, data.recognized_id)

	# pi_vision ROS callback, called when pi_vision has new face
	# location data for us. This happens frequently (about 10x/second)
	def face_loc_cb(self, data):
		if not self.control_mode & self.C_FACE_TRACKING:
			return

		for face in data.faces:
			# Update location of a face. The location is stored in the
			# OpenCog space server (octomap).
			if face.id in self.visible_faces:
				self.atomo.update_face_octomap(face.id,
				            face.point.x, face.point.y, face.point.z)


	# Enable/disable OpenCog face-tracking.  This is driven by the
	# master control GUI. XXX FIXME -- why should this ever be disabled?
	# OpenCog should always know about faces; perhaps it is control of
	# head and eye movements that should be disabled?
	def behavior_control_cb(self, data):
		# Is facetracking currently enabled?
		facetracking = self.control_mode & self.C_FACE_TRACKING
		self.control_mode = data.data
		print("New Control mode %i" % self.control_mode )

		# If face-tracking was enabled, and is now disabled ...
		if facetracking > 0 and self.control_mode & self.C_FACE_TRACKING == 0:
			self.atomo.update_ft_state_to_atomspace(False)
			# Need to clear faces:
			for face in self.visible_faces[:]:
				self.remove_face(face)

		elif self.control_mode & self.C_FACE_TRACKING > 0:
			self.atomo.update_ft_state_to_atomspace(True)
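
As an aside, the callbacks above can be exercised without a running ROS graph, which makes it easy to watch the bookkeeping in visible_faces. The sketch below is purely illustrative and not part of the original sources: the stubs only mirror the message fields and AtomicMsgs methods that this excerpt actually calls, and it assumes the module defining FaceTrack also defines the module-level logger that add_face and remove_face log to.

# Illustrative offline harness; not part of the original sources.
class StubAtomicMsgs:
    # Only the calls made by the excerpt above; all are simple prints here.
    def create_face_octomap(self):                   print("create octomap")
    def add_face_to_atomspace(self, fid):            print("add face", fid)
    def remove_face_from_atomspace(self, fid):       print("remove face", fid)
    def add_tracked_face_to_atomspace(self, fid):    print("track face", fid)
    def face_recognition(self, fid, name):           print("recognized", fid, name)
    def update_ft_state_to_atomspace(self, enabled): print("tracking enabled:", enabled)
    def update_face_octomap(self, fid, x, y, z):     print("location", fid, x, y, z)

class FakeFaceEvent:
    # Mirrors the FaceEvent fields read by face_event_cb.
    def __init__(self, face_event, face_id, recognized_id=""):
        self.face_event = face_event
        self.face_id = face_id
        self.recognized_id = recognized_id

ft = FaceTrack.__new__(FaceTrack)   # bypass __init__ so no ROS node is required
ft.atomo = StubAtomicMsgs()
ft.visible_faces = []
ft.control_mode = FaceTrack.C_FACE_TRACKING
ft.EVENT_NEW_FACE = "new_face"
ft.EVENT_LOST_FACE = "lost_face"
ft.EVENT_TRACK_FACE = "track_face"
ft.EVENT_RECOGNIZED_FACE = "recognized_face"

# NOTE: add_face/remove_face also call the module-level `logger` defined in the full file.
ft.face_event_cb(FakeFaceEvent("new_face", 1))              # face 1 becomes visible
ft.face_event_cb(FakeFaceEvent("recognized_face", 1, "42")) # forwarded to the stub
ft.face_event_cb(FakeFaceEvent("lost_face", 1))             # face 1 is removed again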