Example #1
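The entry point of the OpenCog ROS bridge: it initializes a ROS node, instantiates the perception trackers, and spins until shutdown.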

import logging
import rospy
from audio_power import AudioPower
from chat_track import ChatTrack
from face_track import FaceTrack
from sound_track import SoundTrack
from room_brightness import RoomBrightness
from saliency_track import SaliencyTrack


rospy.init_node("OpenCog_ROS_bridge")
logging.info("Starting the OpenCog ROS Bridge")
print "Starting the OpenCog ROS Bridge"

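# Instantiate the perception trackers (audio power, chat, faces,
# sound, room brightness, saliency).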
ap = AudioPower()
ct = ChatTrack()
ft = FaceTrack()
st = SoundTrack()
br = RoomBrightness()
sl = SaliencyTrack()

try:
	rospy.spin()
except rospy.ROSInterruptException as e:
	print(e)

print "Exit OpenCog ROS bridge"
Example #2
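The constructor of a behavior node: it loads tuning parameters from behavior.cfg into a blackboard, subscribes and publishes on the relevant ROS topics, builds the behavior tree, and steps it until ROS shuts down.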
	def __init__(self):

		self.blackboard = blackboard.Blackboard("rig expressions")

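		# Load the behavior tuning parameters from behavior.cfg, which
		# lives one directory up from this file.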
		config = ConfigParser.ConfigParser()
		with open(os.path.join(os.path.dirname(__file__), "../behavior.cfg")) as cfg_file:
			config.readfp(cfg_file)
		self.blackboard["current_emotion"] = config.get("emotion", "default_emotion")
		self.blackboard["current_emotion_intensity"] = config.getfloat("emotion", "default_emotion_intensity")
		self.blackboard["current_emotion_duration"] = config.getfloat("emotion", "default_emotion_duration")
		self.blackboard["emotion_classes"] = []
		self.blackboard["gesture_classes"] = []
		self.blackboard["emotion_scale_stage"] = config.getfloat("emotion", "emotion_scale_stage")
		self.blackboard["emotion_scale_closeup"] = config.getfloat("emotion", "emotion_scale_closeup")
		self.blackboard["gesture_scale_stage"] = config.getfloat("gesture", "gesture_scale_stage")
		self.blackboard["gesture_scale_closeup"] = config.getfloat("gesture", "gesture_scale_closeup")

		self.unpack_config_emotions(config, "frustrated_emotions")

		self.unpack_config_emotions(config, "positive_emotions")
		self.unpack_config_emotions(config, "non_positive_emotion")

		self.unpack_config_emotions(config, "bored_emotions")
		self.unpack_config_emotions(config, "non_bored_emotion")

		self.unpack_config_emotions(config, "sleep_emotions")
		self.unpack_config_emotions(config, "non_sleep_emotion")

		self.unpack_config_emotions(config, "wake_up_emotions")

		self.unpack_config_emotions(config, "new_arrival_emotions")

		self.unpack_config_gestures(config, "positive_gestures")

		self.unpack_config_gestures(config, "bored_gestures")

		self.unpack_config_gestures(config, "sleep_gestures")

		self.unpack_config_gestures(config, "wake_up_gestures")

		self.blackboard["min_duration_for_interaction"] = config.getfloat("interaction", "duration_min")
		self.blackboard["max_duration_for_interaction"] = config.getfloat("interaction", "duration_max")
		self.blackboard["time_to_change_face_target_min"] = config.getfloat("interaction", "time_to_change_face_target_min")
		self.blackboard["time_to_change_face_target_max"] = config.getfloat("interaction", "time_to_change_face_target_max")
		self.blackboard["glance_probability"] = config.getfloat("interaction", "glance_probability")
		self.blackboard["glance_probability_for_new_faces"] = config.getfloat("interaction", "glance_probability_for_new_faces")
		self.blackboard["glance_probability_for_lost_faces"] = config.getfloat("interaction", "glance_probability_for_lost_faces")
		self.blackboard["z_pitch_eyes"] = config.getfloat("interaction", "z_pitch_eyes")
		self.blackboard["max_glance_distance"] = config.getfloat("interaction", "max_glance_distance")
		self.blackboard["face_study_probabilities"] = config.getfloat("interaction", "face_study_probabilities")
		self.blackboard["face_study_duration_min"] = config.getfloat("interaction", "face_study_duration_min")
		self.blackboard["face_study_duration_max"] = config.getfloat("interaction", "face_study_duration_max")
		self.blackboard["face_study_z_pitch_nose"] = config.getfloat("interaction", "face_study_z_pitch_nose")
		self.blackboard["face_study_z_pitch_mouth"] = config.getfloat("interaction", "face_study_z_pitch_mouth")
		self.blackboard["face_study_y_pitch_left_ear"] = config.getfloat("interaction", "face_study_y_pitch_left_ear")
		self.blackboard["face_study_y_pitch_right_ear"] = config.getfloat("interaction", "face_study_y_pitch_right_ear")
		self.blackboard["sleep_probability"] = config.getfloat("boredom", "sleep_probability")
		self.blackboard["sleep_duration_min"] = config.getfloat("boredom", "sleep_duration_min")
		self.blackboard["sleep_duration_max"] = config.getfloat("boredom", "sleep_duration_max")
		self.blackboard["search_for_attention_index"] = 0
		self.blackboard["search_for_attention_duration_min"] = config.getfloat("boredom", "search_for_attention_duration_min")
		self.blackboard["search_for_attention_duration_max"] = config.getfloat("boredom", "search_for_attention_duration_max")
		self.blackboard["search_for_attention_targets"] = []
		self.unpack_config_look_around(config)
		self.blackboard["wake_up_probability"] = config.getfloat("boredom", "wake_up_probability")
		self.blackboard["time_to_wake_up"] = config.getfloat("boredom", "time_to_wake_up")

		##### Other System Variables #####
		self.blackboard["show_expression_since"] = None

		# IDs of faces newly seen or lost (integer IDs).
		self.blackboard["new_face"] = 0
		self.blackboard["lost_face"] = 0
		# IDs of faces in the scene, updated once per cycle
		self.blackboard["face_targets"] = []
		# IDs of faces in the scene, updated immediately
		self.blackboard["background_face_targets"] = []
		self.blackboard["current_glance_target"] = 0
		self.blackboard["current_face_target"] = 0
		self.blackboard["interact_with_face_target_since"] = 0.0
		self.blackboard["sleep_since"] = 0.0
		self.blackboard["bored_since"] = 0.0
		self.blackboard["is_interruption"] = False
		self.blackboard["is_sleeping"] = False
		self.blackboard["behavior_tree_on"] = False
		self.blackboard["stage_mode"] = False
		# Flags to indicate which part of the face will be studied
		self.blackboard["face_study_nose"] = False
		self.blackboard["face_study_mouth"] = False
		self.blackboard["face_study_left_ear"] = False
		self.blackboard["face_study_right_ear"] = False


		##### ROS Connections #####
		self.facetrack = FaceTrack(self.blackboard)

		rospy.Subscriber("behavior_switch", String, self.behavior_switch_callback)
		rospy.Subscriber("/blender_api/available_emotion_states",
			AvailableEmotionStates, self.get_emotion_states_cb)

		rospy.Subscriber("/blender_api/available_gestures",
			AvailableGestures, self.get_gestures_cb)

		# Emotional content that the chatbot perceived, i.e. did it hear
		# (or reply with) angry words, polite words, etc.?  The topic
		# currently supplies a plain string rather than a specific
		# EmotionState with timing, so the timing is handled here, where
		# it has been tuned.
		print(' setting up chatbot_affect link')
		rospy.Subscriber("/eva/chatbot_affect_perceive", String,
			self.chatbot_affect_perceive_callback)

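		# Flags and publishers for sending emotion states and gestures to
		# the blender API, and for relaying chatbot affect.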
		self.do_pub_gestures = True
		self.do_pub_emotions = True
		self.emotion_pub = rospy.Publisher("/blender_api/set_emotion_state",
			EmotionState, queue_size=1)
		self.gesture_pub = rospy.Publisher("/blender_api/set_gesture",
			SetGesture, queue_size=1)
		self.affect_pub = rospy.Publisher("/eva/chatbot_affect_express",
			EmotionState, queue_size=1)
		self.tree = self.build_tree()
		time.sleep(0.1)

		log_filename = os.path.join(os.path.abspath(
				os.path.dirname(__file__)), "../bhlog.csv")
		self.log_file = open(log_filename, 'w')
		self.log_file.write('Action,Timestamp,Event\n')
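		# Step the behavior tree until ROS shuts down; the log file is
		# closed on exit.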
		try:
			while not rospy.is_shutdown():
				self.tree.next()
		finally:
			self.log_file.close()
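For reference, here is a minimal sketch of the behavior.cfg layout implied by the reads above. The section and option names are taken directly from the config.get and config.getfloat calls; every value shown is a hypothetical placeholder, and the list sections consumed by unpack_config_emotions, unpack_config_gestures and unpack_config_look_around are only named, since their format is not visible in this snippet.

[emotion]
default_emotion = happy
default_emotion_intensity = 0.5
default_emotion_duration = 5.0
emotion_scale_stage = 1.0
emotion_scale_closeup = 0.5

[gesture]
gesture_scale_stage = 1.0
gesture_scale_closeup = 0.5

[interaction]
duration_min = 10.0
duration_max = 30.0
time_to_change_face_target_min = 5.0
time_to_change_face_target_max = 15.0
glance_probability = 0.1
glance_probability_for_new_faces = 0.5
glance_probability_for_lost_faces = 0.5
z_pitch_eyes = 0.0
max_glance_distance = 2.0
face_study_probabilities = 0.2
face_study_duration_min = 1.0
face_study_duration_max = 3.0
face_study_z_pitch_nose = -0.05
face_study_z_pitch_mouth = -0.1
face_study_y_pitch_left_ear = 0.05
face_study_y_pitch_right_ear = -0.05

[boredom]
sleep_probability = 0.1
sleep_duration_min = 10.0
sleep_duration_max = 30.0
search_for_attention_duration_min = 1.0
search_for_attention_duration_max = 4.0
wake_up_probability = 0.2
time_to_wake_up = 30.0

; Sections such as frustrated_emotions, positive_emotions, non_positive_emotion,
; bored_emotions, sleep_emotions, wake_up_emotions, new_arrival_emotions and the
; *_gestures sections are parsed by unpack_config_emotions /
; unpack_config_gestures, whose expected format is not shown in this snippet.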