# run ant again
# /ZenoDial/output_text, zenodial.java, output.java
# /ZenoDial/text_input
# /facedetect -- face detector
# .. also add pub and subscribe for
# map zenodial output directly to itf_talk
# upload changes to zenodial+robot, launch with ant ZenoDial
# connect speech active and enable listen

if __name__ == "__main__":
    rospy.init_node("RS1_behavior")
    # Subscribe to distance, compass, face bounding box; object models to be loaded.
    board = blackboard.Blackboard("behavior")
    board["wake_up_time"] = 6000  # 60 seconds
    board["stt"] = ""
    board["stt_read"] = True
    board["stop_counter"] = 0
    board["sonar_cm"] = 0
    board["compass_deg"] = 0
    board["num_faces"] = 0
    board["trackers"] = trackers
    board["tracker_watch"] = 0
    board["to_track"] = "null"
    board["track_delay"] = 0  # delay motor movement commands by about 1/4 to 1/2 second
    board["face_memory"] = 0
    # board["face_seen"] = face_box()
    board["img"] = Image()
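All of these examples treat the blackboard as a plain mapping shared between ROS callbacks and behavior-tree nodes. A minimal sketch of that interface, assuming only the subscript access the snippets actually use (the real `blackboard` module, e.g. owyl's, may carry more than this):

# Minimal, illustrative stand-in for the dict-style board the examples assume.
class Blackboard(object):
    def __init__(self, name="", **kwargs):
        self.name = name            # label only, not used for lookups
        self._data = dict(kwargs)   # shared key/value store

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value

board = Blackboard("behavior")
board["sonar_cm"] = 42          # a sensor callback writes...
assert board["sonar_cm"] == 42  # ...and any tree node holding `board` reads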
def __init__(self):
    self.blackboard = blackboard.Blackboard("rig expressions")
    config = ConfigParser.ConfigParser()
    config.readfp(open(os.path.join(os.path.dirname(__file__), "../behavior.cfg")))
    self.blackboard["current_emotion"] = config.get("emotion", "default_emotion")
    self.blackboard["current_emotion_intensity"] = config.getfloat("emotion", "default_emotion_intensity")
    self.blackboard["current_emotion_duration"] = config.getfloat("emotion", "default_emotion_duration")
    self.blackboard["emotion_classes"] = []
    self.blackboard["gesture_classes"] = []
    self.blackboard["emotion_scale_stage"] = config.getfloat("emotion", "emotion_scale_stage")
    self.blackboard["emotion_scale_closeup"] = config.getfloat("emotion", "emotion_scale_closeup")
    self.blackboard["gesture_scale_stage"] = config.getfloat("gesture", "gesture_scale_stage")
    self.blackboard["gesture_scale_closeup"] = config.getfloat("gesture", "gesture_scale_closeup")
    self.unpack_config_emotions(config, "frustrated_emotions")
    self.unpack_config_emotions(config, "positive_emotions")
    self.unpack_config_emotions(config, "non_positive_emotion")
    self.unpack_config_emotions(config, "bored_emotions")
    self.unpack_config_emotions(config, "non_bored_emotion")
    self.unpack_config_emotions(config, "sleep_emotions")
    self.unpack_config_emotions(config, "non_sleep_emotion")
    self.unpack_config_emotions(config, "wake_up_emotions")
    self.unpack_config_emotions(config, "new_arrival_emotions")
    self.unpack_config_gestures(config, "positive_gestures")
    self.unpack_config_gestures(config, "bored_gestures")
    self.unpack_config_gestures(config, "sleep_gestures")
    self.unpack_config_gestures(config, "wake_up_gestures")
    self.blackboard["min_duration_for_interaction"] = config.getfloat("interaction", "duration_min")
    self.blackboard["max_duration_for_interaction"] = config.getfloat("interaction", "duration_max")
    self.blackboard["time_to_change_face_target_min"] = config.getfloat("interaction", "time_to_change_face_target_min")
    self.blackboard["time_to_change_face_target_max"] = config.getfloat("interaction", "time_to_change_face_target_max")
    self.blackboard["glance_probability"] = config.getfloat("interaction", "glance_probability")
    self.blackboard["glance_probability_for_new_faces"] = config.getfloat("interaction", "glance_probability_for_new_faces")
    self.blackboard["glance_probability_for_lost_faces"] = config.getfloat("interaction", "glance_probability_for_lost_faces")
    self.blackboard["z_pitch_eyes"] = config.getfloat("interaction", "z_pitch_eyes")
    self.blackboard["max_glance_distance"] = config.getfloat("interaction", "max_glance_distance")
    self.blackboard["face_study_probabilities"] = config.getfloat("interaction", "face_study_probabilities")
    self.blackboard["face_study_duration_min"] = config.getfloat("interaction", "face_study_duration_min")
    self.blackboard["face_study_duration_max"] = config.getfloat("interaction", "face_study_duration_max")
    self.blackboard["face_study_z_pitch_nose"] = config.getfloat("interaction", "face_study_z_pitch_nose")
    self.blackboard["face_study_z_pitch_mouth"] = config.getfloat("interaction", "face_study_z_pitch_mouth")
    self.blackboard["face_study_y_pitch_left_ear"] = config.getfloat("interaction", "face_study_y_pitch_left_ear")
    self.blackboard["face_study_y_pitch_right_ear"] = config.getfloat("interaction", "face_study_y_pitch_right_ear")
    self.blackboard["sleep_probability"] = config.getfloat("boredom", "sleep_probability")
    self.blackboard["sleep_duration_min"] = config.getfloat("boredom", "sleep_duration_min")
    self.blackboard["sleep_duration_max"] = config.getfloat("boredom", "sleep_duration_max")
    self.blackboard["search_for_attention_index"] = 0
    self.blackboard["search_for_attention_duration_min"] = config.getfloat("boredom", "search_for_attention_duration_min")
    self.blackboard["search_for_attention_duration_max"] = config.getfloat("boredom", "search_for_attention_duration_max")
    self.blackboard["search_for_attention_targets"] = []
    self.unpack_config_look_around(config)
    self.blackboard["wake_up_probability"] = config.getfloat("boredom", "wake_up_probability")
    self.blackboard["time_to_wake_up"] = config.getfloat("boredom", "time_to_wake_up")

    ##### Other System Variables #####
    self.blackboard["show_expression_since"] = None

    # IDs of faces newly seen, or lost. Integer ID.
    self.blackboard["new_face"] = 0
    self.blackboard["lost_face"] = 0
    # IDs of faces in the scene, updated once per cycle
    self.blackboard["face_targets"] = []
    # IDs of faces in the scene, updated immediately
    self.blackboard["background_face_targets"] = []
    self.blackboard["current_glance_target"] = 0
    self.blackboard["current_face_target"] = 0
    self.blackboard["interact_with_face_target_since"] = 0.0
    self.blackboard["sleep_since"] = 0.0
    self.blackboard["bored_since"] = 0.0
    self.blackboard["is_interruption"] = False
    self.blackboard["is_sleeping"] = False
    self.blackboard["behavior_tree_on"] = False
    self.blackboard["stage_mode"] = False
    # Flags to indicate which part of the face will be studied
    self.blackboard["face_study_nose"] = False
    self.blackboard["face_study_mouth"] = False
    self.blackboard["face_study_left_ear"] = False
    self.blackboard["face_study_right_ear"] = False

    ##### ROS Connections #####
    self.facetrack = FaceTrack(self.blackboard)
    rospy.Subscriber("behavior_switch", String, self.behavior_switch_callback)
    rospy.Subscriber("/blender_api/available_emotion_states", AvailableEmotionStates, self.get_emotion_states_cb)
    rospy.Subscriber("/blender_api/available_gestures", AvailableGestures, self.get_gestures_cb)

    # Emotional content that the chatbot perceived, i.e. did it hear
    # (or reply with) angry words, polite words, etc.?
    # Currently supplying a string rather than a specific EmotionState with
    # timing, allowing that to be handled here, where timings have been tuned.
    print(' setting up chatbot_affect link')
    rospy.Subscriber("/eva/chatbot_affect_perceive", String, self.chatbot_affect_perceive_callback)

    self.do_pub_gestures = True
    self.do_pub_emotions = True
    self.emotion_pub = rospy.Publisher("/blender_api/set_emotion_state", EmotionState, queue_size=1)
    self.gesture_pub = rospy.Publisher("/blender_api/set_gesture", SetGesture, queue_size=1)
    self.affect_pub = rospy.Publisher("/eva/chatbot_affect_express", EmotionState, queue_size=1)

    self.tree = self.build_tree()
    time.sleep(0.1)

    log_filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../bhlog.csv")
    self.log_file = open(log_filename, 'wb')
    self.log_file.write('Action,Timestamp,Event\n')
    try:
        while not rospy.is_shutdown():
            self.tree.next()
    finally:
        self.log_file.close()
def BallDetected(data):
    board["ball_centered"] = False
    board["ball_detected"] = True
    board['ball'] = data
    # print "here"

if __name__ == "__main__":
    # Initialize node
    rospy.init_node('behavior', anonymous=False)
    robot_cmd_pub = rospy.Publisher('/command', String, queue_size=1)
    # Create the shared blackboard before subscribing, so the callback
    # never fires against an undefined board.
    board = blackboard.Blackboard("My board")
    # Initialize subscriber
    rospy.Subscriber('/ball_pose', Point, BallDetected)
    # Publishers on the board
    board["robot_cmd_pub"] = robot_cmd_pub
    board['ball'] = Point(0, 0, 0)
    # Init variables
    board["ball_detected"] = True
    board["ball_centered"] = True
    # Create a behavior tree using the blackboard object
    be = Behavior(board)
    be_tree = be.main_tree
    # Wait for bluetooth to connect
    rate = rospy.Rate(100)  # 100 Hz
    # rospy.spin()
    while not rospy.is_shutdown():
        # rospy.loginfo("Here in the loop")
        be_tree.next()
        rate.sleep()  # tick the tree at 100 Hz instead of spinning flat out
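The Behavior class this example instantiates is not shown. The loop only requires that main_tree be an iterator whose next() advances the tree one step, so a generator works as a stand-in; the skeleton below is an assumption built from the board keys the example defines, and the "center_ball" command string is hypothetical:

# Hypothetical skeleton: anything exposing an iterator as `main_tree`
# satisfies the `be_tree.next()` loop above.
class Behavior(object):
    def __init__(self, board):
        self.board = board
        self.main_tree = self.build_tree()

    def build_tree(self):
        # A trivial generator standing in for a real behavior tree:
        # one tick per next() call, reading and writing the shared board.
        while True:
            if self.board["ball_detected"] and not self.board["ball_centered"]:
                # rospy String publishers accept a plain str for the data field
                self.board["robot_cmd_pub"].publish("center_ball")
            yield None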
pan_angle_pub = rospy.Publisher('/act/robot/set_pan_angle',  # topic name assumed, mirroring set_tilt_angle
                                Int32, queue_size=1)
tilt_angle_pub = rospy.Publisher('/act/robot/set_tilt_angle', Int32, queue_size=1)
robot_cmd_pub = rospy.Publisher('/act/robot/send_move_command', robot_cmd, queue_size=1)

# Initialize subscribers
rospy.Subscriber('/ball_pose', Point, faceDetected)
rospy.Subscriber('/sense/robot/get_sonar_cm', sonar, readSonarDist)
rospy.Subscriber('/sense/robot/get_compass_deg', compass, readCompass)

# Create the blackboard object
board = blackboard.Blackboard()

# Publishers on the board
board["pan_angle_pub"] = pan_angle_pub
board["tilt_angle_pub"] = tilt_angle_pub
board["robot_cmd_pub"] = robot_cmd_pub

# Init variables
board["angles"] = {"pan": [45], "tilt": [60]}
board["sonar_data"] = {"distance": [200]}
board["compass_data"] = {"heading": [0]}
board["bodyPose"] = []
board["start_pan"] = 45
board["start_tilt"] = 60
# Records the heading right after the camera track has stopped
board["HeadingAfterCT"] = []
def setUp(self):
    director.init()
    self.bb = blackboard.Blackboard('test')
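For context, a complete test case built around that fixture might look like the following; the round-trip assertion is illustrative, and director.init() is kept from the snippet as an assumed required global setup call:

import unittest

class BlackboardTest(unittest.TestCase):
    def setUp(self):
        director.init()                          # global setup, as in the snippet
        self.bb = blackboard.Blackboard('test')

    def test_round_trip(self):
        # Values written to the board should read back unchanged.
        self.bb["answer"] = 42
        self.assertEqual(self.bb["answer"], 42)

if __name__ == '__main__':
    unittest.main()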
# run ant again
# /ZenoDial/output_text, zenodial.java, output.java
# /ZenoDial/text_input
# /facedetect -- face detector
# .. also add pub and subscribe for
# map zenodial output directly to itf_talk
# upload changes to zenodial+robot, launch with ant ZenoDial
# connect speech active and enable listen

if __name__ == "__main__":
    rospy.init_node("RS1_behavior")
    # Subscribe to distance, compass, face bounding box; object models to be loaded.
    board = blackboard.Blackboard(value="behavior")
    board["wake_up_time"] = 6000  # 60 seconds
    board["stt"] = ""
    board["stt_read"] = True
    board["stop_counter"] = 0
    board["sonar_cm"] = 0
    board["compass_deg"] = 0
    board["num_faces"] = 0
    board["trackers"] = trackers
    board["tracker_watch"] = 0
    board["to_track"] = "null"
    board["track_delay"] = 0  # delay motor movement commands by about 1/4 to 1/2 second
    board["face_memory"] = 0
    # board["face_seen"] = face_box()
    board["img"] = Image()
pan_angle_pub = rospy.Publisher('/act/robot/set_pan_angle',  # topic name assumed, mirroring set_tilt_angle
                                Int32, queue_size=1)
tilt_angle_pub = rospy.Publisher('/act/robot/set_tilt_angle', Int32, queue_size=1)
robot_cmd_pub = rospy.Publisher('/act/robot/send_move_command', robot_cmd, queue_size=1)

# Initialize subscribers
rospy.Subscriber('/ball_pose', Point, faceDetected)
rospy.Subscriber('/sense/robot/get_sonar_cm', sonar, readSonarDist)
rospy.Subscriber('/sense/robot/get_compass_deg', compass, readCompass)

# Create the blackboard object
board = blackboard.Blackboard('behavior')

# Publishers on the board
board["pan_angle_pub"] = pan_angle_pub
board["tilt_angle_pub"] = tilt_angle_pub
board["robot_cmd_pub"] = robot_cmd_pub

# Init variables
board["angles"] = {"pan": [45], "tilt": [60]}
board["sonar_data"] = {"distance": [200]}
board["compass_data"] = {"heading": [0]}
board["bodyPose"] = []
board["start_pan"] = 45
board["start_tilt"] = 60
# Records the heading right after the camera track has stopped
board["HeadingAfterCT"] = []
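The faceDetected, readSonarDist and readCompass callbacks wired up above are outside this snippet. A plausible sketch, appending each reading so tree nodes can inspect recent history; the field names on the sonar and compass messages are assumptions:

# Hypothetical callbacks; sonar/compass message field names are assumed.
def readSonarDist(msg):
    board["sonar_data"]["distance"].append(msg.distance)

def readCompass(msg):
    board["compass_data"]["heading"].append(msg.heading)

def faceDetected(point):
    # geometry_msgs/Point carries x, y, z
    board["bodyPose"].append((point.x, point.y))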