class TTSFeedback:
    """Listen for notifications that TTS (or a chatbot) has started or
    stopped vocalizing, and relay them to the AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber("speech_events", String, self.speech_event_cb)

    # Notification from the text-to-speech (TTS) module that it has
    # started or stopped vocalizing.  The message may be published by
    # the TTS module itself, or by some external chatbot.
    #
    #   rostopic pub --once speech_events std_msgs/String start
    #   rostopic pub --once speech_events std_msgs/String stop
    def speech_event_cb(self, speech_event):
        event = speech_event.data
        print('speech_event, type ' + event)
        if event == "start":
            rospy.loginfo("starting speech")
            self.atomo.vocalization_started()
        elif event == "stop":
            rospy.loginfo("ending speech")
            self.atomo.vocalization_ended()
        elif event.startswith("duration"):
            rospy.loginfo("speech_event.data {}".format(event))
        else:
            rospy.logerr("unknown speech_events message: " + event)
class RoomBrightness:
    """Relay room-luminance measurements into the AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber('/opencog/room_luminance', Luminance,
                         self.bright_cb)

    # Pass each measured brightness value straight through.
    def bright_cb(self, data):
        self.atomo.room_brightness(data.value)
class RoomBrightness:
    """Push ambient-light readings into the AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber('/opencog/room_luminance', Luminance, self.bright_cb)

    # Forward each luminance sample unchanged.
    def bright_cb(self, data):
        value = data.value
        self.atomo.room_brightness(value)
def __init__(self):
    """Wire up the pi_vision face topics and the behavior-control topic."""
    # The OpenCog API. This is used to send face data to OpenCog.
    self.atomo = AtomicMsgs()

    # List of currently visible faces
    self.visible_faces = []

    # Subscribed pi_vision topics and events
    self.TOPIC_FACE_EVENT = "/camera/face_event"
    self.EVENT_NEW_FACE = "new_face"
    self.EVENT_LOST_FACE = "lost_face"
    self.EVENT_RECOGNIZED_FACE = "recognized_face"
    # Overrides current face being tracked by WebUI
    self.EVENT_TRACK_FACE = "track_face"
    self.TOPIC_FACE_LOCATIONS = "/camera/face_locations"

    # Face appearance/disappearance from pi_vision
    rospy.Subscriber(self.TOPIC_FACE_EVENT, FaceEvent, self.face_event_cb)

    # Face location information from pi_vision
    rospy.Subscriber(self.TOPIC_FACE_LOCATIONS, Faces, self.face_loc_cb)

    rospy.Subscriber("/behavior_control", Int32, self.behavior_control_cb)

    # Control Eyes and face by default.
    # NOTE(review): 255 sets every bit, so both C_EYES and C_FACE are
    # enabled at startup -- confirm against the flag definitions.
    self.control_mode = 255
class FaceRecog:
    """Forward face-recognition results to the AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber('/camera/face_recognition', faces_ids,
                         self.face_cb)

    # One message may carry several recognized faces; report each one.
    def face_cb(self, data):
        for face in data.faces:
            self.atomo.face_recognition(face.id, face.name)
class SaliencyTrack:
    """Relay visual-saliency target points into the AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber('/nmpt_saliency_point', targets, self.sal_cb)

    def sal_cb(self, data):
        # Only the first saliency position in the message is reported.
        pos = data.positions[0]
        self.atomo.saliency(pos.x, pos.y, pos.z, data.degree)
class FaceRecog:
    """Forward face-recognition (id, name) pairs to the AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber('/camera/face_recognition', faces_ids, self.face_cb)

    # A single message may report several faces; record each one.
    def face_cb(self, data):
        for fc in data.faces:
            # Dropped a stray trailing semicolon (un-pythonic no-op).
            self.atomo.face_recognition(fc.id, fc.name)
class ChatTrack:
    """Feed speech-to-text results into the AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber("chatbot_speech", ChatMessage, self.stt_cb)

    # ---------------------------------------------------------------
    # Speech-to-text callback.  Low-confidence decodings are dropped.
    def stt_cb(self, msg):
        if msg.confidence < 50:
            return
        self.atomo.who_said(msg.utterance)
class AudioPower:
    """Forward audio-sensor measurements (sudden loudness change and
    overall sound energy) to the AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber("audio_sensors", audiodata, self.audio_cb)

    def audio_cb(self, data):
        # Parenthesized print works under both python2 and python3;
        # the bare `print` statement was python2-only.
        print("Sudden sound change {}".format(data.suddenchange))
        self.atomo.audio_bang(data.suddenchange)
        self.atomo.audio_energy(data.Decibel)
class ChatTrack:
    """Pass recognized speech on to the AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber("chatbot_speech", ChatMessage, self.stt_cb)

    # ---------------------------------------------------------------
    # Speech-to-text callback.  Only utterances decoded with at least
    # 50% confidence are forwarded.
    def stt_cb(self, msg):
        confident_enough = msg.confidence >= 50
        if confident_enough:
            self.atomo.who_said(msg.utterance)
class SaliencyTrack:
    """Convert 2D saliency image points into 3D coordinates and hand
    them to the AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber('/nmpt_saliency_point', targets, self.sal_cb)

    def sal_cb(self, data):
        loc = data.positions[0]
        # Map the [0,1]x[0,1] image position into [-1,1] coordinates;
        # x is a fixed unit distance in front of the camera.
        x = 1.0
        y = (loc.x * 2.0 - 1.0) * -1.0
        z = (loc.y * 2.0 - 1.0) * -1.0
        self.atomo.saliency(x, y, z, data.degree)
def __init__(self):
    """Set up parameter caches and subscribe to openpsi updates."""
    # A list of parameter names that are mirrored in opencog
    # for controlling psi-rules
    self.param_list = []

    # Parameter dictionary that is used for updating states
    # recorded in the atomspace. It is used to cache the
    # atomspace values.
    self.param_dict = {}

    self.atomo = AtomicMsgs()
    # Dynamic-reconfigure style Config messages drive the callback.
    rospy.Subscriber("/opencog_control/parameter_updates", Config,
                     self.openpsi_control_cb)
class SaliencyTrack:
    """Translate 2D saliency image points into 3D coordinates for the
    AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber('/nmpt_saliency_point', targets, self.sal_cb)

    def sal_cb(self, data):
        point = data.positions[0]
        # Rescale [0,1] image coordinates to [-1,1]; depth (x) is fixed.
        depth = 1.0
        horiz = -1.0 * (point.x * 2.0 - 1.0)
        vert = -(point.y * 2.0 - 1.0)
        self.atomo.saliency(depth, horiz, vert, data.degree)
class ChatTrack:
    """Report heard speech to the AtomSpace, both as speaker attribution
    and as perceived text."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber("chatbot_speech", ChatMessage,
                         self.chat_perceived_text_cb)

    # ---------------------------------------------------------------
    # Speech-to-text callback
    def chat_perceived_text_cb(self, msg):
        if msg.confidence < 50:
            return
        # XXX FIXME WTF Why are there two of these????
        # Surely one of these is enough to do the trick!
        self.atomo.who_said(msg.utterance)
        self.atomo.perceived_text(msg.utterance)
def __init__(self):
    """Enable sound localization when a mapping matrix is configured."""
    # The OpenCog API. This is used to send sound localization
    # data to OpenCog.
    self.atomo = AtomicMsgs()

    # Sound localization stays disabled unless the mapping matrix
    # (sound-device frame to camera frame) is supplied as a ROS param.
    parameter_name = "sound_localization/mapping_matrix"
    if rospy.has_param(parameter_name):
        self.sl_matrix = rospy.get_param(parameter_name)
        rospy.Subscriber("/manyears/source_pose", PoseStamped,
                         self.sound_cb)
        # Parenthesized print is valid on both python2 and python3;
        # the bare `print` statement was python2-only.
        print("Sound localization is enabled")
    else:
        print("Sound localization is disabled")
class ChatTrack:
    """Forward sufficiently-confident speech-to-text results to the
    AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber("chatbot_speech", ChatMessage,
                         self.chat_perceived_text_cb)

    # ---------------------------------------------------------------
    # Speech-to-text callback; ignores low-confidence decodings.
    def chat_perceived_text_cb(self, msg):
        if msg.confidence >= 50:
            utterance = msg.utterance
            # XXX FIXME WTF Why are there two of these????
            # Surely one of these is enough to do the trick!
            self.atomo.who_said(utterance)
            self.atomo.perceived_text(utterance)
class Control:
    """Switch the OpenCog behavior system on and off on request."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber("/behavior_switch", String,
                         self.behavior_switch_cb)

    # The 'btree_on' and 'btree_off' data-strings shouldn't be used,
    # as they are meant for switching on and off non-opencog demos.
    def behavior_switch_cb(self, data):
        # Parenthesized print works on both python2 and python3; the
        # bare `print` statement was python2-only.
        print("Received /behavior_switch " + data.data)
        if data.data == "opencog_on":
            self.atomo.wholeshow_start()
        # The two commands are mutually exclusive, so elif suffices.
        elif data.data == "opencog_off":
            self.atomo.wholeshow_stop()
def __init__(self):
    """Wire up pi_vision face topics, the behavior-control topic, and
    create the face octomap in the space server."""
    # The OpenCog API. This is used to send face data to OpenCog.
    self.atomo = AtomicMsgs()
    self.atomo.create_face_octomap()

    # List of currently visible faces
    self.visible_faces = []

    # Subscribed pi_vision topics and events
    self.TOPIC_FACE_EVENT = "/camera/face_event"
    self.EVENT_NEW_FACE = "new_face"
    self.EVENT_LOST_FACE = "lost_face"
    self.EVENT_RECOGNIZED_FACE = "recognized_face"
    # Overrides current face being tracked by WebUI
    self.EVENT_TRACK_FACE = "track_face"
    self.TOPIC_FACE_LOCATIONS = "/camera/face_locations"

    # Face appearance/disappearance from pi_vision
    rospy.Subscriber(self.TOPIC_FACE_EVENT, FaceEvent, self.face_event_cb)

    # Face location information from pi_vision
    rospy.Subscriber(self.TOPIC_FACE_LOCATIONS, Faces, self.face_loc_cb)

    rospy.Subscriber("/behavior_control", Int32, self.behavior_control_cb)

    # Control Eyes and face by default.
    # NOTE(review): 255 sets all bits, enabling both eye and face
    # control at startup -- confirm against the flag definitions.
    self.control_mode = 255
class SoundTrack:
    """Subscribe to ManyEars sound-source localization and record the
    position of the strongest sound source in the OpenCog space
    server."""
    def __init__(self):
        # The OpenCog API. This is used to send sound localization
        # data to OpenCog.
        self.atomo = AtomicMsgs()

        # Sound localization stays disabled unless the affine mapping
        # matrix (sound-device frame to camera frame) is configured.
        parameter_name = "sound_localization/mapping_matrix"
        if rospy.has_param(parameter_name):
            self.sl_matrix = rospy.get_param(parameter_name)
            rospy.Subscriber("/manyears/source_pose", PoseStamped,
                             self.sound_cb)
            # print() form runs on both python2 and python3.
            print("Sound localization is enabled")
        else:
            print("Sound localization is disabled")

    # ---------------------------------------------------------------
    # Store the location of the strongest sound-source in the
    # OpenCog space server. This data arrives at a rate of about
    # 30 Hz, currently, from ManyEars.
    def sound_cb(self, msg):
        # Convert to camera coordinates, using an affine matrix
        # (which combines a rotation and translation).
        #
        # A typical sl_matrix looks like this:
        #
        #    0.943789  0.129327   0.304204   0.00736024
        #   -0.131484  0.991228  -0.0134787  0.00895614
        #   -0.303278 -0.0272767  0.952513   0.0272001
        #    0         0          0          1
        #
        vs = [msg.pose.position.x,
              msg.pose.position.y,
              msg.pose.position.z,
              1]
        r = [0, 0, 0]
        for i in range(3):
            # BUGFIX: iterate over all four columns so the translation
            # column (j == 3) of the affine matrix is applied; the old
            # inner loop used range(0,3), which silently dropped the
            # translation even though vs carries the homogeneous 1.
            for j in range(4):
                r[i] += self.sl_matrix[i][j] * vs[j]
        self.atomo.update_sound(r[0], r[1], r[2])
class ControlPsi:
    """Mirror web-UI dynamic-reconfigure parameter updates into OpenPsi
    rule weights stored in the atomspace."""
    def __init__(self):
        # A list of parameter names that are mirrored in opencog
        # for controlling psi-rules
        self.param_list = []

        # Parameter dictionary that is used for updating states
        # recorded in the atomspace. It is used to cache the
        # atomspace values.
        self.param_dict = {}

        self.atomo = AtomicMsgs()
        rospy.Subscriber("/opencog_control/parameter_updates", Config,
                         self.openpsi_control_cb)

    # For web-ui interface
    def openpsi_control_cb(self, data):
        """
        This function is used for interactively modifying the weight
        of openpsi rules.
        """
        param_yaml = rosmsg.get_yaml_for_msg(data.doubles + data.ints)
        # safe_load parses plain scalars/lists/dicts, which is all the
        # rosmsg yaml contains; bare yaml.load can construct arbitrary
        # python objects and is deprecated without an explicit Loader.
        self.param_list = yaml.safe_load(param_yaml)

        for i in self.param_list:
            # Populate the parameter dictionary
            if i["name"] not in self.param_dict:
                self.param_dict[i["name"]] = i["value"]

            if i["name"] == "max_waiting_time":
                scm_str = '''(StateLink (AnchorNode "Chatbot: MaxWaitingTime") (TimeNode %f))''' % (i["value"])
            else:
                scm_str = '''(StateLink (ListLink (ConceptNode "OpenPsi: %s") (ConceptNode "OpenPsi: weight")) (NumberNode %f))''' % (i["name"], i["value"])

            self.atomo.evaluate_scm(scm_str)
class Affect:
    """React to the perceived emotional tone of speech directed at the
    robot."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber("chatbot_affect_perceive", String,
                         self.language_affect_perceive_cb)

    # The perceived emotional content of words spoken to the robot.
    # That is, were people being rude to the robot? Polite to it? Angry
    # with it? We subscribe; there may be multiple publishers of this
    # message: it might be supplied by some linguistic-processing module,
    # or it might be supplied by a chatbot.
    #
    # emo is of type std_msgs/String
    def language_affect_perceive_cb(self, emo):
        # Parenthesized print works on both python2 and python3; the
        # bare `print` statement was python2-only.
        print('chatbot perceived affect class =' + emo.data)
        rospy.loginfo('chatbot perceived affect class =' + emo.data)
        if emo.data == "happy":
            # behavior tree will use these predicates
            self.atomo.affect_happy()
        else:
            self.atomo.affect_negative()
def __init__(self):
    """Subscribe to every topic published under the tensorflow node's
    root topic and forward node-state messages to the atomspace."""
    self.atomo = AtomicMsgs()
    rospy.init_node('subscriber', anonymous=True)
    #print "hello"
    # Discover the published topics below the configured root topic,
    # then subscribe to each, tagging the callback with its topic name.
    root_topic = rospy.get_param("/tensorflow_node/publishing/topic")
    node_topics = rospy.get_published_topics(root_topic)
    for topic,_ in node_topics:
        #print topic
        rospy.Subscriber(topic, TFNodeState, callback=self.destin_cb,
                        callback_args=topic)
    # NOTE(review): rospy.spin() blocks, so this constructor does not
    # return until the node shuts down -- confirm this is intended.
    rospy.spin()
class AudioPower:
    """Report sudden sound changes (as a 1.0/0.0 "bang") and the sound
    energy level to the AtomSpace."""
    def __init__(self):
        self.atomo = AtomicMsgs()
        rospy.Subscriber("audio_sensors", audiodata, self.audio_cb)

    def audio_cb(self, data):
        # Collapse the SuddenChange flag into a 1.0/0.0 bang value.
        bang = 1.0 if data.SuddenChange else 0.0
        self.atomo.audio_bang(bang)
        self.atomo.audio_energy(data.Decibel)
def __init__(self):
    """Subscribe to room-luminance messages."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber('/opencog/room_luminance', Luminance, self.bright_cb)
def __init__(self):
    """Subscribe to visual-saliency target points."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber('/nmpt_saliency_point', targets, self.sal_cb)
def __init__(self):
    """Subscribe to face-recognition results."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber('/camera/face_recognition', faces_ids, self.face_cb)
def __init__(self):
    """Subscribe to speech-to-text chat messages."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber("chatbot_speech", ChatMessage, self.stt_cb)
def __init__(self):
    """Subscribe to speech-to-text chat messages."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber("chatbot_speech", ChatMessage, self.chat_perceived_text_cb)
def __init__(self):
    """Subscribe to speech-to-text chat messages."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber("chatbot_speech", ChatMessage, self.chat_perceived_text_cb)
def __init__(self):
    """Subscribe to behavior on/off switch messages."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber("/behavior_switch", String, self.behavior_switch_cb)
def __init__(self):
    """Subscribe to perceived-affect messages from the chatbot."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber("chatbot_affect_perceive", String,
        self.language_affect_perceive_cb)
def __init__(self):
    """Subscribe to TTS start/stop speech events."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber("speech_events", String, self.speech_event_cb)
class FaceTrack:
    """Track faces reported by pi_vision and mirror them into the
    atomspace; honors the master behavior-control bitmask."""

    # Control flags. Ideally, FaceTrack should publish targets using
    # ros_commo EvaControl class.
    C_EYES = 16
    C_FACE = 32
    # Face tracking will be disabled if neither of these flags are set.
    # (this allows for a manual over-ride of face-tracking by other
    # control processes.)
    C_FACE_TRACKING = C_FACE | C_EYES

    def __init__(self):
        # The OpenCog API. This is used to send face data to OpenCog.
        self.atomo = AtomicMsgs()

        # List of currently visible faces
        self.visible_faces = []

        # Subscribed pi_vision topics and events
        self.TOPIC_FACE_EVENT = "/camera/face_event"
        self.EVENT_NEW_FACE = "new_face"
        self.EVENT_LOST_FACE = "lost_face"
        self.EVENT_RECOGNIZED_FACE = "recognized_face"
        # Overrides current face being tracked by WebUI
        self.EVENT_TRACK_FACE = "track_face"
        self.TOPIC_FACE_LOCATIONS = "/camera/face_locations"

        # Face appearance/disappearance from pi_vision
        rospy.Subscriber(self.TOPIC_FACE_EVENT, FaceEvent, self.face_event_cb)

        # Face location information from pi_vision
        rospy.Subscriber(self.TOPIC_FACE_LOCATIONS, Faces, self.face_loc_cb)

        rospy.Subscriber("/behavior_control", Int32, self.behavior_control_cb)

        # Control Eyes and face by default
        self.control_mode = 255

    # ----------------------------------------------------------
    # Start tracking a face.  Idempotent: already-known faces are
    # silently ignored.
    def add_face(self, faceid):
        if faceid in self.visible_faces:
            return

        self.visible_faces.append(faceid)

        logger.info("New face added to visibile faces: " +
            str(self.visible_faces))
        self.atomo.add_face_to_atomspace(faceid)

    # Stop tracking a face.  The atomspace is told first, then the
    # local visible-faces cache is updated.
    def remove_face(self, faceid):
        self.atomo.remove_face_from_atomspace(faceid)
        if faceid in self.visible_faces:
            self.visible_faces.remove(faceid)

        logger.info("Lost face; visibile faces now: " +
            str(self.visible_faces))

    # Force the robot to turn its attention to the given
    # face (to interact with, talk with) that face.
def track_face(self, faceid):
    # Only faces that are currently visible can be tracked.
    if faceid in self.visible_faces:
        logger.info("Face requested interaction: " + str(faceid))
        self.atomo.add_tracked_face_to_atomspace(faceid)

# ----------------------------------------------------------
# pi_vision ROS callbacks

# pi_vision ROS callback, called when a new face is detected,
# or a face is lost.
def face_event_cb(self, data):
    # Ignore all face events while face tracking is disabled.
    if not self.control_mode & self.C_FACE_TRACKING:
        return

    if data.face_event == self.EVENT_NEW_FACE:
        self.add_face(data.face_id)

    elif data.face_event == self.EVENT_LOST_FACE:
        self.remove_face(data.face_id)

    elif data.face_event == self.EVENT_TRACK_FACE:
        self.track_face(data.face_id)

    elif data.face_event == self.EVENT_RECOGNIZED_FACE:
        self.atomo.face_recognition(data.face_id, data.recognized_id)

# pi_vision ROS callback, called when pi_vision has new face
# location data for us. This happens frequently (about 10x/second)
def face_loc_cb(self, data):
    if not self.control_mode & self.C_FACE_TRACKING:
        return

    for face in data.faces:
        # Update location of a face. The location is stored in the
        # OpenCog space server (octomap).
        if face.id in self.visible_faces:
            self.atomo.update_face_octomap(face.id,
                face.point.x, face.point.y, face.point.z)

# Enable/disable Opencog face-tracking. This is driven by the
# master control GUI.
def behavior_control_cb(self, data):
    # Is facetracking currently enabled?
    facetracking = self.control_mode & self.C_FACE_TRACKING
    self.control_mode = data.data
    print("New Control mode %i" % self.control_mode )

    # If face-tracking was enabled, and is now disabled ...
    if facetracking > 0 and self.control_mode & self.C_FACE_TRACKING == 0:
        self.atomo.update_ft_state_to_atomspace(False)
        # Need to clear faces:
        # (iterate over a copy, since remove_face mutates the list)
        for face in self.visible_faces[:]:
            self.remove_face(face)

    elif self.control_mode & self.C_FACE_TRACKING > 0:
        self.atomo.update_ft_state_to_atomspace(True)
def __init__(self):
    """Subscribe to visual-saliency target points."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber('/nmpt_saliency_point', targets, self.sal_cb)
def __init__(self):
    """Subscribe to face-recognition results."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber('/camera/face_recognition', faces_ids, self.face_cb)
def __init__(self):
    """Subscribe to audio-sensor measurements."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber("audio_sensors", audiodata, self.audio_cb)
class FaceTrack:
    """Track faces reported by pi_vision, mirror them into the
    atomspace, and maintain the face octomap; honors the master
    behavior-control bitmask."""

    # Control flags. Ideally, FaceTrack should publish targets using
    # ros_commo EvaControl class.
    C_EYES = 16
    C_FACE = 32
    # Face tracking will be disabled if neither of these flags are set.
    # (this allows for a manual over-ride of face-tracking by other
    # control processes.)
    C_FACE_TRACKING = C_FACE | C_EYES

    def __init__(self):
        # The OpenCog API. This is used to send face data to OpenCog.
        self.atomo = AtomicMsgs()
        self.atomo.create_face_octomap()

        # List of currently visible faces
        self.visible_faces = []

        # Subscribed pi_vision topics and events
        self.TOPIC_FACE_EVENT = "/camera/face_event"
        self.EVENT_NEW_FACE = "new_face"
        self.EVENT_LOST_FACE = "lost_face"
        self.EVENT_RECOGNIZED_FACE = "recognized_face"
        # Overrides current face being tracked by WebUI
        self.EVENT_TRACK_FACE = "track_face"
        self.TOPIC_FACE_LOCATIONS = "/camera/face_locations"

        # Face appearance/disappearance from pi_vision
        rospy.Subscriber(self.TOPIC_FACE_EVENT, FaceEvent, self.face_event_cb)

        # Face location information from pi_vision
        rospy.Subscriber(self.TOPIC_FACE_LOCATIONS, Faces, self.face_loc_cb)

        rospy.Subscriber("/behavior_control", Int32, self.behavior_control_cb)

        # Control Eyes and face by default
        self.control_mode = 255

    # ----------------------------------------------------------
    # Start tracking a face.  Idempotent: already-known faces are
    # silently ignored.
    def add_face(self, faceid):
        if faceid in self.visible_faces:
            return

        self.visible_faces.append(faceid)

        logger.info("New face added to visibile faces: " +
            str(self.visible_faces))
        self.atomo.add_face_to_atomspace(faceid)

    # Stop tracking a face.  The atomspace is told first, then the
    # local visible-faces cache is updated.
    def remove_face(self, faceid):
        self.atomo.remove_face_from_atomspace(faceid)
        if faceid in self.visible_faces:
            self.visible_faces.remove(faceid)

        logger.info("Lost face; visibile faces now: " +
            str(self.visible_faces))

    # Force the robot to turn its attention to the given
    # face (to interact with, talk with) that face.
def track_face(self, faceid):
    # Only faces that are currently visible can be tracked.
    if faceid in self.visible_faces:
        logger.info("Face requested interaction: " + str(faceid))
        self.atomo.add_tracked_face_to_atomspace(faceid)

# ----------------------------------------------------------
# pi_vision ROS callbacks

# pi_vision ROS callback, called when a new face is detected,
# or a face is lost.  Also called for recognized faces.
#
# This callback handles recognized faces using a special message
# format, published on the `/camera/face_locations`.  Note that
# there is also a different topic for recognized faces, called
# `/camera/face_recognition`. See the `face-recog.py` file for
# details.  I am not sure what subsystem published which message
# type. XXX FIXME - figure out why there are two different
# face recognition subsystems, and standardize one which we
# should use.
def face_event_cb(self, data):
    # Ignore all face events while face tracking is disabled.
    if not self.control_mode & self.C_FACE_TRACKING:
        return

    if data.face_event == self.EVENT_NEW_FACE:
        self.add_face(data.face_id)

    elif data.face_event == self.EVENT_LOST_FACE:
        self.remove_face(data.face_id)

    elif data.face_event == self.EVENT_TRACK_FACE:
        self.track_face(data.face_id)

    elif data.face_event == self.EVENT_RECOGNIZED_FACE:
        self.atomo.face_recognition(data.face_id, data.recognized_id)

# pi_vision ROS callback, called when pi_vision has new face
# location data for us. This happens frequently (about 10x/second)
def face_loc_cb(self, data):
    if not self.control_mode & self.C_FACE_TRACKING:
        return

    for face in data.faces:
        # Update location of a face. The location is stored in the
        # OpenCog space server (octomap).
        if face.id in self.visible_faces:
            self.atomo.update_face_octomap(face.id,
                face.point.x, face.point.y, face.point.z)

# Enable/disable Opencog face-tracking. This is driven by the
# master control GUI. XXX FIXME -- why should this ever be disabled?
# OpenCog should always know about faces; perhaps it is congtrol of
# head and eye movements that should be disabled?
def behavior_control_cb(self, data):
    """Enable or disable opencog face-tracking, as driven by the
    master control GUI."""
    # Remember whether tracking was on before this update arrived.
    was_tracking = self.control_mode & self.C_FACE_TRACKING

    self.control_mode = data.data
    print("New Control mode %i" % self.control_mode )

    now_tracking = self.control_mode & self.C_FACE_TRACKING
    if was_tracking > 0 and now_tracking == 0:
        # Tracking just switched off: record that in the atomspace,
        # and drop every currently-visible face.  Iterate over a copy,
        # since remove_face mutates the list.
        self.atomo.update_ft_state_to_atomspace(False)
        for face in list(self.visible_faces):
            self.remove_face(face)

    elif now_tracking > 0:
        self.atomo.update_ft_state_to_atomspace(True)
def __init__(self):
    """Subscribe to audio-sensor measurements."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber("audio_sensors", audiodata, self.audio_cb)
def __init__(self):
    """Subscribe to room-luminance messages."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber('/opencog/room_luminance', Luminance, self.bright_cb)
def __init__(self):
    """Subscribe to speech-to-text chat messages."""
    # Messaging wrapper used to push data into the atomspace.
    self.atomo = AtomicMsgs()
    rospy.Subscriber("chatbot_speech", ChatMessage, self.stt_cb)