def __init__(self, robot_name="", wait_services=False):
    self.robot_name = robot_name
    self.tf_listener = tf_server.TFClient()

    # Body parts
    self.parts = dict()
    self.parts['base'] = base.Base(self.robot_name, self.tf_listener)
    self.parts['torso'] = torso.Torso(self.robot_name, self.tf_listener)
    self.parts['leftArm'] = arms.Arm(self.robot_name, self.tf_listener, side="left")
    self.parts['rightArm'] = arms.Arm(self.robot_name, self.tf_listener, side="right")
    self.parts['head'] = head.Head(self.robot_name, self.tf_listener)

    # Human Robot Interaction
    self.parts['lights'] = lights.Lights(self.robot_name, self.tf_listener)
    self.parts['speech'] = speech.Speech(self.robot_name, self.tf_listener,
                                         lambda: self.lights.set_color(1, 0, 0),
                                         lambda: self.lights.set_color(0, 0, 1))
    self.parts['hmi'] = api.Api(self.robot_name, self.tf_listener)
    self.parts['ears'] = ears.Ears(self.robot_name,
                                   lambda: self.lights.set_color(0, 1, 0),
                                   lambda: self.lights.set_color(0, 0, 1))
    self.parts['ebutton'] = ebutton.EButton(self.robot_name, self.tf_listener)

    # Reasoning/world modeling
    self.parts['ed'] = world_model_ed.ED(self.robot_name, self.tf_listener)

    # Miscellaneous
    self.pub_target = rospy.Publisher("/target_location", geometry_msgs.msg.Pose2D, queue_size=10)
    self.base_link_frame = "/" + self.robot_name + "/base_link"

    # Grasp offsets
    # TODO: Don't hardcode, load from parameter server to make robot independent.
    self.grasp_offset = geometry_msgs.msg.Point(0.5, 0.2, 0.0)

    # Create attributes from dict
    for k, v in self.parts.iteritems():
        setattr(self, k, v)

    self.spindle = self.torso  # (ToDo: kind of ugly, why do we still have spindle???)
    self.arms = OrderedDict(left=self.leftArm,
                            right=self.rightArm)  # (ToDo: kind of ugly, why do we need this???)
    self.ears._hmi = self.hmi  # ToDo: when ears is gone, remove this line

    # Wait for connections
    s = rospy.Time.now()
    for k, v in self.parts.iteritems():
        v.wait_for_connections(1.0)
    e = rospy.Time.now()
    rospy.logdebug("Connecting took {} seconds".format((e - s).to_sec()))
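In this variant every entry of `self.parts` is promoted to an attribute via `setattr`, and the trailing loop expects each part to expose a `wait_for_connections(timeout)` method. A minimal sketch of that assumed part interface follows; the class name `RobotPart` and its internals are illustrative only and not taken from the source.

import rospy


class RobotPart(object):
    """Illustrative base for a body part; names and internals are assumptions."""

    def __init__(self, robot_name, tf_listener):
        self.robot_name = robot_name
        self.tf_listener = tf_listener
        self._publishers = []  # hypothetical: ROS publishers this part opened

    def wait_for_connections(self, timeout):
        """Block until every publisher of this part has at least one peer,
        or until `timeout` seconds have passed. Returns True on success."""
        deadline = rospy.Time.now() + rospy.Duration(timeout)
        while rospy.Time.now() < deadline:
            if all(pub.get_num_connections() > 0 for pub in self._publishers):
                return True
            rospy.sleep(0.1)
        return False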
def __init__(self, robot_name="", wait_services=False):
    self.robot_name = robot_name
    self.tf_listener = tf_server.TFClient()

    # Body parts
    self.base = base.Base(self.robot_name, self.tf_listener, wait_service=wait_services)
    self.torso = torso.Torso(self.robot_name, wait_service=wait_services)
    self.spindle = self.torso
    self.leftArm = arms.Arm(self.robot_name, "left", self.tf_listener)
    self.rightArm = arms.Arm(self.robot_name, "right", self.tf_listener)
    self.arms = OrderedDict(left=self.leftArm, right=self.rightArm)
    self.head = head.Head(self.robot_name)

    # Human Robot Interaction
    self.speech = speech.Speech(self.robot_name, wait_service=wait_services)
    self.ears = ears.Ears(self.robot_name)
    self.ebutton = ebutton.EButton()
    self.lights = lights.Lights()

    # Perception: can we get rid of this???
    self.perception = perception_ed.PerceptionED(wait_service=wait_services)

    # Reasoning/world modeling
    self.ed = world_model_ed.ED(wait_service=wait_services)
    self.reasoner = reasoner.Reasoner()

    # Miscellaneous
    self.pub_target = rospy.Publisher("/target_location", geometry_msgs.msg.Pose2D, queue_size=10)
    self.base_link_frame = "/" + self.robot_name + "/base_link"

    # Grasp offsets
    # TODO: Don't hardcode, load from parameter server to make robot independent.
    self.grasp_offset = geometry_msgs.msg.Point(0.5, 0.2, 0.0)
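Unlike the other two variants, this one actually forwards the `wait_services` flag to the individual parts as `wait_service=...`. A hypothetical construction call, assuming the `__init__` above belongs to a class named `Robot` (the class name, node name, and robot name below are assumptions, not taken from the source):

import rospy

rospy.init_node("robot_example")  # a ROS node must exist before publishers are created
robot = Robot(robot_name="example_robot", wait_services=True)

# Parts are plain attributes in this variant:
assert robot.spindle is robot.torso        # spindle is just an alias for the torso
assert robot.arms["left"] is robot.leftArm  # same arm reachable via the OrderedDict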
def __init__(self, robot_name="", wait_services=False):
    self.robot_name = robot_name
    self.tf_listener = tf.TransformListener()

    # Body parts
    self.parts = dict()
    self.parts['base'] = base.Base(self.robot_name, self.tf_listener)
    self.parts['torso'] = torso.Torso(self.robot_name, self.tf_listener)
    self.parts['leftArm'] = arms.Arm(self.robot_name, self.tf_listener, side="left")
    self.parts['rightArm'] = arms.Arm(self.robot_name, self.tf_listener, side="right")
    self.parts['head'] = head.Head(self.robot_name, self.tf_listener)
    self.parts['perception'] = perception.Perception(self.robot_name, self.tf_listener)
    self.parts['ssl'] = ssl.SSL(self.robot_name, self.tf_listener)

    # Human Robot Interaction
    self.parts['lights'] = lights.Lights(self.robot_name, self.tf_listener)
    self.parts['speech'] = speech.Speech(self.robot_name, self.tf_listener,
                                         lambda: self.lights.set_color_colorRGBA(lights.SPEAKING),
                                         lambda: self.lights.set_color_colorRGBA(lights.RESET))
    self.parts['hmi'] = api.Api(self.robot_name, self.tf_listener,
                                lambda: self.lights.set_color_colorRGBA(lights.LISTENING),
                                lambda: self.lights.set_color_colorRGBA(lights.RESET))
    self.parts['ears'] = ears.Ears(self.robot_name, self.tf_listener,
                                   lambda: self.lights.set_color_colorRGBA(lights.LISTENING),
                                   lambda: self.lights.set_color_colorRGBA(lights.RESET))
    self.parts['ebutton'] = ebutton.EButton(self.robot_name, self.tf_listener)

    # Reasoning/world modeling
    self.parts['ed'] = world_model_ed.ED(self.robot_name, self.tf_listener)

    # Miscellaneous
    self.pub_target = rospy.Publisher("/target_location", geometry_msgs.msg.Pose2D, queue_size=10)
    self.base_link_frame = "/" + self.robot_name + "/base_link"

    # Check hardware status
    self._hardware_status_sub = rospy.Subscriber("/" + self.robot_name + "/hardware_status",
                                                 DiagnosticArray, self.handle_hardware_status)

    # Grasp offsets
    # TODO: Don't hardcode, load from parameter server to make robot independent.
    self.grasp_offset = geometry_msgs.msg.Point(0.5, 0.2, 0.0)

    # Create attributes from dict
    for partname, bodypart in self.parts.iteritems():
        setattr(self, partname, bodypart)

    self.arms = OrderedDict(left=self.leftArm,
                            right=self.rightArm)  # (ToDo: kind of ugly, why do we need this???)
    self.ears._hmi = self.hmi  # ToDo: when ears is gone, remove this line

    # Wait for connections
    s = rospy.Time.now()
    for partname, bodypart in self.parts.iteritems():
        bodypart.wait_for_connections(1.0)
    e = rospy.Time.now()
    rospy.logdebug("Connecting took {} seconds".format((e - s).to_sec()))

    if not self.operational:
        not_operational_parts = [name for name, part in self.parts.iteritems()
                                 if not part.operational]
        rospy.logwarn("Not all hardware operational: {parts}".format(parts=not_operational_parts))
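This variant additionally subscribes to a `DiagnosticArray` topic and reads `self.operational` plus a per-part `operational` flag, none of which appear in the excerpt. The sketch below shows one way those two members could be wired up; the mixin name and the mapping from diagnostic names to part names are assumptions, not the source's implementation.

from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus


class RobotOperationalMixin(object):
    """Hypothetical home for the members the __init__ above refers to."""

    @property
    def operational(self):
        """True only if every registered body part reports itself operational."""
        return all(part.operational for part in self.parts.itervalues())

    def handle_hardware_status(self, msg):
        """DiagnosticArray callback: update the matching part's `operational` flag."""
        for status in msg.status:  # one DiagnosticStatus per hardware component
            part = self.parts.get(status.name)
            if part is not None:
                part.operational = (status.level == DiagnosticStatus.OK)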