def __init__(self, dontInclude=[], wait_services=True):
    self.tf_listener = tf_server.TFClient()

    if 'head' not in dontInclude:
        self.head = head.Head()
    if 'base' not in dontInclude:
        self.base = ros_navstack_base.Base(self.tf_listener, wait_service=wait_services)
    if 'spindle' not in dontInclude:
        self.spindle = spindle.Spindle(wait_service=wait_services)
    if 'arms' not in dontInclude:
        self.arms = arms.Arms(self.tf_listener)  # TODO: use self.tf_listener here
    if 'leftArm' not in dontInclude:
        self.leftArm = arms.Arm(arms.Side.LEFT, self.tf_listener)
    if 'rightArm' not in dontInclude:
        self.rightArm = arms.Arm(arms.Side.RIGHT, self.tf_listener)
    if 'ebutton' not in dontInclude:
        self.ebutton = ebutton.EButton()

    self.leftSide = arms.Side.LEFT
    self.rightSide = arms.Side.RIGHT

    self.pub_target = rospy.Publisher("/target_location", geometry_msgs.msg.Pose2D, queue_size=10)
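# A minimal usage sketch of the constructor above (the enclosing class name
# `Robot` is an assumption, not shown in this snippet): parts listed in
# dontInclude are simply never constructed, so the corresponding attributes
# do not exist on the instance.
rospy.init_node('robot_example', anonymous=True)
robot = Robot(dontInclude=['arms', 'leftArm', 'rightArm'], wait_services=False)
assert hasattr(robot, 'base')          # constructed
assert not hasattr(robot, 'leftArm')   # excluded via dontInclude above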
def __init__(self, robot_name="", wait_services=False): self.robot_name = robot_name self.tf_listener = tf_server.TFClient() # Body parts self.parts = dict() self.parts['base'] = base.Base(self.robot_name, self.tf_listener) self.parts['torso'] = torso.Torso(self.robot_name, self.tf_listener) self.parts['leftArm'] = arms.Arm(self.robot_name, self.tf_listener, side="left") self.parts['rightArm'] = arms.Arm(self.robot_name, self.tf_listener, side="right") self.parts['head'] = head.Head(self.robot_name, self.tf_listener) # Human Robot Interaction self.parts['lights'] = lights.Lights(self.robot_name, self.tf_listener) self.parts['speech'] = speech.Speech( self.robot_name, self.tf_listener, lambda: self.lights.set_color(1, 0, 0), lambda: self.lights.set_color(0, 0, 1)) self.parts['hmi'] = api.Api(self.robot_name, self.tf_listener) self.parts['ears'] = ears.Ears(self.robot_name, lambda: self.lights.set_color(0, 1, 0), lambda: self.lights.set_color(0, 0, 1)) self.parts['ebutton'] = ebutton.EButton(self.robot_name, self.tf_listener) # Reasoning/world modeling self.parts['ed'] = world_model_ed.ED(self.robot_name, self.tf_listener) # Miscellaneous self.pub_target = rospy.Publisher("/target_location", geometry_msgs.msg.Pose2D, queue_size=10) self.base_link_frame = "/" + self.robot_name + "/base_link" # Grasp offsets #TODO: Don't hardcode, load from parameter server to make robot independent. self.grasp_offset = geometry_msgs.msg.Point(0.5, 0.2, 0.0) # Create attributes from dict for k, v in self.parts.iteritems(): setattr(self, k, v) self.spindle = self.torso # (ToDo: kind of ugly, why do we still have spindle???) self.arms = OrderedDict( left=self.leftArm, right=self.rightArm ) # (ToDo: kind of ugly, why do we need this???) self.ears._hmi = self.hmi # ToDo: when ears is gone, remove this line # Wait for connections s = rospy.Time.now() for k, v in self.parts.iteritems(): v.wait_for_connections(1.0) e = rospy.Time.now() rospy.logdebug("Connecting took {} seconds".format((e - s).to_sec()))
def __init__(self, robot_name="", wait_services=False): self.robot_name = robot_name self.tf_listener = tf_server.TFClient() # Body parts self.base = base.Base(self.robot_name, self.tf_listener, wait_service=wait_services) self.torso = torso.Torso(self.robot_name, wait_service=wait_services) self.spindle = self.torso self.leftArm = arms.Arm(self.robot_name, "left", self.tf_listener) self.rightArm = arms.Arm(self.robot_name, "right", self.tf_listener) self.arms = OrderedDict(left=self.leftArm, right=self.rightArm) self.head = head.Head(self.robot_name) # Human Robot Interaction self.speech = speech.Speech(self.robot_name, wait_service=wait_services) self.ears = ears.Ears(self.robot_name) self.ebutton = ebutton.EButton() self.lights = lights.Lights() # Perception: can we get rid of this??? self.perception = perception_ed.PerceptionED( wait_service=wait_services) # Reasoning/world modeling self.ed = world_model_ed.ED(wait_service=wait_services) self.reasoner = reasoner.Reasoner() # Miscellaneous self.pub_target = rospy.Publisher("/target_location", geometry_msgs.msg.Pose2D, queue_size=10) self.base_link_frame = "/" + self.robot_name + "/base_link" #Grasp offsets #TODO: Don't hardcode, load from parameter server to make robot independent. self.grasp_offset = geometry_msgs.msg.Point(0.5, 0.2, 0.0)
    self._goal.tilt = tilt
    self._goal.end_time = end_time
    self._ac_head_ref_action.send_goal(self._goal,
                                       done_cb=self.__doneCallback,
                                       feedback_cb=self.__feedbackCallback)

    start = rospy.Time.now()
    if timeout != 0:
        rospy.logdebug("Waiting for {0} seconds to reach target ...".format(timeout))
        # Poll until the timeout expires or the feedback reports the setpoint was reached
        while (rospy.Time.now() - start) < rospy.Duration(timeout) and not self._at_setpoint:
            rospy.sleep(0.1)

def __feedbackCallback(self, feedback):
    self._at_setpoint = feedback.at_setpoint

def __doneCallback(self, terminal_state, result):
    self._goal = None
    self._at_setpoint = False


#######################################

if __name__ == "__main__":
    import tf_server
    tf_listener = tf_server.TFClient()
    rospy.init_node('amigo_head_executioner', anonymous=True)
    head = Head("amigo", tf_listener)
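# The wait loop above amounts to "poll until the timeout expires or the
# setpoint is reached". A standalone sketch of that pattern (the names here
# are illustrative, not part of the Head API):
def wait_until(condition, timeout, period=0.1):
    start = rospy.Time.now()
    while (rospy.Time.now() - start) < rospy.Duration(timeout) and not condition():
        rospy.sleep(period)
    return condition()
# e.g. reached = wait_until(lambda: head._at_setpoint, timeout=5.0)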
def __init__(self, robot_name="", wait_services=False): self.robot_name = robot_name self.tf_listener = tf_server.TFClient() # Body parts self.parts = dict() self.parts['base'] = base.Base(self.robot_name, self.tf_listener) self.parts['torso'] = torso.Torso(self.robot_name, self.tf_listener) self.parts['leftArm'] = arms.Arm(self.robot_name, self.tf_listener, side="left") self.parts['rightArm'] = arms.Arm(self.robot_name, self.tf_listener, side="right") self.parts['head'] = head.Head(self.robot_name, self.tf_listener) self.parts['perception'] = perception.Perception( self.robot_name, self.tf_listener) self.parts['ssl'] = ssl.SSL(self.robot_name, self.tf_listener) # Human Robot Interaction self.parts['lights'] = lights.Lights(self.robot_name, self.tf_listener) self.parts['speech'] = speech.Speech( self.robot_name, self.tf_listener, lambda: self.lights.set_color_colorRGBA(lights.SPEAKING), lambda: self.lights.set_color_colorRGBA(lights.RESET)) self.parts['hmi'] = api.Api( self.robot_name, self.tf_listener, lambda: self.lights.set_color_colorRGBA(lights.LISTENING), lambda: self.lights.set_color_colorRGBA(lights.RESET)) self.parts['ears'] = ears.Ears( self.robot_name, self.tf_listener, lambda: self.lights.set_color_colorRGBA(lights.LISTENING), lambda: self.lights.set_color_colorRGBA(lights.RESET)) self.parts['ebutton'] = ebutton.EButton(self.robot_name, self.tf_listener) # Reasoning/world modeling self.parts['ed'] = world_model_ed.ED(self.robot_name, self.tf_listener) # Miscellaneous self.pub_target = rospy.Publisher("/target_location", geometry_msgs.msg.Pose2D, queue_size=10) self.base_link_frame = "/" + self.robot_name + "/base_link" # Check hardware status self._hardware_status_sub = rospy.Subscriber( "/" + self.robot_name + "/hardware_status", DiagnosticArray, self.handle_hardware_status) # Grasp offsets #TODO: Don't hardcode, load from parameter server to make robot independent. self.grasp_offset = geometry_msgs.msg.Point(0.5, 0.2, 0.0) # Create attributes from dict for partname, bodypart in self.parts.iteritems(): setattr(self, partname, bodypart) self.arms = OrderedDict( left=self.leftArm, right=self.rightArm ) # (ToDo: kind of ugly, why do we need this???) self.ears._hmi = self.hmi # ToDo: when ears is gone, remove this line # Wait for connections s = rospy.Time.now() for partname, bodypart in self.parts.iteritems(): bodypart.wait_for_connections(1.0) e = rospy.Time.now() rospy.logdebug("Connecting took {} seconds".format((e - s).to_sec())) if not self.operational: not_operational_parts = [ name for name, part in self.parts.iteritems() if not part.operational ] rospy.logwarn("Not all hardware operational: {parts}".format( parts=not_operational_parts))