def __init__(self, wait_services=False):
    """
    Constructor for the Sergio robot object.

    Registers all body parts (base, torso, head, perception, SSL, HRI parts,
    e-button and world model) and finally calls ``self.configure()``.

    :param wait_services: passed on to the Robot base class; whether to wait
        for the body-part services to come online
    """
    super(Sergio, self).__init__(robot_name="sergio", wait_services=wait_services)

    # Parts listed here are excluded from the hardware/operational checks.
    self._ignored_parts = ["leftArm", "rightArm", "torso", "spindle", "head"]

    self.add_body_part('base', base.Base(self.robot_name, self.tf_listener))
    # FIX(consistency): the sibling robot classes pass the `self.get_joint_states`
    # getter to Torso; this block passed `self.joint_states` (a snapshot instead
    # of a callable). Aligned with the other robots -- confirm against Torso's
    # expected argument type.
    self.add_body_part(
        'torso', torso.Torso(self.robot_name, self.tf_listener, self.get_joint_states))

    # Add arms (replace the '[[arm_name]]' and '[[side_name]]' strings with actual arm names.)
    #self.add_arm_part('[[arm name]]', arms.Arm(self.robot_name, self.tf_listener, side='[[side name]]'))

    self.add_body_part('head', head.Head(self.robot_name, self.tf_listener))
    self.add_body_part(
        'perception', perception.Perception(self.robot_name, self.tf_listener))
    self.add_body_part(
        'ssl', sound_source_localisation.SSL(self.robot_name, self.tf_listener))

    # Human Robot Interaction
    self.add_body_part('lights', lights.Lights(self.robot_name, self.tf_listener))
    # The lambdas below are evaluated lazily, so `self.lights` exists by the
    # time they run; they colour the LEDs while the part is active and reset
    # them afterwards.
    self.add_body_part(
        'speech',
        speech.Speech(self.robot_name, self.tf_listener,
                      lambda: self.lights.set_color_colorRGBA(lights.SPEAKING),
                      lambda: self.lights.set_color_colorRGBA(lights.RESET)))
    self.add_body_part(
        'hmi',
        api.Api(self.robot_name, self.tf_listener,
                lambda: self.lights.set_color_colorRGBA(lights.LISTENING),
                lambda: self.lights.set_color_colorRGBA(lights.RESET)))
    self.add_body_part(
        'ears',
        ears.Ears(self.robot_name, self.tf_listener,
                  lambda: self.lights.set_color_colorRGBA(lights.LISTENING),
                  lambda: self.lights.set_color_colorRGBA(lights.RESET)))

    # Use the simulated emergency button when running in simulation.
    ebutton_class = SimEButton if is_sim_mode() else ebutton.EButton
    self.add_body_part('ebutton', ebutton_class(self.robot_name, self.tf_listener))

    # Reasoning/world modeling
    self.add_body_part('ed', world_model_ed.ED(self.robot_name, self.tf_listener))

    self.configure()
def __init__(self, dontInclude=None, wait_services=False):
    """
    Constructor for the Amigo robot object.

    Registers all body parts (base, torso, both arms, head, perception, SSL,
    HRI parts, e-button and world model) and finally calls ``self.configure()``.

    :param dontInclude: Not supported anymore
    :param wait_services: Not supported anymore by robot class
    """
    if dontInclude is None:
        dontInclude = []
    super(Amigo, self).__init__(robot_name="amigo", wait_services=wait_services)

    def _light_setter(color):
        # Lazily colour the LEDs; by the time the returned callable runs,
        # self.lights has been registered below.
        return lambda: self.lights.set_color_colorRGBA(color)

    self.add_body_part('base', base.Base(self.robot_name, self.tf_listener))
    self.add_body_part(
        'torso',
        torso.Torso(self.robot_name, self.tf_listener, self.get_joint_states))

    # Both arms share the same construction, differing only in side.
    for part_name, arm_side in (('leftArm', "left"), ('rightArm', "right")):
        self.add_arm_part(
            part_name,
            arms.Arm(self.robot_name, self.tf_listener, self.get_joint_states,
                     side=arm_side))

    self.add_body_part('head', head.Head(self.robot_name, self.tf_listener))
    self.add_body_part(
        'perception',
        perception.Perception(self.robot_name, self.tf_listener))
    self.add_body_part(
        'ssl',
        sound_source_localisation.SSL(self.robot_name, self.tf_listener))

    # Human Robot Interaction
    self.add_body_part('lights',
                       lights.Lights(self.robot_name, self.tf_listener))
    self.add_body_part(
        'speech',
        speech.Speech(self.robot_name, self.tf_listener,
                      _light_setter(lights.SPEAKING),
                      _light_setter(lights.RESET)))
    self.add_body_part(
        'hmi',
        api.Api(self.robot_name, self.tf_listener,
                _light_setter(lights.LISTENING),
                _light_setter(lights.RESET)))
    self.add_body_part(
        'ears',
        ears.Ears(self.robot_name, self.tf_listener,
                  _light_setter(lights.LISTENING),
                  _light_setter(lights.RESET)))

    # Pick the simulated emergency button when running in simulation.
    ebutton_class = SimEButton if is_sim_mode() else ebutton.EButton
    self.add_body_part('ebutton',
                       ebutton_class(self.robot_name, self.tf_listener))

    # Reasoning/world modeling
    self.add_body_part(
        'ed', world_model_ed.ED(self.robot_name, self.tf_listener))

    self.configure()
def __init__(self, wait_services=False):
    """
    Constructor for the Hero robot object.

    Registers all body parts (base, torso, force-sensing left arm, head,
    perception, HRI parts, e-button and world model), applies HSR-specific
    workarounds (joint-name remap, hardware-status unsubscription) and finally
    calls ``self.configure()``.

    :param wait_services: passed on to the Robot base class; whether to wait
        for the body-part services to come online
    """
    super(Hero, self).__init__(robot_name="hero", wait_services=wait_services)

    # Parts listed here are excluded from the hardware/operational checks.
    self._ignored_parts = ["leftArm", "torso", "spindle", "head"]

    self.add_body_part('base', base.Base(self.robot_name, self.tf_listener))
    self.add_body_part(
        'torso', torso.Torso(self.robot_name, self.tf_listener, self.get_joint_states))
    self.add_arm_part(
        'leftArm',
        arms.ForceSensingArm(self.robot_name, self.tf_listener, self.get_joint_states, "left"))
    self.add_body_part('head', head.Head(self.robot_name, self.tf_listener))
    self.add_body_part(
        'perception',
        perception.Perception(self.robot_name, self.tf_listener,
                              "/hero/head_rgbd_sensor/rgb/image_raw",
                              "/hero/head_rgbd_sensor/project_2d_to_3d",
                              camera_base_ns='hero/head_rgbd_sensor'))
    # self.add_body_part('ssl', ssl.SSL(self.robot_name, self.tf_listener))

    # Human Robot Interaction
    self.add_body_part('lights', lights.Lights(self.robot_name, self.tf_listener))
    # The lambdas below are evaluated lazily, so `self.lights` exists by the
    # time they run.
    self.add_body_part(
        'speech',
        speech.Speech(self.robot_name, self.tf_listener,
                      lambda: self.lights.set_color_colorRGBA(lights.SPEAKING),
                      lambda: self.lights.set_color_colorRGBA(lights.RESET)))
    self.add_body_part(
        'hmi',
        api.Api(self.robot_name, self.tf_listener,
                lambda: self.lights.set_color_colorRGBA(lights.LISTENING),
                lambda: self.lights.set_color_colorRGBA(lights.RESET)))
    self.add_body_part(
        'ears',
        ears.Ears(self.robot_name, self.tf_listener,
                  lambda: self.lights.set_color_colorRGBA(lights.LISTENING),
                  lambda: self.lights.set_color_colorRGBA(lights.RESET)))

    # Use the simulated emergency button in simulation; HERO's real run-stop
    # button lives on a robot-specific topic.
    ebutton_class = SimEButton if is_sim_mode() else ebutton.EButton
    self.add_body_part(
        'ebutton',
        ebutton_class(self.robot_name, self.tf_listener, topic="/hero/runstop_button"))

    # Reasoning/world modeling
    self.add_body_part(
        'ed', world_model_ed.ED(self.robot_name, self.tf_listener))

    # Rename joint names: load the HSR-specific names from the parameter server.
    self.parts['leftArm'].joint_names = self.parts['leftArm'].load_param(
        'skills/arm/joint_names')

    # These don't work for HSR because (then) Toyota's diagnostics aggregator
    # makes the robot go into error somehow.
    # BUGFIX: dict.itervalues() is Python 2 only (removed in Python 3);
    # use .values() instead.
    for arm in self.arms.values():
        arm.unsubscribe_hardware_status()
    for arm in self.arms.values():
        arm._operational = True

    # Verify the joint goal required for posing exists. NOTE: `assert` is
    # stripped under `python -O`; kept as-is to preserve the existing contract.
    # BUGFIX: corrected "describtion" -> "description" in the message.
    assert 'arm_out_of_way' in self.parts['leftArm'].default_configurations, \
        "arm_out_of_way joint goal is not defined in {}_description skills.yaml".format(self.robot_name)

    self.configure()
#!/usr/bin/env python from ed_sensor_integration.srv import RayTrace from geometry_msgs.msg import PoseStamped from robot_skills import perception import rospy from std_msgs.msg import Header from tf import TransformListener rospy.init_node('ray_trace_example') p = perception.Perception("hero", None, None, camera_base_ns="hero/head_rgbd_sensor") srv_proxy = rospy.ServiceProxy("/hero/ed/ray_trace", RayTrace) tf_listener = TransformListener() while not rospy.is_shutdown(): rgb, depth, depth_info = p.get_rgb_depth_caminfo() if rgb: persons = p.detect_person_3d(*p.get_rgb_depth_caminfo()) for person in persons: if "is_pointing" in person.tags: try: map_pose = tf_listener.transformPose( "map", PoseStamped(header=Header( frame_id="head_rgbd_sensor_rgb_optical_frame", stamp=rospy.Time.now() -