def motionA(robot):
    """Perform dance motion A (Gangnam style) with both arms.

    Sends the right arm to an initial pose, tracks the left gripper with the
    head, moves both arms to a pre-start pose and then oscillates three times
    between the start and end poses.  Finally resets the left arm.

    :param robot: robot object exposing leftArm, rightArm and head interfaces
    """
    left = robot.leftArm
    right = robot.rightArm

    right._send_joint_trajectory(
        [[-0.050, 1.500, 1.500, 0.100, 0.150, 0.000, 0.000]],
        timeout=rospy.Duration(1))

    # Look at the left gripper; the goal is issued twice, matching the
    # original behaviour (presumably to make sure it latches -- TODO confirm).
    robot.head.look_at_point(
        msgs.PointStamped(0, 0, 0, frame_id="/amigo/grippoint_left"))
    robot.head.look_at_point(
        msgs.PointStamped(0, 0, 0, frame_id="/amigo/grippoint_left"))

    # TODO: Make work with different robots.
    left._send_joint_trajectory([gangnam_poseA_left_pre_start],
                                timeout=rospy.Duration(1))
    right._send_joint_trajectory([gangnam_poseA_right_pre_start],
                                 timeout=rospy.Duration(1))

    # The original code repeated the start/end pose pair three times verbatim;
    # folded into a loop with identical command order.
    for _ in range(3):
        left._send_joint_trajectory([gangnam_poseA_left_start],
                                    timeout=rospy.Duration(1))
        right._send_joint_trajectory([gangnam_poseA_right_start],
                                     timeout=rospy.Duration(1))
        left._send_joint_trajectory([gangnam_poseA_left_end],
                                    timeout=rospy.Duration(1))
        right._send_joint_trajectory([gangnam_poseA_right_end],
                                     timeout=rospy.Duration(1))

    # NOTE(review): only the left arm is reset here; the right arm keeps its
    # last pose -- confirm this is intended.
    left.reset()
def look_at_hand(self, side, keep_tracking=True):
    """Look at the left or right hand.

    :param side: string "left" or "right" selecting the grippoint to look at
    :param keep_tracking: optionally disable tracking (keep_tracking=False).
        NOTE(review): this flag is currently not forwarded to setLookAtGoal --
        confirm whether tracking can actually be disabled.
    :return: result of setLookAtGoal, or False for an invalid side
    """
    if side not in ("left", "right"):
        rospy.logerr("No side specified for look_at_hand. Give me 'left' or 'right'")
        return False
    grippoint_frame = "/" + self.robot_name + "/grippoint_" + side
    return self.setLookAtGoal(msgs.PointStamped(0, 0, 0, frame_id=grippoint_frame))
def execute(self, gl):
    """Point the resolved arm's end-effector at the resolved entity.

    :param gl: smach userdata (unused by the body)
    :return: 'point_succeeded', 'point_failed' or 'target_lost'
    """
    arm = self.arm_designator.resolve()
    if not arm:
        rospy.logerr("Could not resolve arm")
        return "point_failed"

    entity = self.point_entity_designator.resolve()
    if not entity:
        rospy.logerr("Could not resolve entity")
        return "target_lost"

    # Map the resolved arm onto its end-effector frame.
    if arm == self.robot.leftArm:
        end_effector_frame_id = "/amigo/grippoint_left"
    elif arm == self.robot.rightArm:
        end_effector_frame_id = "/amigo/grippoint_right"
    else:
        # BUGFIX: the original fell through with end_effector_frame_id unbound
        # (NameError) when the arm was neither leftArm nor rightArm.
        rospy.logerr("Resolved arm is neither leftArm nor rightArm")
        return "point_failed"

    target_position = msgs.PointStamped(entity.pose, frame_id="/map",
                                        stamp=rospy.Time())
    rospy.loginfo(
        "[robot_smach_states:Point_at_object] Target position: {0}".format(
            target_position))

    # Keep looking at end-effector for ar marker detection
    self.robot.head.look_at_point(
        msgs.PointStamped(0, 0, 0, frame_id=end_effector_frame_id))

    # Transform to base link
    target_position_bl = transformations.tf_transform(
        target_position, "/map", "/amigo/base_link",
        tf_listener=self.robot.tf_listener)
    rospy.loginfo(
        "[robot_smach_states] Target position in base link: {0}".format(
            target_position_bl))

    # Send goal; aims 10 cm short of the target along x.
    if arm.send_goal(target_position_bl.x - 0.1, target_position_bl.y,
                     target_position_bl.z, 0, 0, 0, 120, pre_grasp=True):
        rospy.loginfo("arm at object")
    else:
        # NOTE(review): an unreachable goal still returns 'point_succeeded'
        # below, matching the original behaviour -- confirm intended.
        rospy.loginfo("Arm cannot reach object")

    self.robot.head.reset()
    return 'point_succeeded'
def execute(self, userdata):
    """Resolve the selected entity and determine the action outcome.

    If the entity type is unknown, the robot looks at it, re-inspects it
    with the kinect and tries to classify it again before deciding.

    :param userdata: smach userdata (unused by the body)
    :return: outcome computed by self._get_action_outcome, or "failed"
    """
    selected_entity = self._selected_entity_designator.resolve()
    if not selected_entity:
        rospy.logerr("Could not resolve the selected entity!")
        return "failed"

    rospy.loginfo("The type of the entity is '%s'" % selected_entity.type)

    # If we don't know the entity type, try to classify again
    if selected_entity.type in ("", "unknown"):
        # Make sure the head looks at the entity
        entity_pos = selected_entity.pose.frame.p
        self._robot.head.look_at_point(
            msgs.PointStamped(entity_pos.x(), entity_pos.y(), 0.8, "/map"),
            timeout=10)

        # This is needed because the head is not entirely still when the
        # look_at_point function finishes
        rospy.sleep(1)

        # Inspect the entity again
        self._robot.ed.update_kinect(selected_entity.id)

        # Classify the entity again
        try:
            selected_entity.type = \
                self._robot.ed.classify(ids=[selected_entity.id])[0].type
            rospy.loginfo("We classified the entity again; type = %s"
                          % selected_entity.type)
        except Exception as e:
            rospy.logerr(e)

    return self._get_action_outcome(selected_entity)
def execute(self, userdata):
    """Prepare the robot for grasping: open gripper, raise torso, move arm
    to the pre-grasp pose and look at the entity to grab.

    :param userdata: smach userdata; userdata.arm is set to the resolved side
    :return: 'succeeded' or 'failed'
    """
    arm = self.arm_designator.resolve()
    if not arm:
        rospy.logerr("Could not resolve arm")
        return "failed"
    userdata.arm = arm.side

    entity = self.grab_entity_designator.resolve()
    if not entity:
        rospy.logerr("Could not resolve grab_entity")
        return "failed"

    # Open gripper (non-blocking).
    # BUGFIX: the original issued this exact non-blocking command twice
    # (timeout=0 and timeout=0.0); the duplicate copy-paste was removed.
    arm.send_gripper_goal('open', timeout=0)

    # Torso up (non-blocking)
    self.robot.torso.high()

    # Arm to position in a safe way
    arm.send_joint_trajectory('prepare_grasp', timeout=0)

    # Make sure the head looks at the entity
    pos = entity.pose.position
    self.robot.head.look_at_point(
        msgs.PointStamped(pos.x, pos.y, pos.z, "/map"), timeout=0.0)

    return 'succeeded'
def pickup(robot):
    """Drive the right arm through the PICK_UP and RIGHT_PRE_GRASP1 poses
    while tracking the right finger with the head, then reset the head.

    :param robot: robot object exposing rightArm and head interfaces
    """
    print("joints = PICK_UP")
    right_arm = robot.rightArm
    finger_target = msgs.PointStamped(0, 0, 0, frame_id="/finger1_right")

    right_arm.send_gripper_goal_open()
    right_arm.send_joint_goal(*poses["PICK_UP"][1])
    robot.head.look_at_point(finger_target)

    right_arm.send_joint_goal(*poses["RIGHT_PRE_GRASP1"][1])
    robot.head.look_at_point(finger_target)

    robot.head.reset()
def macarena(robot):
    """Perform the macarena dance: torso bobs up and down in a background
    thread while both arms run through the macarena trajectory sequence.

    On any failure the robot asks for help and logs the error.

    :param robot: robot object exposing leftArm, rightArm, torso, head, speech
    """
    stopEvent = threading.Event()

    up_and_down_torso = threading.Thread(
        target=torso_up_down, args=(robot, "lower", "upper", stopEvent))
    up_and_down_torso.start()

    # Head bobbing thread is created but deliberately not started.
    up_and_down_head = threading.Thread(target=head_up_down,
                                        args=(robot, stopEvent))
    #up_and_down_head.start()

    robot.head.look_at_point(
        msgs.PointStamped(0.2, 0, 1.3, frame_id="/amigo/base"))

    # Shortcuts; the underscore makes the outlining below easier to read.
    # NOTE(review): the timeout parameter of both helpers is ignored -- every
    # trajectory is sent with a fixed 7 s duration. Confirm this is intended
    # before forwarding the caller-supplied timeouts.
    def _left(trajectory, timeout=10.0):
        robot.leftArm._send_joint_trajectory(trajectory,
                                             timeout=rospy.Duration(7))

    def right(trajectory, timeout=10.0):
        robot.rightArm._send_joint_trajectory(trajectory,
                                              timeout=rospy.Duration(7))

    try:
        for i in range(1):
            right(arm_straight_1, timeout=rospy.Duration(10))
            _left(arm_straight_1, timeout=rospy.Duration(10))

            right(arm_straight_2, timeout=rospy.Duration(5))
            _left(arm_straight_2, timeout=rospy.Duration(5))

            right(right_hand_to_left_shoulder, timeout=rospy.Duration(10))
            _left(left_hand_to_right_shoulder, timeout=rospy.Duration(10))

            right(right_arm_to_head_1, timeout=rospy.Duration(10))
            _left(left_arm_to_head_4, timeout=rospy.Duration(5))

            right(arm_to_hips_1, timeout=rospy.Duration(10))
            _left(arm_to_hips_1, timeout=rospy.Duration(10))
            right(arm_to_hips_2, timeout=rospy.Duration(5))
            _left(arm_to_hips_2, timeout=rospy.Duration(5))
            right(arm_to_hips_3, timeout=rospy.Duration(5))
            _left(arm_to_hips_3, timeout=rospy.Duration(5))
            right(arm_to_hips_4, timeout=rospy.Duration(5))
            _left(arm_to_hips_4, timeout=rospy.Duration(5))
            right(arm_to_hips_1, timeout=rospy.Duration(15))
            _left(arm_to_hips_1, timeout=rospy.Duration(15))

        stopEvent.set()
        up_and_down_torso.join()
        #up_and_down_head.join()

        robot.rightArm.reset()
        robot.leftArm.reset()
        robot.head.reset()
    # BUGFIX: was the Python-2-only "except Exception, e" form; "as" is valid
    # on Python 2.6+ and Python 3.
    except Exception as e:
        robot.speech.speak("Guys, could you help me, i'm stuck in the maca-rena")
        rospy.logerr(e)
def grasp(robot):
    """Close the right gripper and move through the pre-grasp poses to
    RIGHT_GRASP1, tracking the right finger with the head throughout.

    :param robot: robot object exposing rightArm and head interfaces
    """
    print("joints = RIGHT_GRASP1")
    right_arm = robot.rightArm
    finger_target = msgs.PointStamped(0, 0, 0, frame_id="/finger1_right")

    right_arm.send_gripper_goal_close()
    right_arm.send_joint_goal(*poses["RIGHT_PRE_GRASP1"][1])

    # One option is to let Amigo look to his hands directly using a point
    # relative to it:
    robot.head.look_at_point(finger_target)

    right_arm.send_joint_goal(*poses["RIGHT_PRE_GRASP2"][1])
    right_arm.send_joint_goal(*poses["RIGHT_GRASP1"][1])

    # Look at finger again
    robot.head.look_at_point(finger_target)
def walk_like_an_egyptian(robot):
    """Perform the 'walk like an Egyptian' dance: alternate the left and
    right arm egyptian motions twice, looking at the moving gripper, then
    reset both arms and the head.

    :param robot: robot object exposing leftArm, rightArm and head interfaces
    """
    # TODO: Make work with different robots.
    sides = (
        (robot.leftArm, "/amigo/grippoint_left", egyptian_motion_left),
        (robot.rightArm, "/amigo/grippoint_right", egyptian_motion_right),
    )
    for _ in range(2):
        for arm, grippoint_frame, motion in sides:
            # The look-at goal is issued twice, matching the original
            # behaviour (presumably to make sure it latches -- TODO confirm).
            arm_target = msgs.PointStamped(0, 0, 0, frame_id=grippoint_frame)
            robot.head.look_at_point(arm_target)
            robot.head.look_at_point(arm_target)
            arm._send_joint_trajectory(motion, timeout=rospy.Duration(10))

    robot.rightArm.reset()
    robot.leftArm.reset()
    robot.head.reset()
def motionB(robot):
    """Perform dance motion B (Gangnam style): raise the left arm in stages,
    track the right gripper with the head, then oscillate the right arm
    between its start and end poses.

    :param robot: robot object exposing leftArm, rightArm and head interfaces
    """
    left = robot.leftArm
    right = robot.rightArm
    head = robot.head

    for _ in range(1):
        # Left arm up in three stages.
        left._send_joint_trajectory(
            [[-0.050, 0.500, 0.7150, 1.300, -0.15, 0.000, 0.000]],
            timeout=rospy.Duration(10))
        left._send_joint_trajectory(
            [[-0.050, 0.500, 1.5300, 0.700, -0.15, 0.000, 0.000]],
            timeout=rospy.Duration(1))
        left._send_joint_trajectory(
            [[-0.050, 1.500, 1.5300, 0.700, -0.15, 0.000, 0.000]],
            timeout=rospy.Duration(1))

        # Track the right gripper; the goal is issued twice, matching the
        # original behaviour (presumably to make sure it latches).
        head.look_at_point(
            msgs.PointStamped(0, 0, 0, frame_id="/amigo/grippoint_right"),
            pan_vel=1.0, tilt_vel=1.0)
        head.look_at_point(
            msgs.PointStamped(0, 0, 0, frame_id="/amigo/grippoint_right"),
            pan_vel=1.0, tilt_vel=1.0)

        # Right is now in [-0.050, 1.500, 1.500, 0.100, -0.15, 0.000, 0.000]
        right._send_joint_trajectory(
            [[-0.050, 1.500, 0.000, 0.100, 0.250, 0.000, 0.000]],
            timeout=rospy.Duration(10))
        right._send_joint_trajectory(
            [[-0.050, 1.500, 0.000, 1.570, 0.250, 0.000, 0.000]],
            timeout=rospy.Duration(1))
        # NOTE: this call uses the method's default timeout on purpose-unknown
        # grounds (original had no timeout here).
        left._send_joint_trajectory(
            [[-0.050, 1.500, 1.5300, 1.300, -0.15, 0.000, 0.000]])

        # TODO: Make work with different robots.
        right._send_joint_trajectory([gangnam_poseB_right_end],
                                     timeout=rospy.Duration(1))
        right._send_joint_trajectory([gangnam_poseB_right_start],
                                     timeout=rospy.Duration(1))
        right._send_joint_trajectory([gangnam_poseB_right_end],
                                     timeout=rospy.Duration(1))
        right._send_joint_trajectory([gangnam_poseB_right_start],
                                     timeout=rospy.Duration(1))
        right._send_joint_trajectory([gangnam_poseB_right_end],
                                     timeout=rospy.Duration(1))
        left._send_joint_trajectory(
            [[-0.050, 1.500, 1.5300, 0.800, -0.15, 0.000, 0.000]],
            timeout=rospy.Duration(1))
def _backup_register(self):
    """Fallback operator registration.

    This only happens when the operator was just registered, and never
    tracked.  Tries, in order: the closest possible person entity 1.5 m in
    front, then the closest laser entity, then a full re-registration.

    :return: True when self._operator is set, False otherwise
    """
    front_point = msg_constructors.PointStamped(
        x=1.5, y=0, z=1,
        frame_id="/%s/base_link" % self._robot.robot_name)

    print("Operator already lost. Getting closest possible person entity at 1.5 m in front, radius = 1")
    self._operator = self._robot.ed.get_closest_possible_person_entity(
        radius=1, center_point=front_point)
    if self._operator:
        return True

    print("Operator still lost. Getting closest possible laser entity at 1.5 m in front, radius = 1")
    self._operator = self._robot.ed.get_closest_laser_entity(
        radius=1,
        center_point=msg_constructors.PointStamped(
            x=1.5, y=0, z=1,
            frame_id="/%s/base_link" % self._robot.robot_name))
    if self._operator:
        return True

    print("Trying to register operator again")
    self._robot.speech.speak("Oops, let's try this again...", block=False)
    self._register_operator()
    self._operator = self._robot.ed.get_entity(id=self._operator_id)
    if self._operator:
        self._last_operator = self._operator
        return True
    return False
def execute(self, userdata=None):
    """Grasp at the resolved grab point, using AR-marker visual servoing of
    the end-effector when the hand marker is visible, otherwise a plain
    base-link goal.

    :param userdata: smach userdata (unused by the body)
    :return: 'grab_succeeded', 'grab_failed', 'target_lost' or 'failed'
    """
    arm = self.arm_designator.resolve()
    if not arm:
        rospy.logerr("Could not resolve arm")
        return "failed"

    # Map the resolved arm onto its end-effector and AR-marker frames.
    if arm == self.robot.arms["left"]:
        end_effector_frame_id = "/" + self.robot.robot_name + "/grippoint_left"
        ar_frame_id = "/hand_marker_left"
    elif arm == self.robot.arms["right"]:
        end_effector_frame_id = "/" + self.robot.robot_name + "/grippoint_right"
        ar_frame_id = "/hand_marker_right"
    else:
        # BUGFIX: the original fell through with end_effector_frame_id and
        # ar_frame_id unbound (NameError) for an unknown arm.
        rospy.logerr("Resolved arm is neither the left nor the right arm")
        return "failed"

    target_position = self.grab_point_designator.resolve()
    if not target_position:
        # BUGFIX: the original format string had two placeholders
        # ("{0}: {1}") but only one argument, raising IndexError on this
        # error path instead of logging.
        rospy.loginfo("Could not resolve grab_point_designator {0}".format(
            self.grab_point_designator))
        return "target_lost"

    # Keep looking at end-effector for ar marker detection
    self.robot.head.look_at_goal(
        msgs.PointStamped(0, 0, 0, frame_id=end_effector_frame_id))

    rospy.loginfo("[robot_smach_states:grasp] Target position: {0}".format(
        target_position))

    target_position_bl = transformations.tf_transform(
        target_position.point, target_position.header.frame_id,
        "/amigo/base_link", tf_listener=self.robot.tf_listener)
    rospy.loginfo(
        "[robot_smach_states] Target position in base link: {0}".format(
            target_position_bl))

    target_position_delta = geometry_msgs.msg.Point()

    # First check to see if visual servoing is possible
    self.robot.perception.toggle(['ar_pose'])

    # Transform point(0,0,0) in ar marker frame to grippoint frame
    ar_point = msgs.PointStamped(0, 0, 0, frame_id=ar_frame_id,
                                 stamp=rospy.Time())

    # Check several times if transform is available
    cntr = 0
    ar_marker_available = False
    while cntr < 5 and not ar_marker_available:
        rospy.logdebug("Checking AR marker for the {0} time".format(cntr + 1))
        ar_point_grippoint = transformations.tf_transform(
            ar_point, ar_frame_id, end_effector_frame_id,
            tf_listener=self.robot.tf_listener)
        if ar_point_grippoint is not None:
            ar_marker_available = True
        else:
            cntr += 1
            rospy.sleep(0.2)

    # If transform is not available, try again, but use head movement as well
    if not ar_marker_available:
        arm.send_delta_goal(0.05, 0.0, 0.0, 0.0, 0.0, 0.0,
                            timeout=5.0, frame_id=end_effector_frame_id,
                            pre_grasp=False)
        self.robot.speech.speak("Let me have a closer look", block=False)

    ar_point_grippoint = transformations.tf_transform(
        ar_point, ar_frame_id, end_effector_frame_id,
        tf_listener=self.robot.tf_listener)
    rospy.loginfo(
        "AR marker in end-effector frame = {0}".format(ar_point_grippoint))

    # Transform target position to grippoint frame
    target_position_grippoint = transformations.tf_transform(
        target_position, "/map", end_effector_frame_id,
        tf_listener=self.robot.tf_listener)
    rospy.loginfo("Target position in end-effector frame = {0}".format(
        target_position_grippoint))

    # Compute difference = delta (only when both transformations have
    # succeeded) and correct for offset between ar_marker and grippoint
    if ar_point_grippoint is not None and target_position_grippoint is not None:
        target_position_delta = msgs.Point(
            target_position_grippoint.x - ar_point_grippoint.x
            + arm.markerToGrippointOffset.x,
            target_position_grippoint.y - ar_point_grippoint.y
            + arm.markerToGrippointOffset.y,
            target_position_grippoint.z - ar_point_grippoint.z
            + arm.markerToGrippointOffset.z)
        rospy.loginfo("Delta target = {0}".format(target_position_delta))
        ar_marker_available = True
    else:
        ar_marker_available = False
    rospy.logwarn("ar_marker_available (2) = {0}".format(ar_marker_available))

    # Sanity check: an implausible delta means the marker detection is off.
    if ar_marker_available:
        if (target_position_delta.x < 0 or target_position_delta.x > 0.6 or
                target_position_delta.y < -0.3 or
                target_position_delta.y > 0.3 or
                target_position_delta.z < -0.3 or
                target_position_delta.z > 0.3):
            rospy.logwarn("Ar marker detection probably incorrect")
            self.robot.speech.speak(
                "I guess I cannot see my hand properly", block=False)
            ar_marker_available = False
    rospy.logwarn("ar_marker_available (3) = {0}".format(ar_marker_available))

    # Switch off ar marker detection
    self.robot.perception.toggle([])

    # Original, pregrasp is performed by the compute_pre_grasp node
    if not ar_marker_available:
        self.robot.speech.speak("Let's see", block=False)
        if arm.send_goal(target_position_bl.x, target_position_bl.y,
                         target_position_bl.z, 0, 0, 0, 120, pre_grasp=True):
            rospy.loginfo("arm at object")
        else:
            rospy.logerr("Goal unreachable: {0}".format(
                target_position_bl).replace("\n", " "))
            self.robot.speech.speak(
                "I am sorry but I cannot move my arm to the object position",
                block=False)
            return 'grab_failed'
    else:
        self.robot.speech.speak("Let's go", block=False)
        if arm.send_delta_goal(target_position_delta.x,
                               target_position_delta.y,
                               target_position_delta.z,
                               0, 0, 0, 120,
                               frame_id=end_effector_frame_id,
                               pre_grasp=True):
            rospy.loginfo("arm at object")
        else:
            rospy.logerr("Goal unreachable: {0}".format(
                target_position_bl).replace("\n", " "))
            self.robot.speech.speak(
                "I am sorry but I cannot move my arm to the object position",
                block=False)
            return 'grab_failed'

    self.robot.head.reset_position(timeout=0.0)
    return 'grab_succeeded'
def _recover_operator(self):
    """Try to re-find a lost operator by scanning with the head and matching
    faces, then snapping the best face detection to a person/laser entity.

    Scans a fan of head angles for up to 60 s. On success sets
    self._operator / self._operator_id and returns True; otherwise turns
    towards the last known operator position and returns False.

    NOTE(review): the face-detection call this loop relied on has been
    removed (see the logerr below), so `detections` is always empty and the
    recovery can currently only time out -- confirm before relying on this.
    """
    print "Trying to recover the operator"
    self._robot.head.look_at_standing_person()
    self._robot.speech.speak(
        "%s, please look at me while I am looking for you" %
        self._operator_name, block=False)

    # Wait for the operator and find his/her face
    operator_recovery_timeout = 60.0  # TODO: parameterize
    start_time = rospy.Time.now()
    recovered_operator = None

    # Fan of head goals at 2 m distance, 1.7 m height, sweeping left/right.
    look_distance = 2.0
    look_angles = [
        0.0, math.pi / 6, math.pi / 4, math.pi / 2.3,
        0.0, -math.pi / 6, -math.pi / 4, -math.pi / 2.3
    ]
    head_goals = [
        msg_constructors.PointStamped(x=look_distance * math.cos(angle),
                                      y=look_distance * math.sin(angle),
                                      z=1.7,
                                      frame_id="/%s/base_link" %
                                      self._robot.robot_name)
        for angle in look_angles
    ]

    i = 0
    while (rospy.Time.now() - start_time).to_sec() < operator_recovery_timeout:
        # Cycle through the head goals, one per loop iteration.
        self._robot.head.look_at_point(head_goals[i])
        i += 1
        if i == len(head_goals):
            i = 0
        self._robot.head.wait_for_motion_done()

        print "Trying to detect faces..."
        rospy.logerr(
            "ed.detect _persons() method disappeared! This was only calling the face recognition module and we are using a new one now!"
        )
        rospy.logerr("I will return an empty detection list!")
        detections = []
        if not detections:
            detections = []

        # Pick the best-scoring detection matching the operator's name.
        best_score = -0.5  # TODO: magic number
        best_detection = None
        for d in detections:
            print "name: %s" % d.name
            print "score: %f" % d.name_score
            if d.name == self._operator_name and d.name_score > best_score:
                best_score = d.name_score
                best_detection = d
            # An unnamed detection invalidates the whole batch.
            if not d.name:
                best_detection = None
                break

        if best_detection:
            print "Trying to find closest laser entity to face"
            print "best detection frame id: %s" % best_detection.pose.header.frame_id
            # Publish the face position for debugging/visualisation.
            operator_pos = geometry_msgs.msg.PointStamped()
            operator_pos.header.stamp = best_detection.pose.header.stamp
            operator_pos.header.frame_id = best_detection.pose.header.frame_id
            operator_pos.point = best_detection.pose.pose.position
            self._face_pos_pub.publish(operator_pos)

            # Prefer a person entity near the face; fall back to a laser blob.
            recovered_operator = self._robot.ed.get_closest_possible_person_entity(
                radius=self._lost_distance,
                center_point=best_detection.pose.pose.position)
            if not recovered_operator:
                recovered_operator = self._robot.ed.get_closest_laser_entity(
                    radius=self._lost_distance,
                    center_point=best_detection.pose.pose.position)

            if recovered_operator:
                print "Found one!"
                self._operator_id = recovered_operator.id
                print "Recovered operator id: %s" % self._operator_id
                self._operator = recovered_operator
                self._robot.speech.speak(
                    "There you are! Go ahead, I'll follow you again",
                    block=False)
                self._robot.head.close()
                self._time_started = rospy.Time.now()
                return True

    # Timed out: give up, face the last known operator position and resume.
    self._robot.head.close()
    self._turn_towards_operator()
    self._update_navigation()
    rospy.sleep(2.0)
    return False
def _register_operator(self):
    """Register the person to follow as the operator.

    Either re-uses the entity behind self._operator_id, or interactively asks
    "Should I follow you?" and takes the closest laser entity in front of the
    robot (optionally learning the operator's face first), or -- when
    self._ask_follow is False -- silently takes the closest person entity.

    On success sets self._operator / self._operator_id, appends the operator
    to the breadcrumb trail and returns True; returns False on timeout, a
    "no" answer, or ear failure.
    """
    start_time = rospy.Time.now()
    self._robot.head.look_at_standing_person()

    # Re-use a previously known operator entity when we still have its id.
    if self._operator_id:
        operator = self._robot.ed.get_entity(id=self._operator_id)
    else:
        operator = None

    while not operator:
        # Give up after the configured registration timeout.
        if (rospy.Time.now() - start_time).to_sec() > self._operator_timeout:
            return False

        if self._ask_follow:
            self._robot.speech.speak("Should I follow you?", block=True)
            answer = self._robot.ears.recognize("<choice>",
                                                {"choice": ["yes", "no"]})
            if answer and 'choice' in answer.choices:
                if answer.choices['choice'] == "yes":
                    # Take whoever is standing ~1 m in front of the robot.
                    operator = self._robot.ed.get_closest_laser_entity(
                        radius=0.5,
                        center_point=msg_constructors.PointStamped(
                            x=1.0, y=0, z=1,
                            frame_id="/%s/base_link" %
                            self._robot.robot_name))
                    if not operator:
                        self._robot.speech.speak(
                            "Please stand in front of me")
                    else:
                        if self._learn_face:
                            self._robot.speech.speak(
                                "Please look at me while I learn to recognize you.",
                                block=True)
                            self._robot.speech.speak("Just in case...",
                                                     block=False)
                            self._robot.head.look_at_standing_person()
                            learn_person_start_time = rospy.Time.now()
                            learn_person_timeout = 10.0  # TODO: Parameterize
                            num_detections = 0
                            # NOTE(review): the learn_person call this loop
                            # used has been removed, so the `if False:` branch
                            # below never increments num_detections and the
                            # loop can only exit via the timeout branch --
                            # confirm intended.
                            while num_detections < 5:
                                rospy.logerr(
                                    "self._robot.ed.learn _person(self._operator_name) method disappeared!, returning False"
                                )
                                if False:
                                    num_detections += 1
                                elif (rospy.Time.now() -
                                      learn_person_start_time
                                      ).to_sec() > learn_person_timeout:
                                    self._robot.speech.speak(
                                        "Please stand in front of me and look at me"
                                    )
                                    operator = None
                                    break
                elif answer.choices['choice'] == "no":
                    return False
                else:
                    rospy.sleep(2)
            else:
                self._robot.speech.speak(
                    "Something is wrong with my ears, please take a look!")
                return False
        else:
            # Non-interactive mode: take the closest person 1.5 m in front.
            operator = self._robot.ed.get_closest_possible_person_entity(
                radius=1,
                center_point=msg_constructors.PointStamped(
                    x=1.5, y=0, z=1,
                    frame_id="/%s/base_link" % self._robot.robot_name))
            if not operator:
                rospy.sleep(1)

    print "We have a new operator: %s" % operator.id
    self._robot.speech.speak("Gotcha! I will follow you!", block=False)
    self._operator_id = operator.id
    self._operator = operator
    self._breadcrumbs.append(operator)
    self._robot.head.close()
    print("NOW!!!")
    rospy.sleep(3)
    return True
def grab_item(robot, selectedArm):
    """Build and run a small smach state machine that looks for an object in
    front of the robot, grabs it with the given arm and moves to a carrying
    pose, announcing progress via speech.

    :param robot: robot object (arms, reasoner, speech, ...)
    :param selectedArm: arm interface to grab with
    :return: None (the state machine result is only logged)
    """
    rospy.loginfo("Starting to grab an item")

    import smach
    import robot_smach_states as states

    # Copied from RDO finale
    rospy.loginfo("Setting up state machine")
    sm = smach.StateMachine(outcomes=['Done', 'Aborted'])

    sm.userdata.operator_name = "operator"
    rospy.loginfo("Userdata set")

    # Start from a clean state: arms home, reasoner reset, grippers empty.
    robot.leftArm.reset_arm()
    robot.rightArm.reset_arm()
    robot.reasoner.reset()
    robot.reasoner.detach_all_from_gripper("/amigo/grippoint_left")
    robot.reasoner.detach_all_from_gripper("/amigo/grippoint_right")

    with sm:
        smach.StateMachine.add('ANNOUNCE_LOOK_FOR_DRINK',
                               states.Say(
                                   robot,
                                   "I wonder what objects I can see here!",
                                   block=False),
                               transitions={'spoken': 'LOOK'})

        # NOTE: an unused query ("current_exploration_target" conjunction)
        # was removed here; only query_object is consumed below.
        rospy.logerr("TODO Loy/Janno/Sjoerd: make query use current_object")
        query_object = Compound("position", "ObjectID",
                                Compound("point", "X", "Y", "Z"))

        smach.StateMachine.add('LOOK',
                               states.LookForObjectsAtPoint(
                                   robot, query_object,
                                   msgs.PointStamped(
                                       0.75, 0, 0.65,
                                       frame_id="/amigo/base_link")),
                               transitions={
                                   'looking': 'LOOK',
                                   'object_found': 'GRAB',
                                   'no_object_found': 'SAY_NO_DRINK',
                                   'abort': 'Aborted'
                               })

        query_grabpoint = Compound("position", "ObjectID",
                                   Compound("point", "X", "Y", "Z"))
        smach.StateMachine.add('GRAB',
                               states.GrabMachine(selectedArm, robot,
                                                  query_grabpoint),
                               transitions={
                                   'succeeded': 'CARRYING_POSE',
                                   'failed': 'REPORT_FAILED'
                               })

        smach.StateMachine.add('CARRYING_POSE',
                               states.Carrying_pose(selectedArm, robot),
                               transitions={
                                   'succeeded': 'SAY_SUCCEEDED',
                                   'failed': 'Done'
                               })

        smach.StateMachine.add(
            'SAY_NO_DRINK',
            states.Say_generated(
                robot,
                lambda userdata: "I can't find the drink I am looking for. You will have to get one yourself."
                .format(userdata.operator_name),
                input_keys=['operator_name']),
            transitions={'spoken': 'Done'})

        smach.StateMachine.add(
            'REPORT_FAILED',
            states.Say_generated(
                robot,
                lambda userdata: "I am sorry I could not get your drink.".
                format(userdata.operator_name),
                input_keys=['operator_name']),
            transitions={'spoken': 'Done'})

        smach.StateMachine.add(
            'SAY_SUCCEEDED',
            states.Say_generated(
                robot,
                lambda userdata: "Where would you like this?".
                format(userdata.operator_name),
                input_keys=['operator_name']),
            transitions={'spoken': 'Done'})

    rospy.loginfo("State machine set up, start execution...")
    result = sm.execute()
    rospy.loginfo("State machine executed. Result: {0}".format(result))