def take_tool(self, arm_index):
    '''Robot's response to TAKE_TOOL'''
    self._is_busy = True
    if self._demo_state == DemoState.READY_TO_TAKE:
        ## Robot closes the hand
        Arms.set_gripper_state(arm_index, GripperState.CLOSED, wait=True)
        ## Robot moves the hand near the camera to take a look at the tool
        self._move_to_arm_pose('look', arm_index, wait=True)
        self.tool_id = self.world.get_tool_id()
        if self.tool_id is None:
            ## Robot moves the arm back so the person can take the tool
            self._move_to_arm_pose('take', 0, wait=True)
            Response.say(RobotSpeech.ERROR_TOOL_NOT_RECOGNIZED)
            Response.perform_gaze_action(GazeGoal.SHAKE)
            self._demo_state = DemoState.NO_TOOL_NO_SURFACE
        else:
            self.session.new_action(self.tool_id)
            Response.say(RobotSpeech.RECOGNIZED_TOOL + str(self.tool_id))
            self._demo_state = DemoState.HAS_TOOL_NO_SURFACE
            self.detect_surface()
    else:
        Response.say(RobotSpeech.ERROR_NOT_IN_TAKE_STATE)
    rospy.loginfo('Current state: ' + str(self._demo_state))
    self._is_busy = False
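# The TAKE_TOOL handler above is one transition of a small finite state
# machine over DemoState values. A minimal, self-contained sketch of that
# transition (the state names mirror the snippet; everything else here is
# illustrative, not the project's API):
class DemoState(object):
    READY_TO_TAKE = 'READY_TO_TAKE'
    HAS_TOOL_NO_SURFACE = 'HAS_TOOL_NO_SURFACE'
    NO_TOOL_NO_SURFACE = 'NO_TOOL_NO_SURFACE'

def take_tool_transition(state, tool_id):
    '''Returns the next state for a TAKE_TOOL event.'''
    if state != DemoState.READY_TO_TAKE:
        return state  # command is ignored outside the take state
    if tool_id is None:
        return DemoState.NO_TOOL_NO_SURFACE  # tool not recognized
    return DemoState.HAS_TOOL_NO_SURFACE  # tool recognized

assert take_tool_transition(DemoState.READY_TO_TAKE, 3) == \
    DemoState.HAS_TOOL_NO_SURFACE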
def _get_arm_states(self):
    '''Returns the current arm states in the right format'''
    abs_ee_poses = [Arms.get_ee_state(0), Arms.get_ee_state(1)]
    joint_poses = [Arms.get_joint_state(0), Arms.get_joint_state(1)]
    states = [None, None]
    for arm_index in [0, 1]:
        nearest_obj = self.world.get_nearest_object(
            abs_ee_poses[arm_index])
        if nearest_obj is None:
            states[arm_index] = ArmState(ArmState.ROBOT_BASE,
                                         abs_ee_poses[arm_index],
                                         joint_poses[arm_index], Object())
        else:
            # Relative to the nearest object
            rel_ee_pose = World.transform(
                abs_ee_poses[arm_index], 'base_link', nearest_obj.name)
            states[arm_index] = ArmState(ArmState.OBJECT, rel_ee_pose,
                                         joint_poses[arm_index],
                                         nearest_obj)
    return states
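# A stubbed sketch of the frame choice _get_arm_states makes: with no object
# near the end effector the pose stays absolute (robot base frame), otherwise
# it is re-expressed relative to the nearest object. Plain tuples stand in
# for ArmState/World here; this is illustrative, not the project's API:
def choose_arm_frame(abs_pose, nearest_obj, to_object_frame):
    '''Mirrors the branch in _get_arm_states on plain data.'''
    if nearest_obj is None:
        return ('ROBOT_BASE', abs_pose, None)  # absolute pose
    # Relative: transform the pose into the object's frame
    return ('OBJECT', to_object_frame(abs_pose, nearest_obj), nearest_obj)

# Example with an identity "transform" standing in for World.transform:
print(choose_arm_frame((0.5, 0.0, 0.8), None, lambda p, o: p))
print(choose_arm_frame((0.5, 0.0, 0.8), 'cup', lambda p, o: p))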
def __init__(self):
    self.recipe = Loader.Recipe_Loader()
    self.left_arm = Arms.LeftArm("Left", "I")
    self.right_arm = Arms.RightArm("Right", "I")
    self.both_arms = Arms.Arms("Both", "S")
    self.left_stepper = Steppers.LeftStepper('L', '+', 0)
    self.right_stepper = Steppers.RightStepper('R', '+', 0)
    self.both_stepper = Steppers.Stepper('B', '+', 0)
    self.initArms()
    self.initSteppers()
def __init__(self):
    self.database = db.Database()
    self.left_arm = Arms.LeftArm("Left", "I")
    self.right_arm = Arms.RightArm("Right", "I")
    self.both_arms = Arms.Arms("Both", "S")
    self.left_stepper = Steppers.LeftStepper('L', '+', 0)
    self.right_stepper = Steppers.RightStepper('R', '+', 0)
    self.both_stepper = Steppers.Stepper('B', '+', 0)
    self.initArms()
    self.initSteppers()
def _save_arm_to_trajectory(self):
    '''Saves current arm state into continuous trajectory'''
    if Interaction._arm_trajectory is not None:
        if not Interaction._is_relative_motion:
            states = self._get_arm_states()
        else:
            # Record absolute poses in the robot base frame
            states = [ArmState(ArmState.ROBOT_BASE,
                               Arms.get_ee_state(a_ind),
                               Arms.get_joint_state(a_ind), Object())
                      for a_ind in [0, 1]]
        Interaction._arm_trajectory.rArm.append(states[0])
        Interaction._arm_trajectory.lArm.append(states[1])
        Interaction._arm_trajectory.timing.append(
            rospy.Time.now() - Interaction._trajectory_start_time)
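# The trajectory samplers in these snippets timestamp each sample relative
# to _trajectory_start_time, so the recorded timing array starts near zero
# regardless of wall-clock time. A ROS-free sketch of that bookkeeping
# (TrajectoryRecorder is illustrative, not part of the project):
import time

class TrajectoryRecorder(object):
    def __init__(self):
        self.samples = []
        self.timing = []
        self._start = time.time()  # stands in for rospy.Time.now()

    def record(self, state):
        self.samples.append(state)
        self.timing.append(time.time() - self._start)  # offset from start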
def _is_reachable(self):
    '''Checks if there is an IK solution for action step'''
    dummy, is_reachable = Arms.solve_ik_for_arm(self.arm_index,
                                                self.get_target())
    rospy.loginfo('Reachability of pose in ActionStepMarker: ' +
                  str(is_reachable))
    return is_reachable
def close_hand(self, arm_index):
    '''Closes gripper on the indicated side'''
    if Arms.set_gripper_state(arm_index, GripperState.CLOSED):
        speech_response = Response.close_responses[arm_index]
        return [speech_response, Response.glance_actions[arm_index]]
    else:
        return [Response.already_closed_responses[arm_index],
                Response.glance_actions[arm_index]]
def __init__(self):
    self._servos = LX16AServos()
    self._servoManager = SmartServoManager(lX16AServos=self._servos,
                                           ramp=0, maxSpeed=1)
    self._arms = Arms(self._servoManager)
    self._neck = Neck(self._servoManager)
    self._servoManager.Start()
    self._neck.SetLeftRight(0)
    self._neck.SetUpDown(0)
    self._bodyLeds = RgbLeds([
        my_path + '/../Gfx/Body/hearth2.gif',
        my_path + '/../../RoobertGifs/e8nZC.gif',
        my_path + '/../../RoobertGifs/U9LwW86.gif',
        my_path + '/../../RoobertGifs/Spin_Toad.gif',
        my_path + '/../../RoobertGifs/haleye.gif',
        my_path + '/../../RoobertGifs/Yoshi_render.gif'
    ])
def close_hand(self, arm_index):
    '''Closes gripper on the indicated side'''
    if Arms.set_gripper_state(arm_index, GripperState.CLOSED):
        speech_response = Response.close_responses[arm_index]
        if (Interaction._is_programming and self.session.n_actions() > 0):
            self.save_gripper_step(arm_index, GripperState.CLOSED)
            speech_response = (speech_response + ' ' +
                               RobotSpeech.STEP_RECORDED)
        return [speech_response, Response.glance_actions[arm_index]]
    else:
        return [Response.already_closed_responses[arm_index],
                Response.glance_actions[arm_index]]
def __init__(self):
    self.arms = Arms()
    self.world = World()
    self.session = Session(object_list=self.world.get_frame_list(),
                           is_debug=True)
    self._viz_publisher = rospy.Publisher('visualization_marker_array',
                                          MarkerArray)
    rospy.Subscriber('recognized_command', Command, self.speech_command_cb)
    rospy.Subscriber('gui_command', GuiCommand, self.gui_command_cb)
    self._undo_function = None
    self.responses = {
        Command.TEST_MICROPHONE: Response(Interaction.empty_response,
                                          [RobotSpeech.TEST_RESPONSE,
                                           GazeGoal.NOD]),
        Command.NEW_DEMONSTRATION: Response(self.create_action, None),
        Command.TAKE_TOOL: Response(self.take_tool, 0),
        Command.RELEASE_TOOL: Response(self.open_hand, 0),
        Command.DETECT_SURFACE: Response(self.record_object_pose, None),
        Command.START_RECORDING: Response(self.start_recording, None),
        Command.STOP_RECORDING: Response(self.stop_recording, None),
        Command.REPLAY_DEMONSTRATION: Response(self.execute_action, None),
        Command.SAVE_ARM_POSE: Response(self.save_arm_pose, None)
    }
    rospy.loginfo('Will wait until arms ready to respond.')
    while ((self.arms.get_ee_state(0) is None) or
           (self.arms.get_ee_state(1) is None)):
        time.sleep(0.1)
    rospy.loginfo('Starting to move to the initial pose.')
    self._move_to_arm_pose('initial', 0)
    self._move_to_arm_pose('initial', 1)
    rospy.loginfo('Interaction initialized.')
def __init__(self):
    self.arms = Arms()
    self.world = World()
    self.session = Session(object_list=self.world.get_frame_list(),
                           is_debug=True)
    self._viz_publisher = rospy.Publisher('visualization_marker_array',
                                          MarkerArray)
    rospy.Subscriber('recognized_command', Command, self.speech_command_cb)
    rospy.Subscriber('gui_command', GuiCommand, self.gui_command_cb)
    self._undo_function = None
    self.responses = {
        Command.TEST_MICROPHONE: Response(Interaction.empty_response,
                                          [RobotSpeech.TEST_RESPONSE,
                                           GazeGoal.NOD]),
        Command.RELAX_RIGHT_ARM: Response(self.relax_arm, 0),
        Command.RELAX_LEFT_ARM: Response(self.relax_arm, 1),
        Command.OPEN_RIGHT_HAND: Response(self.open_hand, 0),
        Command.OPEN_LEFT_HAND: Response(self.open_hand, 1),
        Command.CLOSE_RIGHT_HAND: Response(self.close_hand, 0),
        Command.CLOSE_LEFT_HAND: Response(self.close_hand, 1),
        Command.STOP_EXECUTION: Response(self.stop_execution, None),
        Command.UNDO: Response(self.undo, None),
        Command.DELETE_ALL_STEPS: Response(self.delete_all_steps, None),
        Command.DELETE_LAST_STEP: Response(self.delete_last_step, None),
        Command.REPEAT_LAST_STEP: Response(self.repeat_step, None),
        Command.FREEZE_RIGHT_ARM: Response(self.freeze_arm, 0),
        Command.FREEZE_LEFT_ARM: Response(self.freeze_arm, 1),
        Command.CREATE_NEW_ACTION: Response(self.create_action, None),
        Command.EXECUTE_ACTION: Response(self.execute_action, None),
        Command.NEXT_ACTION: Response(self.next_action, None),
        Command.PREV_ACTION: Response(self.previous_action, None),
        Command.SAVE_POSE: Response(self.save_step, None),
        Command.RECORD_OBJECT_POSE: Response(
            self.record_object_pose, None),
        Command.START_RECORDING_MOTION: Response(
            self.start_recording, False),
        Command.START_RECORDING_RELATIVE_MOTION: Response(
            self.start_recording, True),
        Command.STOP_RECORDING_MOTION: Response(self.stop_recording, None)
    }
    rospy.loginfo('Interaction initialized.')
def __init__(self):
    self.arms = Arms()
    self.world = World()
    self.session = Session(object_list=self.world.get_frame_list(),
                           is_debug=True)
    self._viz_publisher = rospy.Publisher('visualization_marker_array',
                                          MarkerArray)
    self._demo_state = None
    self._is_busy = True
    rospy.Subscriber('recognized_command', Command, self.speech_command_cb)
    rospy.Subscriber('gui_command', GuiCommand, self.gui_command_cb)
    self.responses = {
        Command.TEST_MICROPHONE: Response(self.default_response,
                                          [RobotSpeech.TEST_RESPONSE,
                                           GazeGoal.NOD]),
        Command.TAKE_TOOL: Response(self.take_tool, 0),
        Command.RELEASE_TOOL: Response(self.release_tool, 0),
        Command.START_RECORDING: Response(self.start_recording, None),
        Command.STOP_RECORDING: Response(self.stop_recording, None),
        Command.REPLAY_DEMONSTRATION: Response(self.replay_demonstration,
                                               None),
        Command.DETECT_SURFACE: Response(self.detect_surface, None)
    }
    rospy.loginfo('Will wait until arms ready to respond.')
    while ((self.arms.get_ee_state(0) is None) or
           (self.arms.get_ee_state(1) is None)):
        time.sleep(0.1)
    rospy.loginfo('Starting to move to the initial pose.')
    # TODO: Make it possible to take with either hand
    self._move_to_arm_pose('take', 0)
    self._move_to_arm_pose('away', 1)
    self._wait_for_arms()
    self._demo_state = DemoState.READY_TO_TAKE
    Response.say(RobotSpeech.HAND_TOOL_REQUEST)
    Response.perform_gaze_action(GazeGoal.GLANCE_RIGHT_EE)
    self._is_busy = False
    rospy.loginfo('Interaction initialized.')
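# The __init__ snippets above share one dispatch pattern: a dict from
# command name to a Response pairing a callback with a fixed parameter
# (e.g. arm index 0 for TAKE_TOOL). A dependency-free sketch of that table;
# ResponseStub is illustrative and omits the speech/gaze handling of the
# real Response class:
class ResponseStub(object):
    def __init__(self, callback, param):
        self.callback = callback
        self.param = param

    def respond(self):
        return self.callback(self.param)

responses = {
    'TAKE_TOOL': ResponseStub(lambda arm: 'taking tool with arm %d' % arm, 0)
}
print(responses['TAKE_TOOL'].respond())  # -> taking tool with arm 0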
def start_recording(self, relative_motion=False):
    '''Starts recording continuous motion'''
    if (self.session.n_actions() > 0):
        if (Interaction._is_programming):
            if (not Interaction._is_recording_motion):
                Interaction._is_recording_motion = True
                Interaction._arm_trajectory = ArmTrajectory()
                Interaction._trajectory_start_time = rospy.Time.now()
                Interaction._is_relative_motion = relative_motion
                if (relative_motion):
                    Interaction._relative_motion_start = [
                        Arms.get_ee_state(a_ind) for a_ind in [0, 1]]
                return [RobotSpeech.STARTED_RECORDING_MOTION, GazeGoal.NOD]
            else:
                return [RobotSpeech.ALREADY_RECORDING_MOTION,
                        GazeGoal.SHAKE]
        else:
            return ['Action ' + str(self.session.current_action_index) +
                    RobotSpeech.ERROR_NOT_IN_EDIT, GazeGoal.SHAKE]
    else:
        return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]
class Interaction:
    '''Finite state machine for the human interaction'''

    _is_programming = True
    _is_recording_motion = False
    _arm_trajectory = None
    _trajectory_start_time = None

    def __init__(self):
        self.arms = Arms()
        self.world = World()
        self.session = Session(object_list=self.world.get_frame_list(),
                               is_debug=True)
        self._viz_publisher = rospy.Publisher('visualization_marker_array',
                                              MarkerArray)
        rospy.Subscriber('recognized_command', Command,
                         self.speech_command_cb)
        rospy.Subscriber('gui_command', GuiCommand, self.gui_command_cb)
        self._undo_function = None
        self.responses = {
            Command.TEST_MICROPHONE: Response(Interaction.empty_response,
                                              [RobotSpeech.TEST_RESPONSE,
                                               GazeGoal.NOD]),
            Command.NEW_DEMONSTRATION: Response(self.create_action, None),
            Command.TAKE_TOOL: Response(self.take_tool, 0),
            Command.RELEASE_TOOL: Response(self.open_hand, 0),
            Command.DETECT_SURFACE: Response(self.record_object_pose, None),
            Command.START_RECORDING: Response(self.start_recording, None),
            Command.STOP_RECORDING: Response(self.stop_recording, None),
            Command.REPLAY_DEMONSTRATION: Response(self.execute_action,
                                                   None),
            Command.SAVE_ARM_POSE: Response(self.save_arm_pose, None)
        }
        rospy.loginfo('Will wait until arms ready to respond.')
        while ((self.arms.get_ee_state(0) is None) or
               (self.arms.get_ee_state(1) is None)):
            time.sleep(0.1)
        rospy.loginfo('Starting to move to the initial pose.')
        self._move_to_arm_pose('initial', 0)
        self._move_to_arm_pose('initial', 1)
        rospy.loginfo('Interaction initialized.')

    def load_known_arm_poses(self):
        '''This loads important poses from the hard drive'''
        # TODO
        pass

    def open_hand(self, arm_index):
        '''Opens gripper on the indicated side'''
        if self.arms.set_gripper_state(arm_index, GripperState.OPEN):
            speech_response = Response.open_responses[arm_index]
            if (Interaction._is_programming and
                    self.session.n_actions() > 0):
                self.save_gripper_step(arm_index, GripperState.OPEN)
                speech_response = (speech_response + ' ' +
                                   RobotSpeech.STEP_RECORDED)
            return [speech_response, Response.glance_actions[arm_index]]
        else:
            return [Response.already_open_responses[arm_index],
                    Response.glance_actions[arm_index]]

    def take_tool(self, arm_index):
        self.close_hand(arm_index)
        self._move_to_arm_pose('initial', arm_index)

    def close_hand(self, arm_index):
        '''Closes gripper on the indicated side'''
        if Arms.set_gripper_state(arm_index, GripperState.CLOSED):
            speech_response = Response.close_responses[arm_index]
            return [speech_response, Response.glance_actions[arm_index]]
        else:
            return [Response.already_closed_responses[arm_index],
                    Response.glance_actions[arm_index]]

    def relax_arm(self, arm_index):
        '''Relaxes arm on the indicated side'''
        if self.arms.set_arm_mode(arm_index, ArmMode.RELEASE):
            return [Response.release_responses[arm_index],
                    Response.glance_actions[arm_index]]
        else:
            return [Response.already_released_responses[arm_index],
                    Response.glance_actions[arm_index]]

    def freeze_arm(self, arm_index):
        '''Stiffens arm on the indicated side'''
        if self.arms.set_arm_mode(arm_index, ArmMode.HOLD):
            return [Response.hold_responses[arm_index],
                    Response.glance_actions[arm_index]]
        else:
            return [Response.already_holding_responses[arm_index],
                    Response.glance_actions[arm_index]]

    def edit_action(self, dummy=None):
        '''Goes back to edit mode'''
        if (self.session.n_actions() > 0):
            if (Interaction._is_programming):
                return [RobotSpeech.ALREADY_EDITING, GazeGoal.SHAKE]
            else:
                Interaction._is_programming = True
                return [RobotSpeech.SWITCH_TO_EDIT_MODE, GazeGoal.NOD]
        else:
            return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]

    def save_action(self, dummy=None):
        '''Goes out of edit mode'''
        self.session.save_current_action()
        Interaction._is_programming = False
        return [RobotSpeech.ACTION_SAVED + ' ' +
                str(self.session.current_action_index), GazeGoal.NOD]

    def create_action(self, dummy=None):
        '''Creates a new empty action'''
        self._move_to_arm_pose('take', 0)
        self.world.clear_all_objects()
        self.session.new_action()
        Interaction._is_programming = True
        return [RobotSpeech.SKILL_CREATED + ' ' +
                str(self.session.current_action_index), GazeGoal.NOD]

    def next_action(self, dummy=None):
        '''Switches to next action'''
        if (self.session.n_actions() > 0):
            if self.session.next_action(self.world.get_frame_list()):
                return [RobotSpeech.SWITCH_SKILL + ' ' +
                        str(self.session.current_action_index),
                        GazeGoal.NOD]
            else:
                return [RobotSpeech.ERROR_NEXT_SKILL + ' ' +
                        str(self.session.current_action_index),
                        GazeGoal.SHAKE]
        else:
            return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]

    def previous_action(self, dummy=None):
        '''Switches to previous action'''
        if (self.session.n_actions() > 0):
            if self.session.previous_action(self.world.get_frame_list()):
                return [RobotSpeech.SWITCH_SKILL + ' ' +
                        str(self.session.current_action_index),
                        GazeGoal.NOD]
            else:
                return [RobotSpeech.ERROR_PREV_SKILL + ' ' +
                        str(self.session.current_action_index),
                        GazeGoal.SHAKE]
        else:
            return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]

    def delete_all_steps(self, dummy=None):
        '''Deletes all steps in the current action'''
        if (self.session.n_actions() > 0):
            if (Interaction._is_programming):
                if self.session.n_frames() > 0:
                    self.session.clear_current_action()
                    self._undo_function = self._resume_all_steps
                    return [RobotSpeech.SKILL_CLEARED, GazeGoal.NOD]
                else:
                    return [RobotSpeech.SKILL_EMPTY, None]
            else:
                return ['Action ' +
                        str(self.session.current_action_index) +
                        RobotSpeech.ERROR_NOT_IN_EDIT, GazeGoal.SHAKE]
        else:
            return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]

    def _resume_all_steps(self):
        '''Resumes all steps after clearing'''
        self.session.undo_clear()
        return [RobotSpeech.ALL_POSES_RESUMED, GazeGoal.NOD]

    def _resume_last_step(self):
        '''Resumes last step after deleting'''
        self.session.resume_deleted_step()
        return [RobotSpeech.POSE_RESUMED, GazeGoal.NOD]

    def stop_execution(self, dummy=None):
        '''Stops ongoing execution'''
        if (self.arms.is_executing()):
            self.arms.stop_execution()
            return [RobotSpeech.STOPPING_EXECUTION, GazeGoal.NOD]
        else:
            return [RobotSpeech.ERROR_NO_EXECUTION, GazeGoal.SHAKE]

    def save_gripper_step(self, arm_index, gripper_state):
        '''Saves an action step that involves a gripper state change'''
        if (self.session.n_actions() > 0):
            if (Interaction._is_programming):
                states = self._get_arm_states()
                step = ActionStep()
                step.type = ActionStep.ARM_TARGET
                step.armTarget = ArmTarget(states[0], states[1], 0.2, 0.2)
                actions = [self.arms.get_gripper_state(0),
                           self.arms.get_gripper_state(1)]
                actions[arm_index] = gripper_state
                step.gripperAction = GripperAction(actions[0], actions[1])
                self.session.add_step_to_action(
                    step, self.world.get_frame_list())

    def start_recording(self, dummy=None):
        '''Starts recording continuous motion'''
        if (self.session.n_actions() > 0):
            if (Interaction._is_programming):
                if (not Interaction._is_recording_motion):
                    Interaction._is_recording_motion = True
                    Interaction._arm_trajectory = ArmTrajectory()
                    Interaction._trajectory_start_time = rospy.Time.now()
                    if self.session.n_frames() > 0:
                        self.session.clear_current_action()
                    return [RobotSpeech.STARTED_RECORDING_MOTION,
                            GazeGoal.NOD]
                else:
                    return [RobotSpeech.ALREADY_RECORDING_MOTION,
                            GazeGoal.SHAKE]
            else:
                return ['Action ' +
                        str(self.session.current_action_index) +
                        RobotSpeech.ERROR_NOT_IN_EDIT, GazeGoal.SHAKE]
        else:
            return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]

    def stop_recording(self, dummy=None):
        '''Stops recording continuous motion'''
        if (Interaction._is_recording_motion):
            Interaction._is_recording_motion = False
            traj_step = ActionStep()
            traj_step.type = ActionStep.ARM_TRAJECTORY
            waited_time = Interaction._arm_trajectory.timing[0]
            for i in range(len(Interaction._arm_trajectory.timing)):
                Interaction._arm_trajectory.timing[i] -= waited_time
                Interaction._arm_trajectory.timing[i] += rospy.Duration(0.1)
            # If motion was relative, record transformed pose
            traj_step.armTrajectory = ArmTrajectory(
                Interaction._arm_trajectory.rArm[:],
                Interaction._arm_trajectory.lArm[:],
                Interaction._arm_trajectory.timing[:],
                Interaction._arm_trajectory.rRefFrame,
                Interaction._arm_trajectory.lRefFrame,
                Interaction._arm_trajectory.rRefFrameObject,
                Interaction._arm_trajectory.lRefFrameObject)
            traj_step.gripperAction = GripperAction(
                self.arms.get_gripper_state(0),
                self.arms.get_gripper_state(1))
            self.session.add_step_to_action(traj_step,
                                            self.world.get_frame_list())
            Interaction._arm_trajectory = None
            Interaction._trajectory_start_time = None
            return [RobotSpeech.STOPPED_RECORDING_MOTION + ' ' +
                    RobotSpeech.STEP_RECORDED, GazeGoal.NOD]
        else:
            return [RobotSpeech.MOTION_NOT_RECORDING, GazeGoal.SHAKE]

    def _fix_trajectory_ref(self):
        '''Makes the reference frame of continuous trajectories uniform'''
        (r_ref, _), (_, r_ref_obj) = self._find_dominant_ref(
            Interaction._arm_trajectory.rArm)
        (l_ref, _), (_, l_ref_obj) = self._find_dominant_ref(
            Interaction._arm_trajectory.lArm)
        for i in range(len(Interaction._arm_trajectory.timing)):
            Interaction._arm_trajectory.rArm[i] = World.convert_ref_frame(
                Interaction._arm_trajectory.rArm[i], r_ref, r_ref_obj)
            Interaction._arm_trajectory.lArm[i] = World.convert_ref_frame(
                Interaction._arm_trajectory.lArm[i], l_ref, l_ref_obj)
        Interaction._arm_trajectory.rRefFrame = r_ref
        Interaction._arm_trajectory.rRefFrameObject = r_ref_obj
        Interaction._arm_trajectory.lRefFrame = l_ref
        Interaction._arm_trajectory.lRefFrameObject = l_ref_obj

    def _find_dominant_ref(self, arm_traj):
        '''Finds the most dominant reference frame
        in a continuous trajectory'''
        def addEnt(dic, ent):
            key = (ent.refFrame, ent.refFrameObject.name)
            if (key in dic):
                dic[key] = (1 + dic[key][0], dic[key][1])
            else:
                dic[key] = (1, ent.refFrameObject)
            return dic
        cnt = reduce(addEnt, arm_traj, {})
        return reduce(lambda a, b: a if a[1][0] > b[1][0] else b,
                      cnt.items())

    def _save_arm_to_trajectory(self):
        '''Saves current arm state into continuous trajectory'''
        if (Interaction._arm_trajectory is not None):
            states = self._get_arm_states()
            Interaction._arm_trajectory.rArm.append(states[0])
            Interaction._arm_trajectory.lArm.append(states[1])
            Interaction._arm_trajectory.timing.append(
                rospy.Time.now() - Interaction._trajectory_start_time)

    def _move_to_arm_pose(self, pose_name, arm_index):
        '''Moves the robot's arm to a pre-specified arm pose'''
        action = self.session.pose_set[pose_name]
        if action is None:
            rospy.logwarn('Arm pose does not exist: ' + pose_name)
        else:
            step = action.get_step(0)
            if arm_index == 0:
                self.arms.start_move_to_pose(step.armTarget.rArm, 0)
                self.arms.set_gripper_state(0, step.gripperAction.rGripper)
            else:
                self.arms.start_move_to_pose(step.armTarget.lArm, 1)
                self.arms.set_gripper_state(1, step.gripperAction.lGripper)
            rospy.loginfo('Moved arm ' + str(arm_index) +
                          ' to pose ' + pose_name)

    def save_arm_pose(self, dummy=None):
        '''Saves current arm state as an action step'''
        states = self._get_arm_states()
        step = ActionStep()
        step.type = ActionStep.ARM_TARGET
        step.armTarget = ArmTarget(states[0], states[1], 0.2, 0.2)
        step.gripperAction = GripperAction(
            self.arms.get_gripper_state(0),
            self.arms.get_gripper_state(1))
        self.session.save_arm_pose(step, self.world.get_frame_list())
        return [RobotSpeech.STEP_RECORDED, GazeGoal.NOD]

    def _get_arm_states(self):
        '''Returns the current arm states in the right format'''
        abs_ee_poses = [Arms.get_ee_state(0), Arms.get_ee_state(1)]
        joint_poses = [Arms.get_joint_state(0), Arms.get_joint_state(1)]
        states = [None, None]
        for arm_index in [0, 1]:
            nearest_obj = self.world.get_nearest_object(
                abs_ee_poses[arm_index])
            if nearest_obj is None:
                states[arm_index] = ArmState(ArmState.ROBOT_BASE,
                                             abs_ee_poses[arm_index],
                                             joint_poses[arm_index],
                                             Object())
            else:
                # Relative to the nearest object
                rel_ee_pose = World.transform(
                    abs_ee_poses[arm_index], 'base_link', nearest_obj.name)
                states[arm_index] = ArmState(ArmState.OBJECT, rel_ee_pose,
                                             joint_poses[arm_index],
                                             nearest_obj)
        return states

    def execute_action(self, dummy=None):
        '''Starts the execution of the current action'''
        if (self.session.n_actions() > 0):
            if (self.session.n_frames() > 0):
                self.session.save_current_action()
                action = self.session.get_current_action()
                if (action.is_object_required()):
                    if (self.world.update_object_pose()):
                        self.session.get_current_action().update_objects(
                            self.world.get_frame_list())
                        self.arms.start_execution(action)
                    else:
                        return [RobotSpeech.OBJECT_NOT_DETECTED,
                                GazeGoal.SHAKE]
                else:
                    self.arms.start_execution(action)
                return [RobotSpeech.START_EXECUTION + ' ' +
                        str(self.session.current_action_index), None]
            else:
                return [RobotSpeech.EXECUTION_ERROR_NOPOSES + ' ' +
                        str(self.session.current_action_index),
                        GazeGoal.SHAKE]
        else:
            return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]

    def speech_command_cb(self, command):
        '''Callback for when a speech command is received'''
        if command.command in self.responses.keys():
            rospy.loginfo('\033[32m Calling response for command ' +
                          command.command + '\033[0m')
            response = self.responses[command.command]
            if (not self.arms.is_executing()):
                if (self._undo_function is not None):
                    response.respond()
                    self._undo_function = None
                else:
                    response.respond()
            else:
                if command.command == Command.STOP_EXECUTION:
                    response.respond()
                else:
                    rospy.logwarn('Ignoring speech command during ' +
                                  'execution: ' + command.command)
        else:
            switch_command = 'SWITCH_TO_ACTION'
            if (switch_command in command.command):
                action_no = command.command[
                    len(switch_command):len(command.command)]
                action_no = int(action_no)
                if (self.session.n_actions() > 0):
                    self.session.switch_to_action(
                        action_no, self.world.get_frame_list())
                    response = Response(Interaction.empty_response,
                                        [RobotSpeech.SWITCH_SKILL +
                                         str(action_no), GazeGoal.NOD])
                else:
                    response = Response(Interaction.empty_response,
                                        [RobotSpeech.ERROR_NO_SKILLS,
                                         GazeGoal.SHAKE])
                response.respond()
            else:
                rospy.logwarn('\033[32m This command (' + command.command +
                              ') is unknown. \033[0m')

    def gui_command_cb(self, command):
        '''Callback for when a GUI command is received'''
        if (not self.arms.is_executing()):
            if (self.session.n_actions() > 0):
                if (command.command == GuiCommand.SWITCH_TO_ACTION):
                    action_no = command.param
                    self.session.switch_to_action(
                        action_no, self.world.get_frame_list())
                    response = Response(Interaction.empty_response,
                                        [RobotSpeech.SWITCH_SKILL +
                                         str(action_no), GazeGoal.NOD])
                    response.respond()
                elif (command.command == GuiCommand.SELECT_ACTION_STEP):
                    step_no = command.param
                    self.session.select_action_step(step_no)
                    rospy.loginfo('Selected action step ' + str(step_no))
                else:
                    rospy.logwarn('\033[32m This command (' +
                                  command.command +
                                  ') is unknown. \033[0m')
            else:
                response = Response(Interaction.empty_response,
                                    [RobotSpeech.ERROR_NO_SKILLS,
                                     GazeGoal.SHAKE])
                response.respond()
        else:
            rospy.logwarn('Ignoring GUI command during execution: ' +
                          command.command)

    def update(self):
        '''General update for the main loop'''
        self.arms.update()
        if (self.arms.status != ExecutionStatus.NOT_EXECUTING):
            if (self.arms.status != ExecutionStatus.EXECUTING):
                self._end_execution()
        if (Interaction._is_recording_motion):
            self._save_arm_to_trajectory()
        is_world_changed = self.world.update()
        if (self.session.n_actions() > 0):
            action = self.session.get_current_action()
            action.update_viz()
            r_target = action.get_requested_targets(0)
            if (r_target is not None):
                self.arms.start_move_to_pose(r_target, 0)
                action.reset_targets(0)
            l_target = action.get_requested_targets(1)
            if (l_target is not None):
                self.arms.start_move_to_pose(l_target, 1)
                action.reset_targets(1)
            action.delete_requested_steps()
            states = self._get_arm_states()
            action.change_requested_steps(states[0], states[1])
            if (is_world_changed):
                rospy.loginfo('The world has changed.')
                self.session.get_current_action().update_objects(
                    self.world.get_frame_list())
        time.sleep(0.1)

    def _end_execution(self):
        '''Responses for when the action execution ends'''
        if (self.arms.status == ExecutionStatus.SUCCEEDED):
            Response.say(RobotSpeech.EXECUTION_ENDED)
            Response.perform_gaze_action(GazeGoal.NOD)
        elif (self.arms.status == ExecutionStatus.PREEMPTED):
            Response.say(RobotSpeech.EXECUTION_PREEMPTED)
            Response.perform_gaze_action(GazeGoal.SHAKE)
        else:
            Response.say(RobotSpeech.EXECUTION_ERROR_NOIK)
            Response.perform_gaze_action(GazeGoal.SHAKE)
        self.arms.status = ExecutionStatus.NOT_EXECUTING

    def record_object_pose(self, dummy=None):
        '''Makes the robot look for a table and objects'''
        if (self.world.update_object_pose()):
            if (self.session.n_actions() > 0):
                self.session.get_current_action().update_objects(
                    self.world.get_frame_list())
            return [RobotSpeech.START_STATE_RECORDED, GazeGoal.NOD]
        else:
            return [RobotSpeech.OBJECT_NOT_DETECTED, GazeGoal.SHAKE]

    def save_experiment_state(self):
        '''Saves session state'''
        self.session.save_current_action()

    @staticmethod
    def empty_response(responses):
        '''Default response to speech commands'''
        return responses
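# _find_dominant_ref above is a frequency vote implemented with reduce over
# a dict accumulator: the (refFrame, object-name) pair seen most often wins,
# so _fix_trajectory_ref can re-express every sample in that single frame.
# The same vote on plain tuples, using collections.Counter (an equivalent
# reformulation, not the project's code):
from collections import Counter

def dominant_ref(frames):
    '''frames: list of (ref_frame, object_name) tuples; returns the winner.'''
    return Counter(frames).most_common(1)[0][0]

samples = [('OBJECT', 'cup'), ('OBJECT', 'cup'), ('ROBOT_BASE', '')]
assert dominant_ref(samples) == ('OBJECT', 'cup')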
class HardwareDevices():
    _bodyLeds = None
    _arms = None
    _neck = None
    _servoManager = None
    _servos = None
    __singleton = None
    _released = False

    @staticmethod
    def singleton():
        if (HardwareDevices.__singleton is None):
            HardwareDevices.__singleton = HardwareDevices()
        return HardwareDevices.__singleton

    @property
    def arms(self):
        return self._arms

    @property
    def neck(self):
        return self._neck

    @property
    def BodyLeds(self):
        return self._bodyLeds

    def __init__(self):
        self._servos = LX16AServos()
        self._servoManager = SmartServoManager(lX16AServos=self._servos,
                                               ramp=0, maxSpeed=1)
        self._arms = Arms(self._servoManager)
        self._neck = Neck(self._servoManager)
        self._servoManager.Start()
        self._neck.SetLeftRight(0)
        self._neck.SetUpDown(0)
        self._bodyLeds = RgbLeds([
            my_path + '/../Gfx/Body/hearth2.gif',
            my_path + '/../../RoobertGifs/e8nZC.gif',
            my_path + '/../../RoobertGifs/U9LwW86.gif',
            my_path + '/../../RoobertGifs/Spin_Toad.gif',
            my_path + '/../../RoobertGifs/haleye.gif',
            my_path + '/../../RoobertGifs/Yoshi_render.gif'
        ])

    def Release(self):
        if not self._released:
            self._released = True
            print("releasing hardware devices")
            if (self._bodyLeds is not None):
                self._bodyLeds.Release()
            if (self._arms is not None):
                self._arms.Release()
            if (self._neck is not None):
                self._neck.Release()
            if (self._servoManager is not None):
                self._servoManager.Release()
            if (self._servos is not None):
                self._servos.Release()

    def __del__(self):
        self.Release()
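# HardwareDevices pairs a lazy singleton with an idempotent Release() guarded
# by a _released flag, so explicit shutdown and __del__ can both call it
# safely. A minimal sketch of that pattern (Device is illustrative):
class Device(object):
    __singleton = None

    @staticmethod
    def singleton():
        if Device.__singleton is None:
            Device.__singleton = Device()
        return Device.__singleton

    def __init__(self):
        self._released = False

    def Release(self):
        if not self._released:  # cleanup runs at most once
            self._released = True
            print('releasing device')

    def __del__(self):
        self.Release()

dev = Device.singleton()
dev.Release()
dev.Release()  # no-op: the flag short-circuits the second call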
class Interaction:
    '''Finite state machine for the human interaction'''

    _arm_trajectory = None
    _trajectory_start_time = None

    def __init__(self):
        self.arms = Arms()
        self.world = World()
        self.session = Session(object_list=self.world.get_frame_list(),
                               is_debug=True)
        self._viz_publisher = rospy.Publisher('visualization_marker_array',
                                              MarkerArray)
        self._demo_state = None
        self._is_busy = True
        rospy.Subscriber('recognized_command', Command,
                         self.speech_command_cb)
        rospy.Subscriber('gui_command', GuiCommand, self.gui_command_cb)
        self.responses = {
            Command.TEST_MICROPHONE: Response(self.default_response,
                                              [RobotSpeech.TEST_RESPONSE,
                                               GazeGoal.NOD]),
            Command.TAKE_TOOL: Response(self.take_tool, 0),
            Command.RELEASE_TOOL: Response(self.release_tool, 0),
            Command.START_RECORDING: Response(self.start_recording, None),
            Command.STOP_RECORDING: Response(self.stop_recording, None),
            Command.REPLAY_DEMONSTRATION: Response(
                self.replay_demonstration, None),
            Command.DETECT_SURFACE: Response(self.detect_surface, None)
        }
        rospy.loginfo('Will wait until arms ready to respond.')
        while ((self.arms.get_ee_state(0) is None) or
               (self.arms.get_ee_state(1) is None)):
            time.sleep(0.1)
        rospy.loginfo('Starting to move to the initial pose.')
        # TODO: Make it possible to take with either hand
        self._move_to_arm_pose('take', 0)
        self._move_to_arm_pose('away', 1)
        self._wait_for_arms()
        self._demo_state = DemoState.READY_TO_TAKE
        Response.say(RobotSpeech.HAND_TOOL_REQUEST)
        Response.perform_gaze_action(GazeGoal.GLANCE_RIGHT_EE)
        self._is_busy = False
        rospy.loginfo('Interaction initialized.')

    def take_tool(self, arm_index):
        '''Robot's response to TAKE_TOOL'''
        self._is_busy = True
        if self._demo_state == DemoState.READY_TO_TAKE:
            ## Robot closes the hand
            Arms.set_gripper_state(arm_index, GripperState.CLOSED,
                                   wait=True)
            ## Robot moves the hand near the camera to take a look at the tool
            self._move_to_arm_pose('look', arm_index, wait=True)
            self.tool_id = self.world.get_tool_id()
            if self.tool_id is None:
                ## Robot moves the arm back so the person can take the tool
                self._move_to_arm_pose('take', 0, wait=True)
                Response.say(RobotSpeech.ERROR_TOOL_NOT_RECOGNIZED)
                Response.perform_gaze_action(GazeGoal.SHAKE)
                self._demo_state = DemoState.NO_TOOL_NO_SURFACE
            else:
                self.session.new_action(self.tool_id)
                Response.say(RobotSpeech.RECOGNIZED_TOOL +
                             str(self.tool_id))
                self._demo_state = DemoState.HAS_TOOL_NO_SURFACE
                self.detect_surface()
        else:
            Response.say(RobotSpeech.ERROR_NOT_IN_TAKE_STATE)
        rospy.loginfo('Current state: ' + str(self._demo_state))
        self._is_busy = False

    def detect_surface(self, dummy=None):
        self._is_busy = True
        if self._demo_state == DemoState.HAS_TOOL_NO_SURFACE:
            ## Robot moves the arm away and looks at the surface
            self._move_to_arm_pose('away', 0, wait=True)
            self.surface = self.world.get_surface()
            if self.surface is None:
                Response.say(RobotSpeech.ERROR_NO_SURFACE)
                Response.perform_gaze_action(GazeGoal.SHAKE)
            else:
                Response.say(RobotSpeech.SURFACE_DETECTED)
                # Right arm (0) only for now; see the TODO in __init__
                self._move_to_arm_pose('ready', 0, wait=True)
                Response.say(RobotSpeech.READY_FOR_DEMO)
                self._demo_state = DemoState.READY_FOR_DEMO
        self._is_busy = False

    def release_tool(self, arm_index):
        self._is_busy = True
        if (self._demo_state != DemoState.READY_TO_TAKE and
                self._demo_state != DemoState.PLAYING_DEMO and
                self._demo_state != DemoState.RECORDING_DEMO):
            self._move_to_arm_pose('take', 0, wait=True)
            self.arms.set_gripper_state(arm_index, GripperState.OPEN,
                                        wait=True)
            Response.say(RobotSpeech.TOOL_RELEASED)
            Response.perform_gaze_action(GazeGoal.GLANCE_RIGHT_EE)
            self._demo_state = DemoState.READY_TO_TAKE
        else:
            Response.perform_gaze_action(GazeGoal.SHAKE)
            Response.say(RobotSpeech.ERROR_NOT_IN_RELEASE_STATE)
        rospy.loginfo('Current state: ' + str(self._demo_state))
        self._is_busy = False

    def start_recording(self, dummy=None):
        '''Starts recording continuous motion'''
        self._is_busy = True
        if (self._demo_state == DemoState.READY_FOR_DEMO or
                self._demo_state == DemoState.HAS_RECORDED_DEMO):
            Interaction._arm_trajectory = ArmTrajectory()
            Interaction._trajectory_start_time = rospy.Time.now()
            if self.session.n_frames() > 0:
                self.session.clear_current_action()
            self.relax_arm(0)
            self._demo_state = DemoState.RECORDING_DEMO
            Response.say(RobotSpeech.STARTED_RECORDING)
            Response.perform_gaze_action(GazeGoal.NOD)
        elif (self._demo_state == DemoState.RECORDING_DEMO):
            Response.perform_gaze_action(GazeGoal.SHAKE)
            Response.say(RobotSpeech.ERROR_ALREADY_RECORDING)
        else:
            Response.perform_gaze_action(GazeGoal.SHAKE)
            Response.say(RobotSpeech.ERROR_NOT_READY_TO_RECORD)
        rospy.loginfo('Current state: ' + str(self._demo_state))
        self._is_busy = False

    def stop_recording(self, dummy=None):
        '''Stops recording continuous motion'''
        self._is_busy = True
        if (self._demo_state == DemoState.RECORDING_DEMO):
            traj_step = ActionStep()
            traj_step.type = ActionStep.ARM_TRAJECTORY
            waited_time = Interaction._arm_trajectory.timing[0]
            for i in range(len(Interaction._arm_trajectory.timing)):
                Interaction._arm_trajectory.timing[i] -= waited_time
                Interaction._arm_trajectory.timing[i] += rospy.Duration(0.1)
            # If motion was relative, record transformed pose
            traj_step.armTrajectory = ArmTrajectory(
                Interaction._arm_trajectory.rArm[:],
                Interaction._arm_trajectory.lArm[:],
                Interaction._arm_trajectory.timing[:],
                Interaction._arm_trajectory.rRefFrame,
                Interaction._arm_trajectory.lRefFrame,
                Interaction._arm_trajectory.rRefFrameObject,
                Interaction._arm_trajectory.lRefFrameObject)
            traj_step.gripperAction = GripperAction(
                self.arms.get_gripper_state(0),
                self.arms.get_gripper_state(1))
            self.session.add_step_to_action(traj_step,
                                            self.world.get_frame_list())
            Interaction._arm_trajectory = None
            Interaction._trajectory_start_time = None
            self.session.save_current_action()
            self.freeze_arm(0)
            self._demo_state = DemoState.HAS_RECORDED_DEMO
            Response.say(RobotSpeech.STOPPED_RECORDING)
            Response.perform_gaze_action(GazeGoal.NOD)
        else:
            Response.say(RobotSpeech.ERROR_NOT_RECORDING)
            Response.perform_gaze_action(GazeGoal.SHAKE)
        rospy.loginfo('Current state: ' + str(self._demo_state))
        self._is_busy = False

    def replay_demonstration(self, dummy=None):
        '''Starts the execution of the current demonstration'''
        self._is_busy = True
        execution_z_offset = 0.00
        if (self._demo_state == DemoState.HAS_RECORDED_DEMO):
            self.session.save_current_action()
            action = self.session.get_current_action()
            self.arms.start_execution(action, execution_z_offset)
            Response.say(RobotSpeech.STARTED_REPLAY)
            self._demo_state = DemoState.PLAYING_DEMO
        else:
            Response.say(RobotSpeech.ERROR_CANNOT_REPLAY)
            Response.perform_gaze_action(GazeGoal.SHAKE)
        rospy.loginfo('Current state: ' + str(self._demo_state))
        self._is_busy = False

    def _end_replay(self):
        '''Responses for when the action execution ends'''
        if (self.arms.status == ExecutionStatus.SUCCEEDED):
            if self._demo_state == DemoState.PLAYING_DEMO:
                Response.say(RobotSpeech.ENDED_REPLAY)
                Response.perform_gaze_action(GazeGoal.NOD)
                self._demo_state = DemoState.HAS_RECORDED_DEMO
            else:
                rospy.loginfo('Non-replay motion successful.')
        elif (self.arms.status == ExecutionStatus.PREEMPTED):
            if self._demo_state == DemoState.PLAYING_DEMO:
                Response.say(RobotSpeech.ERROR_PREEMPTED_REPLAY)
                Response.perform_gaze_action(GazeGoal.SHAKE)
                self._demo_state = DemoState.HAS_RECORDED_DEMO
            else:
                rospy.loginfo('Non-replay motion pre-empted.')
        else:
            if self._demo_state == DemoState.PLAYING_DEMO:
                Response.say(RobotSpeech.ERROR_REPLAY_NOT_POSSIBLE)
                Response.perform_gaze_action(GazeGoal.SHAKE)
                self._demo_state = DemoState.HAS_RECORDED_DEMO
            else:
                rospy.loginfo('Non-replay motion has no IK.')
        self.arms.status = ExecutionStatus.NOT_EXECUTING

    def default_response(self, responses):
        '''Default response to speech commands'''
        self._is_busy = True
        speech_resp = responses[0]
        gaze_resp = responses[1]
        # Speech response
        if (speech_resp is not None):
            Response.say(speech_resp)
            Response.respond_with_sound(speech_resp)
        # Gaze response
        if (gaze_resp is not None):
            Response.perform_gaze_action(gaze_resp)
        rospy.loginfo('Current state: ' + str(self._demo_state))
        self._is_busy = False

    def relax_arm(self, arm_index):
        '''Relaxes arm on the indicated side'''
        return self.arms.set_arm_mode(arm_index, ArmMode.RELEASE)

    def freeze_arm(self, arm_index):
        '''Stiffens arm on the indicated side'''
        return self.arms.set_arm_mode(arm_index, ArmMode.HOLD)

    def stop_execution(self, dummy=None):
        '''Stops ongoing execution'''
        if (self._demo_state == DemoState.PLAYING_DEMO and
                self.arms.is_executing()):
            self.arms.stop_execution()
            Response.say(RobotSpeech.STOPPING_EXECUTION)
            Response.perform_gaze_action(GazeGoal.NOD)
        else:
            Response.perform_gaze_action(GazeGoal.SHAKE)
            Response.say(RobotSpeech.ERROR_IS_NOT_PLAYING)

    def _save_arm_to_trajectory(self):
        '''Saves current arm state into continuous trajectory'''
        if (Interaction._arm_trajectory is not None):
            states = self._get_arm_states()
            Interaction._arm_trajectory.rArm.append(states[0])
            Interaction._arm_trajectory.lArm.append(states[1])
            Interaction._arm_trajectory.timing.append(
                rospy.Time.now() - Interaction._trajectory_start_time)

    def _move_to_arm_pose(self, pose_name, arm_index, wait=False):
        '''Moves the robot's arm to a pre-specified arm pose'''
        action = self.session.pose_set[pose_name]
        if action is None:
            rospy.logwarn('Arm pose does not exist: ' + pose_name)
        else:
            step = action.get_step(0)
            if arm_index == 0:
                self.arms.start_move_to_pose(step.armTarget.rArm, 0)
                if wait:
                    self._wait_for_arms()
                self.arms.set_gripper_state(0, step.gripperAction.rGripper,
                                            wait=wait)
            else:
                self.arms.start_move_to_pose(step.armTarget.lArm, 1)
                if wait:
                    self._wait_for_arms()
                self.arms.set_gripper_state(1, step.gripperAction.lGripper,
                                            wait=wait)
            rospy.loginfo('Moved arm ' + str(arm_index) +
                          ' to pose ' + pose_name)

    def _wait_for_arms(self):
        rospy.loginfo('Will wait until the arms get in place.')
        while (self.arms.is_executing()):
            time.sleep(0.1)
        rospy.loginfo('Arms are in place.')

    def _get_arm_states(self):
        '''Returns the current arm states in the right format'''
        abs_ee_poses = [Arms.get_ee_state(0), Arms.get_ee_state(1)]
        joint_poses = [Arms.get_joint_state(0), Arms.get_joint_state(1)]
        states = [None, None]
        for arm_index in [0, 1]:
            nearest_obj = self.world.get_nearest_object(
                abs_ee_poses[arm_index])
            if nearest_obj is None:
                states[arm_index] = ArmState(ArmState.ROBOT_BASE,
                                             abs_ee_poses[arm_index],
                                             joint_poses[arm_index],
                                             Object())
            else:
                # Relative to the nearest object
                rel_ee_pose = World.transform(
                    abs_ee_poses[arm_index], 'base_link', nearest_obj.name)
                states[arm_index] = ArmState(ArmState.OBJECT, rel_ee_pose,
                                             joint_poses[arm_index],
                                             nearest_obj)
        return states

    def speech_command_cb(self, command):
        '''Callback for when a speech command is received'''
        if command.command in self.responses.keys():
            rospy.loginfo('\033[32m Calling response for command ' +
                          command.command + '\033[0m')
            response = self.responses[command.command]
            if (not self.arms.is_executing() and not self._is_busy):
                response.respond()
            else:
                if command.command == Command.STOP_EXECUTION:
                    response.respond()
                else:
                    rospy.logwarn('Ignoring speech command during ' +
                                  'execution or busy: ' + command.command)
        else:
            switch_command = 'SWITCH_TO_ACTION'
            if (switch_command in command.command):
                action_no = command.command[
                    len(switch_command):len(command.command)]
                action_no = int(action_no)
                if (self.session.n_actions() > 0):
                    self.session.switch_to_action(
                        action_no, self.world.get_frame_list())
                    response = Response(self.default_response,
                                        [RobotSpeech.SWITCH_SKILL +
                                         str(action_no), GazeGoal.NOD])
                else:
                    response = Response(self.default_response,
                                        [RobotSpeech.ERROR_NO_SKILLS,
                                         GazeGoal.SHAKE])
                response.respond()
            else:
                rospy.logwarn('\033[32m This command (' + command.command +
                              ') is unknown. \033[0m')

    def gui_command_cb(self, command):
        '''Callback for when a GUI command is received'''
        if (not self.arms.is_executing()):
            if (self.session.n_actions() > 0):
                if (command.command == GuiCommand.SWITCH_TO_ACTION):
                    action_no = command.param
                    self.session.switch_to_action(
                        action_no, self.world.get_frame_list())
                    response = Response(self.default_response,
                                        [RobotSpeech.SWITCH_SKILL +
                                         str(action_no), GazeGoal.NOD])
                    response.respond()
                elif (command.command == GuiCommand.SELECT_ACTION_STEP):
                    step_no = command.param
                    self.session.select_action_step(step_no)
                    rospy.loginfo('Selected action step ' + str(step_no))
                else:
                    rospy.logwarn('\033[32m This command (' +
                                  command.command +
                                  ') is unknown. \033[0m')
            else:
                response = Response(self.default_response,
                                    [RobotSpeech.ERROR_NO_SKILLS,
                                     GazeGoal.SHAKE])
                response.respond()
        else:
            rospy.logwarn('Ignoring GUI command during execution: ' +
                          command.command)

    def update(self):
        '''General update for the main loop'''
        self.arms.update()
        if (self.arms.status != ExecutionStatus.NOT_EXECUTING):
            if (self.arms.status != ExecutionStatus.EXECUTING):
                self._end_replay()
        if (self._demo_state == DemoState.RECORDING_DEMO):
            self._save_arm_to_trajectory()
        if (self.session.n_actions() > 0):
            action = self.session.get_current_action()
            action.update_viz()
        time.sleep(0.1)
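# stop_recording in both classes rebases the recorded timing before saving:
# the wait before the first sample is subtracted from every stamp and a
# constant 0.1 s offset is added back, so playback starts shortly after t=0.
# A ROS-free check of that arithmetic (normalize_timing is illustrative):
def normalize_timing(timing, offset=0.1):
    '''timing: seconds since recording started, one entry per sample.'''
    waited = timing[0]  # time spent before the first sample arrived
    return [t - waited + offset for t in timing]

assert normalize_timing([2.0, 2.5, 3.0]) == [0.1, 0.6, 1.1]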