Example #1
    def get_target_pose(self, target_offset):
        pos = self.target_model_pose.position
        rot = self.target_model_pose.orientation
        # Apply a fixed z offset of 0.93 m to the reported position.
        pos_arr = np.array([[pos.x, pos.y, pos.z - 0.93]])
        rot_arr = Quaternion(x=rot.x, y=rot.y, z=rot.z,
                             w=rot.w).rotation_matrix
        return np.ndarray.flatten(
            get_ee_points(target_offset, pos_arr, rot_arr).T)
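All of the examples on this page funnel their result through the same helper. In the GPS (Guided Policy Search) codebase, get_ee_points transforms a set of point offsets from the end-effector frame into the world frame. The sketch below is reconstructed from how the helper is called in these examples; treat it as an assumption rather than a verbatim copy of the library source:

import numpy as np

def get_ee_points(offsets, ee_pos, ee_rot):
    """Transform end-effector point offsets into the world frame.

    offsets: N x 3 array of points expressed in the end-effector frame.
    ee_pos:  1 x 3 end-effector position.
    ee_rot:  3 x 3 end-effector rotation matrix.
    Returns a 3 x N array; callers flatten its transpose to obtain
    [x1, y1, z1, x2, y2, z2, ...].
    """
    return ee_rot.dot(offsets.T) + ee_pos.T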
Example #2
    def __get_ee_pose(self, joint_angles):
        EE_POINTS = np.array(self._hyperparams['end_effector_points'])

        # Forward kinematics returns [x, y, z, qx, qy, qz, qw].
        pose = self.arm.kinematics.forward_position_kinematics(joint_angles)
        translation = np.array(pose[:3]).reshape(1, 3)
        # np.roll shifts the quaternion from [x, y, z, w] to the [w, x, y, z]
        # order that the Quaternion class expects.
        rot = Quaternion(np.roll(pose[3:], 1)).rotation_matrix

        return np.ndarray.flatten(
            get_ee_points(EE_POINTS, translation, rot).T)
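Examples #2 and #11 both reorder the quaternion with np.roll before building a rotation matrix: ROS-style forward kinematics reports quaternions as [x, y, z, w], while the Quaternion class used here (pyquaternion, judging by the rotation_matrix attribute) expects [w, x, y, z]. A quick check with hypothetical values:

import numpy as np

quat_xyzw = np.array([0.0, 0.0, 0.7071, 0.7071])  # 90 deg about z, [x, y, z, w]
quat_wxyz = np.roll(quat_xyzw, 1)                 # shift right by one: [w, x, y, z]
print(quat_wxyz)  # [0.7071 0.     0.     0.7071]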
Example #3
    def request_goal_state(self, event=None):
        self.request_mode('goalstate')
        sample = self._agent.get_data(arm=self._actuator_type)
        ja = sample.get(JOINT_ANGLES)
        ee_pos = sample.get(END_EFFECTOR_POSITIONS)
        ee_rot = sample.get(END_EFFECTOR_ROTATIONS)
        # Store the flattened end-effector target points and the joint
        # angles as the new goal state.
        ee_tgt = np.ndarray.flatten(
            get_ee_points(self._agent._hyperparams['ee_points'], ee_pos, ee_rot).T
        )
        self._agent._hyperparams['ee_points_tgt'] = [ee_tgt]
        self._target_position = (ja, ee_pos, ee_rot)
        self._agent._target_ja = [ja]
Example #4
    rospy.init_node('gps_agent_ur_ros_node')

    # Set starting end effector position using TF
    tf = TransformListener()

    # Sleep for 1 second to give the node a chance to start up
    rospy.sleep(1)
    time = tf.getLatestCommonTime(WRIST_3_LINK, BASE_LINK)

    x0[joint_dim:joint_dim + NUM_EE_POINTS * EE_POINTS.shape[1]] = (
        get_position(tf, WRIST_3_LINK, BASE_LINK, time))

    # Initialize target end effector position
    ee_tgt = np.ndarray.flatten(
        get_ee_points(EE_POINTS, ee_pos_tgt, ee_rot_tgt).T)

    reset_condition = {JOINT_ANGLES: INITIAL_JOINTS, JOINT_VELOCITIES: []}

    x0s.append(x0)
    ee_tgts.append(ee_tgt)
    reset_conditions.append(reset_condition)

if not os.path.exists(common['data_files_dir']):
    os.makedirs(common['data_files_dir'])

agent = {
    'type': AgentURROS,
    'dt': TIMESTEP,
Example #5
for i in range(common['conditions']):

    ja_x0, ee_pos_x0, ee_rot_x0 = load_pose_from_npz(
        common['target_filename'], 'trial_arm', str(i), 'initial'
    )
    ja_aux, _, _ = load_pose_from_npz(
        common['target_filename'], 'auxiliary_arm', str(i), 'initial'
    )
    _, ee_pos_tgt, ee_rot_tgt = load_pose_from_npz(
        common['target_filename'], 'trial_arm', str(i), 'target'
    )

    x0 = np.zeros(32)
    x0[:7] = ja_x0
    # End-effector points start after the 7 joint angles and 7 joint
    # velocities; each point contributes 3 coordinates.
    x0[14:(14 + 3 * EE_POINTS.shape[0])] = np.ndarray.flatten(
        get_ee_points(EE_POINTS, ee_pos_x0, ee_rot_x0).T
    )

    ee_tgt = np.ndarray.flatten(
        get_ee_points(EE_POINTS, ee_pos_tgt, ee_rot_tgt).T
    )

    aux_x0 = np.zeros(7)
    aux_x0[:] = ja_aux

    reset_condition = {
        TRIAL_ARM: {
            'mode': JOINT_SPACE,
            'data': x0[0:7],
        },
        AUXILIARY_ARM: {
Example #6
for i in range(common['conditions']):

    ja_x0, ee_pos_x0, ee_rot_x0 = load_pose_from_npz(
        common['target_filename'], 'trial_arm', str(i), 'initial'
    )
    ja_aux, _, _ = load_pose_from_npz(
        common['target_filename'], 'auxiliary_arm', str(i), 'initial'
    )
    _, ee_pos_tgt, ee_rot_tgt = load_pose_from_npz(
        common['target_filename'], 'trial_arm', str(i), 'target'
    )

    x0 = np.zeros(32)
    x0[:7] = ja_x0
    x0[14:(14+3*EE_POINTS.shape[0])] = np.ndarray.flatten(
        get_ee_points(EE_POINTS, ee_pos_x0, ee_rot_x0).T
    )

    ee_tgt = np.ndarray.flatten(
        get_ee_points(EE_POINTS, ee_pos_tgt, ee_rot_tgt).T
    )

    aux_x0 = np.zeros(7)
    aux_x0[:] = ja_aux

    reset_condition = {
        TRIAL_ARM: {
            'mode': JOINT_SPACE,
            'data': x0[0:7],
        },
        AUXILIARY_ARM: {
Example #7
    def compute_reference_trajectory(self, condition, policy):
        super(GearExperiment, self).reset(condition)  # Call the superclass reset
        target = self._hyperparams['targets'][condition]
        best_ref_ee, best_ref_ja = None, None  # Best references found so far
        best_dist = float("inf")
        best_plan = None
        best_T = None
        self.T = self.final_T  # Plan against the full horizon

        while True:
            for attempt in range(self.attempts):  # Make the requested attempts
                if self.use_saved_traj:  # A saved plan exists; just load it
                    best_plan = self.load_plan(self.plan_filename)
                    best_dist = self.get_dist(best_plan)
                    break  # No need to plan again
                else:  # Otherwise create a new motion plan
                    plan = self.plan_end_effector(target['position'],
                                                  target['orientation'])

                if plan is not None:
                    cur_dist = self.get_dist(plan)  # Distance of the current plan
                    # Keep whichever plan has the shortest distance
                    if cur_dist < best_dist:
                        best_dist = cur_dist
                        best_plan = plan
                        # Save the display trajectory for the best plan
                        self.best_saved_traj[condition] = self.saved_traj

            # Keep a reversed copy of the plan for resetting afterwards
            copy_plan = copy.deepcopy(best_plan)
            self.reverse_plan(copy_plan)
            self.reset_plans[condition] = copy_plan

            # Build the reference joint-angle and end-effector trajectories
            plan_joints = [
                np.array(point.positions)
                for point in best_plan.joint_trajectory.points
            ]
            best_ref_ja = interpolate(plan_joints, self.T_interpolation)
            # Hold the final configuration for the remaining timesteps
            best_ref_ja.extend([best_ref_ja[-1]] *
                               (self.T - self.T_interpolation))
            ref_poses = self.forward_kinematics(best_ref_ja, 'torso_lift_link')
            best_ref_ee = []
            ee_offsets = self._hyperparams['end_effector_points']

            for pose in ref_poses:
                position, orientation = np.array([pose[:3]]), pose[3:]
                rotation_mat = quaternion_matrix(orientation)[:3, :3]
                points = np.ndarray.flatten(
                    get_ee_points(ee_offsets, position, rotation_mat).T)
                best_ref_ee.append(points)

            # Publish the trajectory so it is visible before asking for approval
            self.publishDisplayTrajectory(self.best_saved_traj[condition])

            if not self.require_approval or yesno(
                    'Does this trajectory look ok?'):
                print("Distance of the best plan: " + str(best_dist))
                break

        if self.use_saved_traj:  # A pickled DisplayTrajectory already exists
            with open(self.pickled_traj_filename, 'rb') as f:
                self.best_saved_traj[condition] = pickle.load(f)

        # Publish the (possibly loaded) trajectory one more time
        self.publishDisplayTrajectory(self.best_saved_traj[condition])

        self.save_plan(best_plan)  # Save the best trajectory to disk
        if self.varying_T:  # Varying-T mode starts from the full horizon
            start_ind = self.final_T
        if self.chosen_parts is None:  # No specific plan segment chosen:
            # initialize from the first 1/segments of the reference
            start_ind = int(1.0 / self.segments * len(best_ref_ee))
        else:  # Use the chosen segment
            # closest_T = find_closest_T(best_ref_ja, best_ref_ee, B4_PEG_JA, B4_PEG_EE)
            closest_T = 139
            self.chosen_parts[0] = closest_T - 10  # Leave a small buffer
            start_ind = self.chosen_parts[0]  # Start from the chosen index
            print("This is the closest found T: " + str(closest_T))

        # Keep copies of the full reference ee and ja trajectories
        self.full_ref_ee[condition] = list(np.copy(np.array(best_ref_ee)))
        self.full_ref_ja[condition] = list(np.copy(np.array(best_ref_ja)))
        self.T = start_ind  # Number of timesteps to actually use

        # If we are only taking part of the trajectory, pad the segment
        if start_ind != self.final_T:
            new_ref_ja = best_ref_ja[:start_ind]
            new_ref_ee = best_ref_ee[:start_ind]
            # Repeat the last step of the segment for self.padding timesteps
            new_ref_ja.extend([best_ref_ja[start_ind - 1]] * self.padding)
            new_ref_ee.extend([best_ref_ee[start_ind - 1]] * self.padding)
            self.T += self.padding
        else:  # Otherwise the goal already has padding
            new_ref_ja, new_ref_ee = best_ref_ja, best_ref_ee

        # Initialize the policy using the beginning of the trajectory
        policy.__init__(*init_pd_ref(self._hyperparams['init_traj_distr'],
                                     new_ref_ja, new_ref_ee))

        with open('pickled_robot_traj_cond' + str(condition), 'wb') as f:
            pickle.dump(self.best_saved_traj[condition], f)

        ref_offsets = np.array(best_ref_ee) - best_ref_ee[-1]

        traj_info = {
            'ja': np.array(best_ref_ja),
            'ee': np.array(best_ref_ee),
            'offsets': ref_offsets,
            'flattened': ref_offsets.flatten()
        }
        self.cur_T[condition] = self.T  # Record the horizon for this condition
        self.trajectories[condition] = traj_info
        return traj_info
Example #8
ee_rot_tgts = np.array([[[-0.9987, -0.0500, 0.0094],
                         [-0.0498, 0.9986, 0.0150],
                         [-0.0101, 0.0145, -0.9998]],
                        [[-0.9987, -0.0500, 0.0094],
                         [-0.0498, 0.9986, 0.0150],
                         [-0.0101, 0.0145, -0.9998]]])

tgt_state = np.loadtxt(EXP_DIR + 'target_state', delimiter=',')

# TODO(chelsea/zoe) : Move this code to a utility function
# Set up each condition.
for i in range(common['conditions']):
    x0 = np.zeros(24)
    x0[:3] = ja_x0s[0]
    x0[6:(6+3*EE_POINTS.shape[0])] = np.ndarray.flatten(
        get_ee_points(EE_POINTS, ee_pos_x0s[0], ee_rot_x0s[0]).T
    )

    ee_tgt = np.ndarray.flatten(
        get_ee_points(EE_POINTS, ee_pos_tgts[0], ee_rot_tgts[0]).T
    )

    aux_x0 = np.zeros(3)
    aux_x0[:] = ja_x0s[0]

    reset_condition = {
        TRIAL_ARM: {
            'mode': JOINT_SPACE,
            'data': x0[0:3],
        },
        AUXILIARY_ARM: {
Example #9
    def compute_reference_trajectory(self, condition, policy):
        self.reset(condition)
        target = self._hyperparams['targets'][condition]
        best_ref_ee = None  # Best end-effector reference found so far
        best_ref_ja = None  # Best joint-angle reference found so far
        best_dist = float("inf")
        best_plan = None
        plans_made = 0  # Count how many plans were actually created

        while True:

            for attempt in range(self.attempts):  # Make this many attempts
                plan = self.plan_end_effector(target['position'],
                                              target['orientation'])
                if plan is None:  # Planning failed; try the next attempt
                    continue
                plans_made += 1
                cur_dist = self.get_dist(plan)  # Distance of the current plan

                # Keep whichever plan has the shortest distance
                if cur_dist < best_dist:
                    plan_joints = [
                        np.array(point.positions)
                        for point in plan.joint_trajectory.points
                    ]
                    ref_ja = interpolate(plan_joints, self.T_interpolation)
                    # Hold the final configuration for the remaining timesteps
                    ref_ja.extend([ref_ja[-1]] *
                                  (self.T - self.T_interpolation))
                    ref_poses = self.forward_kinematics(
                        ref_ja, 'torso_lift_link')
                    ref_ee = []
                    ee_offsets = self._hyperparams['end_effector_points']
                    for pose in ref_poses:
                        position, orientation = np.array([pose[:3]]), pose[3:]
                        rotation_mat = quaternion_matrix(orientation)[:3, :3]
                        points = np.ndarray.flatten(
                            get_ee_points(ee_offsets, position,
                                          rotation_mat).T)
                        ref_ee.append(points)
                    best_dist = cur_dist
                    best_ref_ee, best_ref_ja = ref_ee, ref_ja
                    best_plan = plan
                    self.best_saved_traj = self.saved_traj  # Save display trajectory

            # Plot the best plan found so far
            plot_trajectories([best_ref_ee])

            if not self.require_approval or yesno(
                    'Does this trajectory look ok?'):
                print("Of all the " + str(plans_made) + " plans made, ")
                print("this is the distance of the best one: " +
                      str(best_dist))
                break

        if self.use_saved_traj:  # A pickled DisplayTrajectory already exists
            with open(self.pickled_traj_filename, 'rb') as f:
                self.best_saved_traj = pickle.load(f)

        # Publish the (possibly loaded) trajectory one more time
        self.publishDisplayTrajectory(self.best_saved_traj)

        self.trajectories[condition] = best_ref_ee
        policy.__init__(*init_pd_ref(self._hyperparams['init_traj_distr'],
                                     best_ref_ja, best_ref_ee))

        with open('pickled_robot_traj', 'wb') as f:
            pickle.dump(self.best_saved_traj, f)
        # Offsets of each reference point relative to the final ee position
        ref_offsets = np.array(best_ref_ee) - best_ref_ee[-1]

        return {
            'ja': np.array(best_ref_ja),
            'ee': np.array(best_ref_ee),
            'offsets': ref_offsets,
            'flattened': ref_offsets.flatten()
        }
Example #10
    )
    ja_aux, _, _ = load_pose_from_npz(
        common['target_filename'], 'auxiliary_arm', str(i), 'initial'
    )
    _, ee_pos_tgt, ee_rot_tgt = load_pose_from_npz(
        common['target_filename'], 'trial_arm', str(i), 'target'
    )
    '''
    ee_rot_x0 = np.array([[0.6086, -0.72184, 0.3293052],
                          [-0.546578, 0.37591, 0.92504],
                          [-0.791526, -0.58105, 0.18935]])

    x0 = np.zeros((SENSOR_DIMS[JOINT_ANGLES] * 2 + 3 * EE_POINTS.shape[0] * 2))
    x0[:5] = [1.5596, 0.12270, -1.22824, -0.5233, -0.0]
    x0[10:(10 + 3 * EE_POINTS.shape[0])] = np.ndarray.flatten(
        get_ee_points(EE_POINTS, np.array([0.125645, -0.1788422, -0.36095773]),
                      ee_rot_x0).T)

    ee_ja = [0.256174, 0.22396, -0.854427, -1.030, -0.86368]
    ee_rot_tgt = np.array([[0.6867, 0.5401067, 0.48645617],
                           [-0.4588635, -0.19689, 0.866416],
                           [0.56373678, -0.58105, 0.18935]])
    ee_tgt = np.ndarray.flatten(
        get_ee_points(EE_POINTS, np.array([0.31013, -0.251094, 0.07471]),
                      ee_rot_tgt).T)

    reset_condition = {
        TRIAL_ARM: {
            'mode': JOINT_SPACE,
            'data': x0[0:5],
        },
    }
Example #11
    ee_pos_tgt = np.array(eeposes[i]).reshape(1, 3)
    ee_rot_quat = Quaternion(np.roll(
        eerot[i], 1))  # shift one position so that we have [w,x,y,z]
    ee_rot_tgt = ee_rot_quat.rotation_matrix.reshape((3, 3))

    state_space = sum(SENSOR_DIMS.values()) - SENSOR_DIMS[ACTION]
    print("state_space", state_space)

    # Initialize to the start position; initial velocities are 0
    x0 = np.zeros(state_space)
    x0[:DOFs] = init_q[i]

    # Initialize target end effector position
    ee_tgt = np.ndarray.flatten(
        get_ee_points(EE_POINTS, ee_pos_tgt, ee_rot_tgt).T).reshape((1, -1))

    reset_condition = {
        JOINT_ANGLES: init_q[i],
    }

    x0s.append(x0)
    ee_tgts.append(ee_tgt)
    reset_conditions.append(reset_condition)

if not os.path.exists(common['data_files_dir']):
    os.makedirs(common['data_files_dir'])

agent = {
    'type': AgentUR,