Example 1
 def generate_sigma_points(self, mean, covariance):
     sigmas = []
     sigmas.append(mean)
     if not check_symmetry(covariance):
         pdb.set_trace()
     temp = pos_sem_def_sqrt(covariance)
     if any(isnan(temp)):
         rospy.logerr("Sqrt matrix contained a NaN. Matrix was %s",
                 covariance)
     temp = temp * sqrt(self.n + self.kf_lambda)
     for i in range(0,self.n):
         #Must use columns in order to get the right thing out of the
         #Cholesky decomposition
         #(http://en.wikipedia.org/wiki/Cholesky_decomposition#Kalman_filters)
         column = temp[:,i].reshape((self.n,1))
         #Build the noise quaternion
         noise_vec = column[0:3,0]
         noise_quat = SF9DOF_UKF.build_noise_quat(noise_vec)
         original_quat = SF9DOF_UKF.recover_quat(mean)
         #Do the additive sample
         perturbed_quat = tf_math.quaternion_multiply(noise_quat,original_quat)
         #perturbed_quat = tf_math.unit_vector(perturbed_quat)
         new_mean = mean + column
         new_mean[0:3,0] = perturbed_quat[0:3,0]
         sigmas.append(new_mean)
         #Do the subtractive sample
         conj_noise_quat = tf_math.quaternion_conjugate(noise_quat)
         perturbed_quat = tf_math.quaternion_multiply(conj_noise_quat, original_quat)
         #perturbed_quat = tf_math.unit_vector(perturbed_quat)
         new_mean = mean - column
         new_mean[0:3,0] = perturbed_quat[0:3,0]
         sigmas.append(new_mean)
     return sigmas
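SF9DOF_UKF.build_noise_quat and SF9DOF_UKF.recover_quat are not shown in this example. A minimal sketch of what they are assumed to do, treating the noise column as a small rotation vector and the first three state entries as the vector part of a unit quaternion (the names, shapes and conventions below are assumptions, not the original implementation):

import numpy as np
from tf import transformations as tf_math

def build_noise_quat_sketch(noise_vec):
    # Interpret the 3-vector as an axis-angle rotation vector and return a
    # unit quaternion [x, y, z, w].
    angle = np.linalg.norm(noise_vec)
    if angle < 1e-12:
        return np.array([0.0, 0.0, 0.0, 1.0])
    return tf_math.quaternion_about_axis(angle, noise_vec / angle)

def recover_quat_sketch(mean):
    # Rebuild a unit quaternion (4x1) from the first three entries of the
    # state vector, assuming a non-negative scalar part.
    vec = np.asarray(mean[0:3, 0], dtype=float).flatten()
    w = np.sqrt(max(0.0, 1.0 - float(np.dot(vec, vec))))
    return np.vstack((vec.reshape(3, 1), [[w]]))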
    def _motion_regression_6d(self, pnts, qt, t):
        """
        Compute translational and rotational velocities and accelerations in
        the inertial frame at the target time t.

        [1] Sittel, Florian, Joerg Mueller, and Wolfram Burgard. Computing
            velocities and accelerations from a pose time sequence in
            three-dimensional space. Technical Report 272, University of
            Freiburg, Department of Computer Science, 2013.
        """

        lin_vel = np.zeros(3)
        lin_acc = np.zeros(3)

        q_d = np.zeros(4)
        q_dd = np.zeros(4)

        for i in range(3):
            v, a = self._motion_regression_1d(
                [(pnt['pos'][i], pnt['t']) for pnt in pnts], t)
            lin_vel[i] = v
            lin_acc[i] = a

        for i in range(4):
            v, a = self._motion_regression_1d(
                [(pnt['rot'][i], pnt['t']) for pnt in pnts], t)
            q_d[i] = v
            q_dd[i] = a

        # Keeping all velocities and accelerations in the inertial frame
        ang_vel = 2 * quaternion_multiply(q_d, quaternion_conjugate(qt))
        ang_acc = 2 * quaternion_multiply(q_dd, quaternion_conjugate(qt))

        return np.hstack((lin_vel, ang_vel[0:3])), np.hstack((lin_acc, ang_acc[0:3]))
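_motion_regression_1d is not included in this snippet. Following the least-squares approach of reference [1], a minimal sketch would fit a second-order polynomial to the (value, time) samples and evaluate its first and second derivatives at the target time t (this is an assumption about the helper, not the original code):

import numpy as np

def _motion_regression_1d_sketch(pnts, t):
    # pnts: list of (value, time) tuples; t: target time
    values = np.array([p[0] for p in pnts])
    times = np.array([p[1] for p in pnts])
    # Least-squares fit value(t) ~ a*t**2 + b*t + c
    a, b, _c = np.polyfit(times, values, 2)
    vel = 2.0 * a * t + b  # first derivative at t
    acc = 2.0 * a          # second derivative (constant for a quadratic)
    return vel, acc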
Example 3
 def generate_sigma_points(self, mean, covariance):
     sigmas = []
     sigmas.append(mean)
     temp = numpy.linalg.cholesky(covariance)
     temp = temp * sqrt(self.n + self.kf_lambda)
     for i in range(0,self.n):
         #Must use columns in order to get the right thing out of the
         #Cholesky decomposition
         #(http://en.wikipedia.org/wiki/Cholesky_decomposition#Kalman_filters)
         column = temp[:,i].reshape((self.n,1))
         #Build the noise quaternion
         noise_vec = column[0:3,0]
         noise_quat = SF9DOF_UKF.build_noise_quat(noise_vec)
         original_quat = SF9DOF_UKF.recover_quat(mean)
         #Do the additive sample
         perturbed_quat = tf_math.quaternion_multiply(noise_quat,original_quat)
         #perturbed_quat = tf_math.unit_vector(perturbed_quat)
         new_mean = mean + column
         new_mean[0:3,0] = perturbed_quat[0:3,0]
         sigmas.append(new_mean)
         #Do the subtractive sample
         conj_noise_quat = tf_math.quaternion_conjugate(noise_quat)
         perturbed_quat = tf_math.quaternion_multiply(conj_noise_quat, original_quat)
         #perturbed_quat = tf_math.unit_vector(perturbed_quat)
         new_mean = mean - column
         new_mean[0:3,0] = perturbed_quat[0:3,0]
         sigmas.append(new_mean)
     return sigmas
 def publish_relative_frames(self, data):
   tmp_dict = {}
   for segment in data.segments:
     if(segment.is_quaternion):
       tmp_dict[segment.id] = (segment.position,segment.quat)
     else:	  
       tmp_dict[segment.id] = (segment.position,segment.euler)
   
   for idx in tmp_dict.keys():
     p_idx = xsens_client.get_segment_parent_id(idx)
     child_data = tmp_dict[idx]
     if(p_idx==-1):
       continue
     elif(p_idx==0):
       new_quat = tft.quaternion_multiply(child_data[1],tft.quaternion_inverse((1,1,1,1)))#if(segment.is_quaternion): TODO Handle Euler
       #self.sendTransform(child_data[0],
       self.sendTransform(child_data[0],
           child_data[1],
           #(1.0,0,0,0),#FIXME
           rospy.Time.now(),
           xsens_client.get_segment_name(idx),
           self.ref_frame)
     else:
       parent_data = tmp_dict[p_idx]
       parent_transform = tf.Transform(parent_data[1],parent_data[0])
       child_transform = tf.Transform(child_data[1],child_data[0])
       new_trans = parent_transform.inversetimes(child_transform)
       new_quat = tft.quaternion_multiply(child_data[1],tft.quaternion_inverse(parent_data[1]))
       tmp_pos = (child_data[0][0]-parent_data[0][0],child_data[0][1]-parent_data[0][1] ,child_data[0][2]-parent_data[0][2] )
       #tmp_pos = (child_data[0][0] - parent_data[0][0],child_data[0][1] - parent_data[0][1],child_data[0][2] - parent_data[0][2])
       #self.sendTransform(qv_mult(parent_data[1],tmp_pos),
       #self.sendTransform(tmp_pos,
        #        new_quat,
        #        child_data[1],
       self.sendTransform(new_trans.getOrigin(), new_trans.getRotation(),rospy.Time.now(),xsens_client.get_segment_name(idx),xsens_client.get_segment_name(p_idx))
Example 5
    def align_poses(self, ps1, ps2):

        self.aul.update_frame(ps1)
        ps2.header.stamp = rospy.Time.now()
        self.tfl.waitForTransform(ps2.header.frame_id, 'lh_utility_frame',
                                  rospy.Time.now(), rospy.Duration(3.0))
        ps2_in_ps1 = self.tfl.transformPose('lh_utility_frame', ps2)

        ang = math.atan2(-ps2_in_ps1.pose.position.z,
                         -ps2_in_ps1.pose.position.y) + (math.pi / 2)
        q_st_rot = transformations.quaternion_about_axis(ang, (1, 0, 0))
        q_st_new = transformations.quaternion_multiply([
            ps1.pose.orientation.x, ps1.pose.orientation.y,
            ps1.pose.orientation.z, ps1.pose.orientation.w
        ], q_st_rot)
        ps1.pose.orientation = Quaternion(*q_st_new)

        self.aul.update_frame(ps2)
        ps1.header.stamp = rospy.Time.now()
        self.tfl.waitForTransform(ps1.header.frame_id, 'lh_utility_frame',
                                  rospy.Time.now(), rospy.Duration(3.0))
        ps1_in_ps2 = self.tfl.transformPose('lh_utility_frame', ps1)
        ang = math.atan2(ps1_in_ps2.pose.position.z,
                         ps1_in_ps2.pose.position.y) + (math.pi / 2)

        q_st_rot = transformations.quaternion_about_axis(ang, (1, 0, 0))
        q_st_new = transformations.quaternion_multiply([
            ps2.pose.orientation.x, ps2.pose.orientation.y,
            ps2.pose.orientation.z, ps2.pose.orientation.w
        ], q_st_rot)
        ps2.pose.orientation = Quaternion(*q_st_new)
        return ps1, ps2
Example 6
def rotate(v, q):
    """
    Rotate vector v according to quaternion q
    """
    q2 = np.r_[v[0:3], 0.0]
    return transformations.quaternion_multiply(
        transformations.quaternion_multiply(q, q2),
        transformations.quaternion_conjugate(q))[0:3]
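A quick sanity check of rotate (assuming numpy as np and tf.transformations as transformations, as in the function above): rotating the x axis by 90 degrees about z should give the y axis.

q = transformations.quaternion_about_axis(np.pi / 2.0, (0, 0, 1))
v = np.array([1.0, 0.0, 0.0])
rotate(v, q)  # -> approximately [0, 1, 0]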
Example 7
        def process_touch_pose(ud):
            ######################################################################### 
            # transformation logic for manipulation
            # put touch pose in torso frame
            frame_B_touch = util.pose_msg_to_mat(ud.touch_click_pose)
            torso_B_frame = self.get_transform("torso_lift_link", 
                                               ud.touch_click_pose.header.frame_id)
            if torso_B_frame is None:
                return 'tf_failure'
            torso_B_touch_bad = torso_B_frame * frame_B_touch

            # rotate pixel23d the right way
            t_pos, t_quat = util.pose_mat_to_pq(torso_B_touch_bad)
            # rotate so x axis pointing out
            quat_flip_rot = tf_trans.quaternion_from_euler(0.0, np.pi/2.0, 0.0)
            quat_flipped = tf_trans.quaternion_multiply(t_quat, quat_flip_rot)
            # rotate around x axis so the y axis is flat
            mat_flipped = tf_trans.quaternion_matrix(quat_flipped)
            rot_angle = np.arctan(-mat_flipped[2,1] / mat_flipped[2,2])
            quat_ortho_rot = tf_trans.quaternion_from_euler(rot_angle + np.pi, 0.0, 0.0)
            quat_flipped_ortho = tf_trans.quaternion_multiply(quat_flipped, quat_ortho_rot)

            torso_B_touch = util.pose_pq_to_mat(t_pos, quat_flipped_ortho)

            # offset the touch location by the approach transform
            appr_B_tool = self.get_transform(self.tool_approach_frame, self.tool_frame) 
            if appr_B_tool is None:
                return 'tf_failure'
            torso_B_touch_appr = torso_B_touch * appr_B_tool

            # project the approach back into the wrist
            appr_B_wrist = self.get_transform(self.tool_frame, 
                                              self.arm + "_wrist_roll_link") 
            if appr_B_wrist is None:
                return 'tf_failure'

            torso_B_wrist = torso_B_touch_appr * appr_B_wrist
            ######################################################################### 
            # create PoseStamped
            appr_wrist_ps = util.pose_mat_to_stamped_msg('torso_lift_link',
                                                         torso_B_wrist)
            appr_tool_ps = util.pose_mat_to_stamped_msg('torso_lift_link', 
                                                        torso_B_touch_appr)
            touch_tool_ps = util.pose_mat_to_stamped_msg('torso_lift_link', 
                                                         torso_B_touch)

            # visualization
            self.wrist_pub.publish(appr_wrist_ps)
            self.appr_pub.publish(appr_tool_ps)
            self.touch_pub.publish(touch_tool_ps)

            # set return values
            ud.appr_wrist_mat = torso_B_wrist
            ud.appr_tool_ps = appr_tool_ps
            ud.touch_tool_ps = touch_tool_ps

            
            return 'succeeded'
Example 8
def calc_offset_angle(current):
    """Calculates the offset angle from the x axis positiion"""

    x_axis = [1, 0, 0, 0]
    a = quaternion_multiply(x_axis, quaternion_inverse(current))
    rotation = quaternion_multiply(quaternion_multiply(a, x_axis), quaternion_inverse(a))
    angle = atan2(rotation[1], rotation[0])

    return angle
Example 9
def rotate_vect_by_quat(v, q):
    '''
    Rotate a vector by a quaternion.
    v' = q* v q, where q* is the conjugate of q

    q should be [x,y,z,w] (standard ros convention)
    '''
    cq = np.array([-q[0], -q[1], -q[2], q[3]])
    cq_v = trans.quaternion_multiply(cq, v)
    v = trans.quaternion_multiply(cq_v, q)
    v[1:] *= -1  # Only seemed to work when I flipped this around, is there a problem with the math here?
    return np.array(v)[:3]
Example 10
    def increment_reference(self):
        '''
        Steps the model reference (trajectory to track) by one self.timestep.

        '''
        # Frame management quantities
        R_ref = trns.quaternion_matrix(self.q_ref)[:3, :3]
        y_ref = trns.euler_from_quaternion(self.q_ref)[2]

        # Convert body PD gains to world frame
        kp = R_ref.dot(self.kp_body).dot(R_ref.T)
        kd = R_ref.dot(self.kd_body).dot(R_ref.T)

        # Compute error components (desired - reference), using "smartyaw"
        p_err = self.p_des - self.p_ref
        v_err = -self.v_ref
        w_err = -self.w_ref
        if npl.norm(p_err) <= self.heading_threshold:
            q_err = trns.quaternion_multiply(self.q_des, trns.quaternion_inverse(self.q_ref))
        else:
            q_direct = trns.quaternion_from_euler(0, 0, np.angle(p_err[0] + (1j * p_err[1])))
            q_err = trns.quaternion_multiply(q_direct, trns.quaternion_inverse(self.q_ref))
        y_err = trns.euler_from_quaternion(q_err)[2]

        # Combine error components into error vectors
        err = np.concatenate((p_err, [y_err]))
        errdot = np.concatenate((v_err, [w_err]))
        wrench = (kp.dot(err)) + (kd.dot(errdot))

        # Compute world frame thruster matrix (B) from thruster geometry, and then map wrench to thrusts
        B = np.concatenate((R_ref.dot(self.thruster_directions.T), R_ref.dot(self.lever_arms.T)))
        B_3dof = np.concatenate((B[:2, :], [B[5, :]]))
        command = self.thruster_mapper(wrench, B_3dof)
        wrench_saturated = B.dot(command)

        # Use model drag to find drag force on virtual boat
        twist_body = R_ref.T.dot(np.concatenate((self.v_ref, [self.w_ref])))
        D_body = np.zeros_like(twist_body)
        for i, v in enumerate(twist_body):
            if v >= 0:
                D_body[i] = self.D_body_positive[i]
            else:
                D_body[i] = self.D_body_negative[i]
        drag_ref = R_ref.dot(D_body * twist_body * abs(twist_body))

        # Step forward the dynamics of the virtual boat
        self.a_ref = (wrench_saturated[:2] - drag_ref[:2]) / self.mass_ref
        self.aa_ref = (wrench_saturated[5] - drag_ref[2]) / self.inertia_ref
        self.p_ref = self.p_ref + (self.v_ref * self.timestep)
        self.q_ref = trns.quaternion_from_euler(0, 0, y_ref + (self.w_ref * self.timestep))
        self.v_ref = self.v_ref + (self.a_ref * self.timestep)
        self.w_ref = self.w_ref + (self.aa_ref * self.timestep)
    def _getPokePose(self, pos, orient):

        print orient
        if numpy.dot(tft.quaternion_matrix(orient)[0:3,2], (0,0,1)) < 0:
            print "fliiping arm pose"
            orient = tft.quaternion_multiply(orient, (1,0,0,0))
        tilt_adjust = tft.quaternion_about_axis(self.tilt_angle[self.arm_using], (0,1,0))
        new_orient = tft.quaternion_multiply(orient, tilt_adjust)
        pos[2] += self.above #push above cm above table 
#DEBUG
        #print new_orient

        return (pos, orient, new_orient)
Example 12
def euler_to_quat_deg(roll, pitch, yaw):
    roll = roll * (math.pi / 180.0)
    pitch = pitch * (math.pi / 180.0)
    yaw = yaw * (math.pi / 180.0)
    xaxis = (1, 0, 0)
    yaxis = (0, 1, 0)
    zaxis = (0, 0, 1)
    qx = transformations.quaternion_about_axis(roll, xaxis)
    qy = transformations.quaternion_about_axis(pitch, yaxis)
    qz = transformations.quaternion_about_axis(yaw, zaxis)
    q = transformations.quaternion_multiply(qx, qy)
    q = transformations.quaternion_multiply(q, qz)
    print q
    return q
 def position_control(user_data, self):
   if self.inside_workspace(self.master_pos):
     self.command_pos = self.slave_synch_pos + self.master_pos / self.position_ratio
     # TODO: Rotations are driving me crazy! 
     relative_rot = tr.quaternion_multiply(self.master_synch_rot, tr.quaternion_inverse(self.master_rot))
     self.command_rot = tr.quaternion_multiply(self.slave_synch_rot, relative_rot)
     #~ self.command_rot = np.array(self.master_rot)
     return 'stay'
   else:
     self.command_pos = np.array(self.slave_pos)
     self.command_rot = np.array(self.slave_rot)
     self.vib_start_time = rospy.get_time()
     self.loginfo('State machine transitioning: POSITION_CONTROL:leave-->VIBRATORY_PHASE')
     return 'leave'
Example 14
    def listen_imu_quaternion(self, imu):
        """Doesn't work b/c desired yaw is too far from possible yaw"""
        current_orientation = np.array([imu.orientation.x,
                                        imu.orientation.y,
                                        imu.orientation.z,
                                        imu.orientation.w])
        # current_angular_speed = np.array([imu.angular_velocity.x,
        #                                   imu.angular_velocity.y,
        #                                   imu.angular_velocity.z])
        diff_orientation = transformations.quaternion_multiply(
            self.desired_orientation,
            # Unit quaternion => inverse = conjugate / norm = conjugate
            transformations.quaternion_conjugate(current_orientation))

        assert np.allclose(
            transformations.quaternion_multiply(diff_orientation,
                                                current_orientation),
            self.desired_orientation)

        # diff_r, diff_p, diff_y = transformations.euler_from_quaternion(
        #     diff_orientation)
        # rospy.loginfo("Orientation error (quaternion): {}".format(diff_orientation))
        # rospy.loginfo(
        #     "Orientation error (deg): {}".format(
        #         np.rad2deg([diff_r, diff_p, diff_y]))
        # )
        # out = self.pitch_controller(diff_p)

        corrected_orientation = transformations.quaternion_multiply(
            quaternion_power(diff_orientation, 1.5),
            self.desired_orientation)
        rospy.loginfo(
            "Desired orientation (deg): {}".format(
                np.rad2deg(transformations.euler_from_quaternion(
                    self.desired_orientation))))
        rospy.loginfo(
            "Corrected orientation (deg): {}".format(
                np.rad2deg(transformations.euler_from_quaternion(
                    corrected_orientation))))

        rospy.loginfo("Desired position: {}".format(
            self.desired_position))

        setpoint_pose = Pose(self.desired_position, corrected_orientation)

        servo_angles = self.platform.ik(setpoint_pose)
        rospy.logdebug(
            "Servo angles (deg): {}".format(np.rad2deg(servo_angles)))
        self.publish_servo_angles(servo_angles)
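quaternion_power above is not part of tf.transformations. A minimal sketch, assuming it scales the rotation angle of a unit quaternion while keeping its axis (the name and edge-case handling are assumptions):

import numpy as np
from tf import transformations

def quaternion_power_sketch(q, power):
    # q is [x, y, z, w]; scale its rotation angle by `power`
    q = np.asarray(q, dtype=float)
    w = np.clip(q[3], -1.0, 1.0)
    s = np.sqrt(max(0.0, 1.0 - w * w))
    if s < 1e-12:
        return np.array([0.0, 0.0, 0.0, 1.0])  # (near-)identity rotation
    angle = 2.0 * np.arccos(w)
    return transformations.quaternion_about_axis(power * angle, q[0:3] / s)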
 def rate_control(user_data, self):
   if not self.inside_workspace(self.master_pos):
     # Send the force feedback to the master
     self.force_feedback = (self.k_rate * self.master_pos + self.b_rate * self.master_vel) * -1.0
     # Send the rate command to the slave
     distance = sqrt(np.sum((self.master_pos - self.rate_pivot) ** 2)) / self.position_ratio
     self.command_pos += (self.rate_gain * distance * self.normalize_vector(self.master_pos)) / self.position_ratio
     relative_rot = tr.quaternion_multiply(self.master_synch_rot, tr.quaternion_inverse(self.master_rot))
     self.command_rot = tr.quaternion_multiply(self.slave_synch_rot, relative_rot)
     return 'stay'
   else:
     self.command_pos = np.array(self.slave_pos)
     self.command_rot = np.array(self.slave_rot)
     self.force_feedback = np.zeros(3)
     self.loginfo('State machine transitioning: RATE_CONTROL:leave-->GO_TO_CENTER')
     return 'leave'
Example 16
    def generate_quat(self, s):
        s = max(0, s)
        s = min(s, 1)

        last_s = s - self._s_step
        if last_s < 0:
            last_s = 0

        if s == 0:
            self._last_rot = deepcopy(self._init_rot)
            return self._init_rot

        this_pos = self.generate_pos(s)
        last_pos = self.generate_pos(last_s)
        dx = this_pos[0] - last_pos[0]
        dy = this_pos[1] - last_pos[1]
        dz = this_pos[2] - last_pos[2]

        rotq = self._compute_rot_quat(dx, dy, dz)
        self._last_rot = deepcopy(rotq)

        # Calculating the step for the heading offset
        q_step = quaternion_about_axis(
            self._interp_fcns['heading'](s),
            np.array([0, 0, 1]))
        # Adding the heading offset to the rotation quaternion
        rotq = quaternion_multiply(rotq, q_step)
        return rotq
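self._compute_rot_quat(dx, dy, dz) is defined elsewhere in the class. The assumption here is that it returns a quaternion whose x axis points along the travel direction (dx, dy, dz); a minimal sketch under that assumption:

import numpy as np
from tf.transformations import quaternion_from_euler

def compute_rot_quat_sketch(dx, dy, dz):
    # Yaw from the horizontal components, pitch from the vertical one,
    # so the body x axis is aligned with the direction of travel.
    yaw = np.arctan2(dy, dx)
    pitch = np.arctan2(-dz, np.sqrt(dx ** 2 + dy ** 2))
    return quaternion_from_euler(0.0, pitch, yaw)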
Example 17
    def publish_vector(self, m_id):
        new_m = copy.deepcopy(self.m)
        new_m.id = m_id
        u, v, p = 1, np.random.uniform(0, np.pi), np.random.uniform(0, 2 * np.pi)
        pos = self.sspace.spheroidal_to_cart((u, v, p))
        new_m.points.append(Point(*pos))

        df_du = self.sspace.partial_u((u, v, p))
        df_du *= 0.1 / np.linalg.norm(df_du)
        new_m.points.append(Point(*(pos+df_du)))
        
        self.ma.markers.append(new_m)
        self.vis_pub.publish(self.ma)

        rot_gripper = np.pi/4.

        nx, ny, nz = df_du.T.A[0] / np.linalg.norm(df_du)
        j = np.sqrt(1./(1.+ny*ny/(nz*nz)))
        k = -ny*j/nz
        norm_rot = np.mat([[-nx,  ny*k - nz*j,  0],      
                           [-ny,  -nx*k,        j],      
                           [-nz,  nx*j,         k]])
        _, norm_quat = PoseConverter.to_pos_quat(np.mat([0, 0, 0]).T, norm_rot)
        rot_angle = np.arctan(-norm_rot[2,1] / norm_rot[2,2])
        quat_ortho_rot = tf_trans.quaternion_from_euler(rot_angle + np.pi + rot_gripper, 0.0, 0.0)
        norm_quat_ortho = tf_trans.quaternion_multiply(norm_quat, quat_ortho_rot)
        ps_msg = PoseConverter.to_pose_stamped_msg("torso_lift_link", pos, norm_quat_ortho)
        self.pose_pub.publish(ps_msg)
Example 18
def draw_axes(pub, id, ns, pose_stamped, scale=(0.05, 0.1, 0.1), text=""):
    ps = PointStamped()
    ps.header = pose_stamped.header
    ps.point = pose_stamped.pose.position
    q_x = quaternion_to_array(pose_stamped.pose.orientation)
    q_y = quaternion_multiply(q_x, quaternion_about_axis(pi/2, (0, 1, 0)))
    q_z = quaternion_multiply(q_x, quaternion_about_axis(pi/2, (0, 0, 1)))
    if id > 999:
        rospy.logerr('Currently can\'t visualize markers with id > 999.')
        return
    place_arrow(ps, pub, id, ns, (1, 0, 0),\
            scale, array_to_quaternion(q_x))
    place_arrow(ps, pub, id + 1000, ns, (0, 1, 0),\
            scale, array_to_quaternion(q_y))
    place_arrow(ps, pub, id + 2000, ns, (0, 0, 1),\
            scale, array_to_quaternion(q_z), text=text)
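quaternion_to_array and array_to_quaternion are small helpers not shown here; they are assumed to convert between a geometry_msgs Quaternion message and an [x, y, z, w] array, roughly:

import numpy as np
from geometry_msgs.msg import Quaternion

def quaternion_to_array(q):
    # geometry_msgs/Quaternion -> numpy array [x, y, z, w]
    return np.array([q.x, q.y, q.z, q.w])

def array_to_quaternion(a):
    # [x, y, z, w] array -> geometry_msgs/Quaternion
    return Quaternion(*a)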
Example 19
def deltaPose(currPose, desPose, posFrame=None, rotFrame=None):
    """
    Returns pose0 - pose1
    """

    currPose, desPose = tfx.pose(currPose), tfx.pose(desPose)
    currPoseFrame, desPoseFrame = currPose.frame, desPose.frame
    
    currPos, desPos = currPose.position, desPose.position
    currRot, desRot = currPose.orientation, desPose.orientation

    if posFrame is not None and currPoseFrame is not None:
        tf_currPos_to_posFrame = tfx.lookupTransform(posFrame, currPoseFrame, wait=10)
        currPos = tf_currPos_to_posFrame * currPos

        tf_desPos_to_posFrame = tfx.lookupTransform(posFrame, desPoseFrame, wait=10)
        desPos = tf_desPos_to_posFrame * desPos

    if rotFrame is not None and currPoseFrame is not None:
        tf_currRot_to_rotFrame = tfx.lookupTransform(rotFrame, currPoseFrame, wait=10)
        currRot = tf_currRot_to_rotFrame * currRot

        tf_desRot_to_rotFrame = tfx.lookupTransform(rotFrame, desPoseFrame, wait=10)
        desRot = tf_desRot_to_rotFrame * desRot

    deltaPosition = desPos.array - currPos.array
    
    currQuat, desQuat = tfx.tb_angles(currRot).quaternion, tfx.tb_angles(desRot).quaternion
    deltaQuat = tft.quaternion_multiply(tft.quaternion_inverse(currQuat), desQuat)

    deltaPose = tfx.pose(deltaPosition, deltaQuat)

    return deltaPose
Example 20
 def pixel_2_3d_callback(self, msg):
     msg.header.stamp = rospy.Time.now()
     self.tf_listener.waitForTransform("torso_lift_link", msg.header.frame_id, msg.header.stamp, rospy.Duration(4))
     msg = self.tf_listener.transformPose("torso_lift_link", msg)
     x, y, z = (msg.pose.position.x, msg.pose.position.y, msg.pose.position.z)
     self.pix1_pub.publish(msg)
     quat = [msg.pose.orientation.x, msg.pose.orientation.y, 
             msg.pose.orientation.z, msg.pose.orientation.w]
     quat_flip_rot = tf_trans.quaternion_from_euler(0.0, np.pi/2.0, 0.0)
     quat_flipped = tf_trans.quaternion_multiply(quat, quat_flip_rot)
     mat_flipped = tf_trans.quaternion_matrix(quat_flipped)
     rot_angle = np.arctan(-mat_flipped[2,1] / mat_flipped[2,2])
     quat_ortho_rot = tf_trans.quaternion_from_euler(rot_angle + np.pi, 0.0, 0.0)
     quat_flipped_ortho = tf_trans.quaternion_multiply(quat_flipped, quat_ortho_rot)
     msg.pose.orientation = Quaternion(*quat_flipped_ortho)
     touch_face_at_pose(msg)
Example 21
 def correct_mean(est_mean, correction):
     error_quat = SF9DOF_UKF.recover_quat(correction)
     original_quat = SF9DOF_UKF.recover_quat(est_mean)
     corrected_quat = tf_math.quaternion_multiply(error_quat, original_quat)
     corrected_mean = est_mean + correction
     corrected_mean[0:3,0] = corrected_quat[0:3,0]
     return corrected_mean
Example 22
    def listen_imu(self, imu):
        # node can sleep for 16 out of 20 ms without dropping any messages
        # rospy.sleep(16.0/1000.0)
        # self.NN += 1
        # if rospy.get_time() - self.t0 > 1:
        #     rospy.logfatal("Published {} messages in 1 s, {} Hz".format(self.NN, self.NN))
        #     self.NN, self.t0 = 0, rospy.get_time()
        current_orientation = np.array([imu.orientation.x,
                                        imu.orientation.y,
                                        imu.orientation.z,
                                        imu.orientation.w])
        current_angular_speed = np.array([imu.angular_velocity.x,
                                          imu.angular_velocity.y,
                                          0.0])
                                          
        current_rpy = list(transformations.euler_from_quaternion(current_orientation))
        # Don't care about yaw, but it must be close to zero
        current_rpy[-1] = 0
        desired_rpy = list(transformations.euler_from_quaternion(self.desired_orientation))
        desired_rpy[-1] = 0
        corrected = self.rp_pid(desired_rpy, current_rpy, current_angular_speed)
        corrected_orientation = transformations.quaternion_multiply(
            transformations.quaternion_from_euler(*corrected),
            transformations.quaternion_from_euler(*current_rpy))

        setpoint_pose = Pose(self.desired_position, corrected_orientation)

        servo_angles = self.platform.ik(setpoint_pose)
        rospy.logdebug(
            "Servo angles (deg): {}".format(np.rad2deg(servo_angles)))
        self.publish_servo_angles(servo_angles)
Example 23
def quat_QuTem( quat_mean, n, stdDev ):

    # Gaussian random quaternion
    x = (np.array([np.random.normal(0., 1., n)]).T *stdDev[0]*stdDev[0])
    y = (np.array([np.random.normal(0., 1., n)]).T *stdDev[1]*stdDev[1])
    z = (np.array([np.random.normal(0., 1., n)]).T *stdDev[2]*stdDev[2])

    mag = np.zeros((n,1))
    for i in xrange(len(x)):
        mag[i,0] = np.sqrt([x[i,0]**2+y[i,0]**2+z[i,0]**2])
    
    axis  = np.hstack([x/mag, y/mag, z/mag])    
    ## angle = np.array([np.random.normal(0., stdDev[3]**2.0, n)]).T
    angle = np.zeros([len(x),1])
    for i in xrange(len(x)):
        rnd = 0.0
        while True:
            rnd = np.random.normal(0.,1.)
            if rnd <= np.pi and rnd > -np.pi:
                break
        angle[i,0] = rnd + np.pi
    
    # Convert the gaussian axis and angle distribution to unit quaternion distribution
    # angle should be limited to positive range...
    s = np.sin(angle / 2.0);
    quat_rnd = np.hstack([axis*s, np.cos(angle/2.0)])

    # Multiplication with mean quat
    q = np.zeros((n,4))    
    for i in xrange(len(x)):
        q[i,:] = tft.quaternion_multiply(quat_mean, quat_rnd[i,:])
       
    return q
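A hypothetical usage of quat_QuTem (assuming numpy as np and tf.transformations as tft are imported as above): draw 100 random unit quaternions around the identity orientation with per-axis standard deviations of 0.1.

samples = quat_QuTem(np.array([0.0, 0.0, 0.0, 1.0]), 100, [0.1, 0.1, 0.1])
# samples has shape (100, 4), one [x, y, z, w] quaternion per row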
  def cb_master_pose(self, msg):
    pos = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z])
    self.master_pos = self.change_axes(pos)

    real_rot = np.array([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])
    if (self.arot_is):
        q1_aux = tr.quaternion_multiply(real_rot, self.q_arot_value)
    else:
        q1_aux = real_rot
        
    if (self.mrot_is):
        q2_aux = tr.quaternion_multiply(self.q_mrot_value, q1_aux)
    else:
        q2_aux = q1_aux
        
    self.master_rot = q2_aux
Example 25
def aim_pose_to(ps, pts, point_dir=(1,0,0)):
    if not (ps.header.frame_id.lstrip('/') == pts.header.frame_id.lstrip('/')):
        rospy.logerr("[Pose_Utils.aim_pose_to]:"+
                     "Pose and point must be in same frame: %s, %s"
                    %(ps.header.frame_id, pts.header.frame_id))
    target_pt = np.array((pts.point.x, pts.point.y, pts.point.z))
    base_pt = np.array((ps.pose.position.x,
                        ps.pose.position.y,
                        ps.pose.position.z)) 
    base_quat = np.array((ps.pose.orientation.x, ps.pose.orientation.y,
                          ps.pose.orientation.z, ps.pose.orientation.w))

    b_to_t_vec = np.array((target_pt[0]-base_pt[0],
                           target_pt[1]-base_pt[1],
                           target_pt[2]-base_pt[2]))
    b_to_t_norm = np.divide(b_to_t_vec, np.linalg.norm(b_to_t_vec))

    point_dir_hom = (point_dir[0], point_dir[1], point_dir[2], 1)
    base_rot_mat = tft.quaternion_matrix(base_quat)
    point_dir_hom = np.dot(point_dir_hom, base_rot_mat.T)
    point_dir = np.array((point_dir_hom[0]/point_dir_hom[3],
                         point_dir_hom[1]/point_dir_hom[3],
                         point_dir_hom[2]/point_dir_hom[3]))
    point_dir_norm = np.divide(point_dir, np.linalg.norm(point_dir))
    axis = np.cross(point_dir_norm, b_to_t_norm)
    angle = np.arccos(np.vdot(point_dir_norm, b_to_t_norm))
    quat = tft.quaternion_about_axis(angle, axis)
    new_quat = tft.quaternion_multiply(quat, base_quat)
    ps.pose.orientation = Quaternion(*new_quat)
    def odom_transform_2d(self, data, new_frame, trans, rot):
        # NOTES: only in 2d rotation
        # also, it removes covariance, etc information
        odom_new = Odometry()
        odom_new.header = data.header
        odom_new.header.frame_id = new_frame

        odom_new.pose.pose.position.x = data.pose.pose.position.x + trans[0]
        odom_new.pose.pose.position.y = data.pose.pose.position.y + trans[1]
        odom_new.pose.pose.position.z = data.pose.pose.position.z + trans[2]
        # rospy.loginfo('td: %s tr: %s' % (str(type(data)), str(type(rot))))
        q = data.pose.pose.orientation
        q_tuple = (q.x, q.y, q.z, q.w,)
        # Quaternion(*(tft quaternion)) converts a tf-style quaternion to a ROS msg quaternion
        odom_new.pose.pose.orientation = Quaternion(*tft.quaternion_multiply(q_tuple, rot))

        heading_change = quaternion_to_heading(rot)

        odom_new.twist.twist.linear.x = data.twist.twist.linear.x*math.cos(heading_change) - data.twist.twist.linear.y*math.sin(heading_change)
        odom_new.twist.twist.linear.y = data.twist.twist.linear.y*math.cos(heading_change) + data.twist.twist.linear.x*math.sin(heading_change)
        odom_new.twist.twist.linear.z = 0

        odom_new.twist.twist.angular.x = 0
        odom_new.twist.twist.angular.y = 0
        odom_new.twist.twist.angular.z = data.twist.twist.angular.z

        return odom_new
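quaternion_to_heading used above is a project helper, not a tf function. A minimal sketch, assuming it extracts the yaw angle from an [x, y, z, w] quaternion (tf.transformations available as tft, as in the method above):

def quaternion_to_heading_sketch(q):
    # Return the yaw (rotation about z) of an [x, y, z, w] quaternion
    _roll, _pitch, yaw = tft.euler_from_quaternion(q)
    return yaw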
    def _generate_vel(self, s=None):
        if self._stamped_pose_only:
            return np.zeros(6)
        cur_s = (self._cur_s if s is None else s)
        last_s = cur_s - self.interpolator.s_step

        if last_s < 0 or cur_s > 1:
            return np.zeros(6)

        q_cur = self.interpolator.generate_quat(cur_s)
        q_last = self.interpolator.generate_quat(last_s)

        cur_pos = self.interpolator.generate_pos(cur_s)
        last_pos = self.interpolator.generate_pos(last_s)

        ########################################################
        # Computing angular velocities
        ########################################################
        # Quaternion difference to the last step in the inertial frame
        q_diff = quaternion_multiply(q_cur, quaternion_inverse(q_last))
        # Angular velocity
        ang_vel = 2 * q_diff[0:3] / self._t_step

        vel = [(cur_pos[0] - last_pos[0]) / self._t_step,
               (cur_pos[1] - last_pos[1]) / self._t_step,
               (cur_pos[2] - last_pos[2]) / self._t_step,
               ang_vel[0],
               ang_vel[1],
               ang_vel[2]]
        return np.array(vel)
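A quick numeric check of the small-rotation approximation used above for the angular velocity, 2 * q_diff[0:3] / t_step (assumes numpy and tf.transformations; the step values here are made up): a small yaw increment d_yaw over t_step should give roughly [0, 0, d_yaw / t_step].

import numpy as np
from tf.transformations import quaternion_from_euler, quaternion_multiply, quaternion_inverse

t_step = 0.1
d_yaw = 0.01
q_last = quaternion_from_euler(0.0, 0.0, 1.0)
q_cur = quaternion_from_euler(0.0, 0.0, 1.0 + d_yaw)
q_diff = quaternion_multiply(q_cur, quaternion_inverse(q_last))
ang_vel = 2 * q_diff[0:3] / t_step  # -> approximately [0, 0, 0.1]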
Example 28
    def update(self, dt, desired, current):
        (p, o), (p_dot, o_dot) = current
        (desired_p, desired_o), (desired_p_dot, desired_o_dot), (desired_p_dotdot, desired_o_dotdot) = desired
        world_from_body = transformations.quaternion_matrix(o)[:3, :3]
        x_dot = numpy.concatenate([
            world_from_body.dot(p_dot),
            world_from_body.dot(o_dot),
        ])

        # world_from_desiredbody = transformations.quaternion_matrix(desired_o)[:3, :3]
        desired_x_dot = numpy.concatenate([
            world_from_body.dot(desired_p_dot),
            world_from_body.dot(desired_o_dot),
        ])
        desired_x_dotdot = numpy.concatenate([
            world_from_body.dot(desired_p_dotdot),
            world_from_body.dot(desired_o_dotdot),
        ])

        error_position_world = numpy.concatenate([
            desired_p - p,
            quat_to_rotvec(transformations.quaternion_multiply(
                desired_o,
                transformations.quaternion_inverse(o),
            )),
        ])
        if self.config['two_d_mode']:
            error_position_world = error_position_world * [1, 1, 0, 0, 0, 1]

        world_from_body2 = numpy.zeros((6, 6))
        world_from_body2[:3, :3] = world_from_body2[3:, 3:] = world_from_body

        # Permitting lambda assignment b/c legacy
        body_gain = lambda x: world_from_body2.dot(x).dot(world_from_body2.T)  # noqa

        error_velocity_world = (desired_x_dot + body_gain(numpy.diag(self.config['k'])).dot(error_position_world)) - x_dot
        if self.config['two_d_mode']:
            error_velocity_world = error_velocity_world * [1, 1, 0, 0, 0, 1]

        pd_output = body_gain(numpy.diag(self.config['ks'])).dot(error_velocity_world)

        output = pd_output
        if self.config['use_rise']:
            rise_term_int = body_gain(numpy.diag(self.config['ks'] * self.config['alpha'])).dot(error_velocity_world) + \
                body_gain(numpy.diag(self.config['beta'])).dot(numpy.sign(error_velocity_world))

            self._rise_term = self._rise_term + dt / 2 * (rise_term_int + self._rise_term_int_prev)
            self._rise_term_int_prev = rise_term_int

            output = output + self._rise_term
        else:
            # zero rise term so it doesn't wind up over time
            self._rise_term = numpy.zeros(6)
            self._rise_term_int_prev = numpy.zeros(6)
        output = output + body_gain(numpy.diag(self.config['accel_feedforward'])).dot(desired_x_dotdot)
        output = output + body_gain(numpy.diag(self.config['vel_feedforward'])).dot(desired_x_dot)

        # Permitting lambda assignment b/c legacy
        wrench_from_vec = lambda output: (world_from_body.T.dot(output[0:3]), world_from_body.T.dot(output[3:6]))  # noqa
        return wrench_from_vec(pd_output), wrench_from_vec(output)
Example 29
    def moveToMarker(self):  
        try:
            now=rospy.Time.now()
            self.listener.waitForTransform('/map', self.marker, now, rospy.Duration(10))
            trans, rot=self.listener.lookupTransform('/map', self.marker, now)  
            print "Have a TF frame"            
            self.move_base_client.wait_for_server()
            g = MoveBaseGoal()
            g.target_pose.header.frame_id = '/map'
            g.target_pose.header.stamp=rospy.Time.now()	
            #Add offset so that the PR2 sits right in front of the tag instead of running it over or getting the navigation stack stuck
            x=trans[0]-0.50                                  
            y=trans[1]
            z=trans[2]
            #Switch rotation around to be in the opposite direction        
            q1=np.array([rot[0], rot[1], rot[2], rot[3]])
            q1_con=tft.quaternion_conjugate(q1) #get the conjugate
            rot_y_axis=np.array([0.0*sin(pi/4), 1.0*sin(pi/4), 0.0*sin(pi/4), cos(pi/4)])
            q3=tft.quaternion_multiply(q1_con, q1)
            q4=tft.quaternion_multiply(rot_y_axis, q3)
            q_fin=tft.quaternion_multiply(q1, q4)

            g.target_pose.pose.position.x = x          
            g.target_pose.pose.position.y = y	        
            g.target_pose.pose.position.z = z
            g.target_pose.pose.orientation.x = q_fin[0]
            g.target_pose.pose.orientation.y = q_fin[1]
            g.target_pose.pose.orientation.z = q_fin[2]
            g.target_pose.pose.orientation.w = q_fin[3]
            
            self.move_base_client.send_goal(g)
            print "Made it this far, sent goal"
                     
            
            success_to_box=self.move_base_client.wait_for_result()
    
            if success_to_box:
                print "Movement to box successful"
                self.at_box=True
               
            else:
                print "Movement to box failed"
                self.at_box=False   
                

        except(tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException, tf.Exception):
            print "TF Exception, try again"
Example 30
    def generate_grasp_poses(self, object_pose):
        # Compute all the points of the sphere with step X
        # http://math.stackexchange.com/questions/264686/how-to-find-the-3d-coordinates-on-a-celestial-spheres-surface
        radius = self._grasp_desired_distance
        ori_x = 0.0
        ori_y = 0.0
        ori_z = 0.0
        sphere_poses = []
        rotated_q = quaternion_from_euler(0.0, 0.0, math.radians(180))

        yaw_qtty = int((self._max_degrees_yaw - self._min_degrees_yaw) / self._step_degrees_yaw)  # NOQA
        pitch_qtty = int((self._max_degrees_pitch - self._min_degrees_pitch) / self._step_degrees_pitch)  # NOQA
        info_str = "Creating poses with parameters:\n" + \
            "Radius: " + str(radius) + "\n" \
            "Yaw from: " + str(self._min_degrees_yaw) + \
            " to " + str(self._max_degrees_yaw) + " with step " + \
            str(self._step_degrees_yaw) + " degrees.\n" + \
            "Pitch from: " + str(self._min_degrees_pitch) + \
            " to " + str(self._max_degrees_pitch) + \
            " with step " + str(self._step_degrees_pitch) + " degrees.\n" + \
            "Total: " + str(yaw_qtty) + " yaw * " + str(pitch_qtty) + \
            " pitch = " + str(yaw_qtty * pitch_qtty) + " grap poses."
        rospy.loginfo(info_str)

        # altitude is yaw
        for altitude in range(self._min_degrees_yaw, self._max_degrees_yaw, self._step_degrees_yaw):  # NOQA
            altitude = math.radians(altitude)
            # azimuth is pitch
            for azimuth in range(self._min_degrees_pitch, self._max_degrees_pitch, self._step_degrees_pitch):  # NOQA
                azimuth = math.radians(azimuth)
                # This gets all the positions
                x = ori_x + radius * math.cos(azimuth) * math.cos(altitude)
                y = ori_y + radius * math.sin(altitude)
                z = ori_z + radius * math.sin(azimuth) * math.cos(altitude)
                # this gets all the vectors pointing outside of the center
                # quaternion as x y z w
                q = quaternion_from_vectors([radius, 0.0, 0.0], [x, y, z])
                # Cannot be computed because the vectors are parallel
                if q is None:
                    # with this we add the missing arrow
                    q = rotated_q
                # We invert the orientations to look inwards by multiplying
                # with a quaternion 180deg rotation on yaw
                q = quaternion_multiply(q, rotated_q)

                # We actually want roll to be always 0.0 so we approach
                # the object with the gripper always horizontal
                # this rotation can be tuned with the dynamic params
                # multiplying later on
                roll, pitch, yaw = euler_from_quaternion(q)
                q = quaternion_from_euler(math.radians(0.0), pitch, yaw)

                x += object_pose.pose.position.x
                y += object_pose.pose.position.y
                z += object_pose.pose.position.z
                current_pose = Pose(
                    Point(x, y, z), Quaternion(*q))
                sphere_poses.append(current_pose)
        return sphere_poses
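quaternion_from_vectors is not a tf.transformations helper; from how it is used above, it is assumed to return the quaternion rotating the first vector onto the second, and None when the vectors are (anti)parallel. A minimal sketch under that assumption:

import numpy as np
from tf.transformations import quaternion_about_axis

def quaternion_from_vectors_sketch(v_from, v_to):
    a = np.asarray(v_from, dtype=float)
    b = np.asarray(v_to, dtype=float)
    a = a / np.linalg.norm(a)
    b = b / np.linalg.norm(b)
    axis = np.cross(a, b)
    if np.linalg.norm(axis) < 1e-9:
        return None  # parallel or anti-parallel: rotation axis is undefined
    angle = np.arccos(np.clip(np.dot(a, b), -1.0, 1.0))
    return quaternion_about_axis(angle, axis)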
Example 31
	def attractorField(self): 
		# Based on algorithm found here: 

		# linear attractive velocity
		# find euclidean distance between goal pose and current pose
		lin_dist = np.linalg.norm(self.goal_position - self.eef_position)
		if (lin_dist < self.min_attractive_field_threshold):
			self.attractor_linear_vel = (np.zeros((1,3)))[0]
		else: 
			if (lin_dist < self.max_attractive_field_radius): 
				self.attractor_linear_vel = (self.goal_position - self.eef_position)
			else: 
				self.attractor_linear_vel = self.lin_attractive_scaling_factor_*(self.goal_position - self.eef_position)/lin_dist


		# angular attractive velocity 
		# find distance between current eef orientation and goal orientation
		diff_quat = tfs.quaternion_multiply(self.goal_orientation, tfs.quaternion_inverse(self.eef_orientation))
		diff_quat = diff_quat / np.linalg.norm(diff_quat)
		self.angle_to_goal = 2*math.acos(diff_quat[3]) # 0 to 2pi. only rotation in one direction.
		if self.angle_to_goal > math.pi: 
			self.angle_to_goal -= 2*math.pi
			self.angle_to_goal = abs(self.angle_to_goal)
			diff_quat = -diff_quat

		self.attractor_angular_vel = (np.zeros((1,3)))[0]
		norm_den = math.sqrt(1 - diff_quat[3]*diff_quat[3])
		if norm_den < self.max_rot_attractive_field_threshold:
			self.attractor_angular_vel[0] = diff_quat[0]
			self.attractor_angular_vel[1] = diff_quat[1]
			self.attractor_angular_vel[2] = diff_quat[2]
		else:
			self.attractor_angular_vel[0] = diff_quat[0]/norm_den
			self.attractor_angular_vel[1] = diff_quat[1]/norm_den
			self.attractor_angular_vel[2] = diff_quat[2]/norm_den
			self.attractor_angular_vel[:] = self.ang_attractive_scaling_factor_*self.attractor_angular_vel[:] #scale the velocity
		if abs(self.angle_to_goal) < self.min_rot_attractive_field_threshold:
			self.attractor_angular_vel = (np.zeros((1,3)))[0]
Example 32
    def close_to_goal(self):
        # check if goal_pose in use
        if not self.goal_pose:
            return False

        # create quats from msg
        goal_quat_list = [
            self.goal_pose.orientation.x,
            self.goal_pose.orientation.y,
            self.goal_pose.orientation.z,
            -self.goal_pose.orientation.w,  # invert goal quat
        ]
        current_quat_list = [
            self.odom.pose.pose.orientation.x,
            self.odom.pose.pose.orientation.y,
            self.odom.pose.pose.orientation.z,
            self.odom.pose.pose.orientation.w,
        ]
        q_r = quaternion_multiply(current_quat_list, goal_quat_list)

        # convert relative quat to euler
        (roll_diff, pitch_diff, yaw_diff) = euler_from_quaternion(q_r)

        # check if close to goal
        diff_list = [
            abs(self.goal_pose.position.x - self.odom.pose.pose.position.x),
            abs(self.goal_pose.position.y - self.odom.pose.pose.position.y),
            abs(self.goal_pose.position.z - self.odom.pose.pose.position.z),
            roll_diff,
            pitch_diff,
            yaw_diff,
        ]
        is_close = True
        for diff, bound in zip(diff_list, self.goal_boundry):
            if diff > bound:
                is_close = False

        return is_close
    def get_qderivative_rotation(self):
        quat_between = transmethods.quaternion_multiply(
            self.goal_quat,
            transmethods.quaternion_inverse(self.quat_after_trans))

        rotation_derivative = quat_between[0:-1]
        rotation_derivative /= np.linalg.norm(rotation_derivative)

        if self.dist_rotation_aftertrans > self.ROTATION_DELTA_SWITCH:
            rotation_derivative_magnitude = self.ROTATION_LINEAR_COST_MULT_TOTAL
        else:
            rotation_derivative_magnitude = self.ROTATION_CONSTANT_ADD
            rotation_derivative_magnitude += self.ROTATION_QUADRATIC_COST_MULTPLIER * self.dist_rotation_aftertrans

        rotation_derivative *= self.ROTATION_MULTIPLIER * rotation_derivative_magnitude / self.ROBOT_ROTATION_COST_MULTIPLIER

        if (np.sum(self.goal_quat * self.quat_after_trans)) > 0:
            rotation_derivative *= -1

        #check to make sure direction is correct


#    quat_thisdir = transition_quaternion(self.quat_curr, rotation_derivative, self.ACTION_APPLY_TIME)
#    val_thisdir = self.get_value_rotation(dist_rotation=QuaternionDistance(quat_thisdir, self.goal_quat))
#    quat_negdir = transition_quaternion(self.quat_curr, -1.*rotation_derivative, self.ACTION_APPLY_TIME)
#    val_negdir = self.get_value_rotation(dist_rotation=QuaternionDistance(quat_negdir, self.goal_quat))
#    if (val_thisdir < val_negdir):
#      print 'ROT DIRECTION ERROR vals this dir: ' + str(val_thisdir) + '  neg dir: ' + str(val_negdir)
#    else:
#      print 'rotdir good'

#hacky part to make it not jumpy
        dist_rotation_limit = np.pi / 12.
        #print 'dist rotation: ' + str(self.dist_rotation_aftertrans)
        if (self.dist_rotation_aftertrans < dist_rotation_limit):
            rotation_derivative *= self.dist_rotation_aftertrans / dist_rotation_limit

        return rotation_derivative
 def cb_master_state(self, msg):
     self.master_pos = np.array([
         msg.pose.position.x, msg.pose.position.y, msg.pose.position.z
     ]) - self.center_pos
     self.master_rot = np.array([
         msg.pose.orientation.x, msg.pose.orientation.y,
         msg.pose.orientation.z, msg.pose.orientation.w
     ])
     self.master_rot = tr.quaternion_multiply(self.q_offset,
                                              self.master_rot)
     self.master_vel = np.array(
         [msg.velocity.x, msg.velocity.y, msg.velocity.z])
     self.master_dir = self.normalize_vector(self.master_vel)
     # Control the gripper
     cmd = self.gripper_cmd
     if msg.close_gripper:
         cmd = 5.0
     else:
         cmd = -5.0
     try:
         self.gripper_pub.publish(cmd)
     except rospy.exceptions.ROSException:
         pass
Example 35
    def generate_quat(self, s):
        s = max(0, s)
        s = min(s, 1)

        last_s = s - self._s_step
        if last_s < 0:
            last_s = 0

        this_pos = self.generate_pos(s)
        last_pos = self.generate_pos(last_s)
        dx = this_pos[0] - last_pos[0]
        dy = this_pos[1] - last_pos[1]
        dz = this_pos[2] - last_pos[2]

        rotq = self._compute_rot_quat(dx, dy, dz)

        # Calculating the step for the heading offset
        q_step = quaternion_about_axis(self._interp_fcns['heading'](s),
                                       np.array([0, 0, 1]))
        # Adding the heading offset to the rotation quaternion
        rotq = quaternion_multiply(rotq, q_step)

        return rotq
Example 36
def plot_command_matrix_in_matplotlib(command_matrix):
    from mpl_toolkits.mplot3d import axes3d
    import matplotlib.pyplot as plt
    from tf.transformations import (
        quaternion_inverse,
        quaternion_multiply,
    )

    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)
    ax.set_zlim(-1, 1)
    pxyz_idx = [0, 1, 2]
    qxyzw_idx = [3, 4, 5, 6]

    for i in range(command_matrix.shape[0]):
        qxyzw = command_matrix[i][qxyzw_idx]
        uvw = quaternion_multiply(
            qxyzw,
            quaternion_multiply(
                [1, 0, 0, 0],
                quaternion_inverse(qxyzw),
            ))
        uvw_ = quaternion_multiply(
            qxyzw,
            quaternion_multiply(
                [0, 1, 0, 0],
                quaternion_inverse(qxyzw),
            ))
        uvw__ = quaternion_multiply(
            qxyzw,
            quaternion_multiply(
                [0, 0, 1, 0],
                quaternion_inverse(qxyzw),
            ))
        x, y, z = command_matrix[i][pxyz_idx]
        u, v, w = uvw[:3]
        u_, v_, w_ = uvw_[:3]
        u__, v__, w__ = uvw__[:3]
        ax.quiver(x, y, z, u, v, w, length=0.01, color='red')
        ax.quiver(x, y, z, u_, v_, w_, length=0.01, color='green')
        ax.quiver(x, y, z, u__, v__, w__, length=0.01, color='blue')
    fig.show()
    plt.show()
Example 37
    def get_hand_diff(self, anchor_data, hand_data):
        res = [0] * 3
        if self.left_hand_state is None:
            return res
        res[0] = hand_data.pose.position.x - anchor_data.pose.position.x
        res[1] = hand_data.pose.position.y - anchor_data.pose.position.y
        res[1] *= -1
        res[2] = hand_data.pose.position.z - anchor_data.pose.position.z
        res[2] *= -1
        anchor_q = [
            anchor_data.pose.orientation.x, anchor_data.pose.orientation.y,
            anchor_data.pose.orientation.z, anchor_data.pose.orientation.w
        ]

        hand_q = [
            hand_data.pose.orientation.x, hand_data.pose.orientation.y,
            hand_data.pose.orientation.z, hand_data.pose.orientation.w
        ]
        angle = euler_from_quaternion(
            quaternion_multiply(self.quaternion_invert(anchor_q), hand_q))
        angle = list(angle)
        angle[2] *= -1
        return res + angle
Example 38
    def update_errors(self):
        if not self.odom_is_init:
            return
        self._update_reference()
        # Calculate error in the BODY frame
        self._update_time_step()
        # Rotation matrix from INERTIAL to BODY frame
        rotItoB = self._vehicle_model.rotItoB
        rotBtoI = self._vehicle_model.rotBtoI
        if self._dt > 0:
            # Update position error with respect to the the BODY frame
            pos = self._vehicle_model.pos
            vel = self._vehicle_model.vel
            quat = self._vehicle_model.quat
            self._errors['pos'] = np.dot(
                rotItoB, self._reference['pos'] - pos)

            # Update orientation error with respect to the BODY frame
            self._errors['rot'] = quaternion_multiply(
                quaternion_inverse(quat), self._reference['rot'])

            # Velocity error with respect to the the BODY frame
            self._errors['vel'] = np.hstack((
                np.dot(rotItoB, self._reference['vel'][0:3]) - vel[0:3],
                np.dot(rotItoB, self._reference['vel'][3:6]) - vel[3:6]))

        stamp = rospy.Time.now()
        msg = TrajectoryPoint()
        msg.header.stamp = stamp
        msg.header.frame_id = 'world'
        # Publish pose error
        msg.pose.position = Vector3(*np.dot(rotBtoI, self._errors['pos']))
        msg.pose.orientation = Quaternion(*self._errors['rot'])
        # Publish velocity errors in INERTIAL frame
        msg.velocity.linear = Vector3(*np.dot(rotBtoI, self._errors['vel'][0:3]))
        msg.velocity.angular = Vector3(*np.dot(rotBtoI, self._errors['vel'][3:6]))
        self._error_pub.publish(msg)
Example 39
def apply_transform(from_tf, to_tf):
    def get_R(trfm):
        quat_list = trfm.transform.rotation.x, trfm.transform.rotation.y, trfm.transform.rotation.z, trfm.transform.rotation.w
        rot_matrix = quaternion_matrix(quat_list)
        return rot_matrix[:3, :3]

    new_tf = TransformStamped()
    new_tf.header = from_tf.header
    new_tf.child_frame_id = to_tf.child_frame_id

    R = get_R(from_tf)

    [
        new_tf.transform.translation.x, new_tf.transform.translation.y,
        new_tf.transform.translation.z
    ] = [
        from_tf.transform.translation.x, from_tf.transform.translation.y,
        from_tf.transform.translation.z
    ] + np.matmul(R, [
        to_tf.transform.translation.x, to_tf.transform.translation.y,
        to_tf.transform.translation.z
    ])

    new_euler = quaternion_multiply([
        from_tf.transform.rotation.x, from_tf.transform.rotation.y,
        from_tf.transform.rotation.z, from_tf.transform.rotation.w
    ], [
        to_tf.transform.rotation.x, to_tf.transform.rotation.y,
        to_tf.transform.rotation.z, to_tf.transform.rotation.w
    ])

    [
        new_tf.transform.rotation.x, new_tf.transform.rotation.y,
        new_tf.transform.rotation.z, new_tf.transform.rotation.w
    ] = [new_euler[0], new_euler[1], new_euler[2], new_euler[3]]

    return new_tf
Example 40
    def spin_motion_callback(self, _):
        # Obviously spins the drone XD
        n = 20
        angle_inc = 2 * np.pi / float(n)

        q_inc = quaternion_from_euler(0, 0, angle_inc)

        # in case of crazy drift during spin, we define final pose in
        # map so that it returns to original pose (shouldn't be necessary irl)

        # spin 2 thirds
        #print("Spinning...")
        goal = self.cf1_pose
        try:
            goal = self.tf_buf.transform(self.cf1_pose, 'map',
                                         rospy.Duration(1))
        except:
            rospy.logerr("Can't transform cf1/pose to map yet...")
            return SpinResponse(Bool(False))
        for i in range(n):
            goal.pose.orientation = Quaternion(*quaternion_multiply([
                goal.pose.orientation.x, goal.pose.orientation.y,
                goal.pose.orientation.z, goal.pose.orientation.w
            ], q_inc))
            r = self.navgoal_call(goal,
                                  pos_thres=0.1,
                                  yaw_thres=0.1,
                                  vel_thres=1,
                                  vel_yaw_thres=100,
                                  duration=1.0)
            #print("Spin {}/{}: {}".format(i+1, n, r))
        # final goal
        # it might take a long time (over 20 sec) for the drone to reach the final orientation in simulation. Could be that its spinning really fast...
        #r = self.navgoal_call(final_pose, pos_thres=0.2, yaw_thres=0.1, vel_thres=0.1, vel_yaw_thres=0.1, duration=3.0)
        #print("Spin {}/{}: {}".format(n, n, r))
        #print("Spin goal: {}".format(final_pose))
        return SpinResponse(Bool(r))
Example 41
    def publish_object_marker(self):

        world_markerframe_quaternion = tfms.quaternion_multiply(
            self._object_kinematics._world_bodyframe_quaternion,
            BODY_MARKERFRAME_QUATERNION)

        world_bodyframe_rot = tfms.quaternion_matrix(
            self._object_kinematics._world_bodyframe_quaternion)
        world_diskcenter_marker_vec = np.matmul(world_bodyframe_rot,
                                                BODY_MARKERFRAME_POSITION)

        marker_pose = Pose()
        marker_pose.position = Point(
            self._object_kinematics._disk_center_position.x +
            world_diskcenter_marker_vec[0],
            self._object_kinematics._disk_center_position.y +
            world_diskcenter_marker_vec[1],
            self._object_kinematics._disk_center_position.z +
            world_diskcenter_marker_vec[2])

        marker_pose.orientation = Quaternion(world_markerframe_quaternion[0],
                                             world_markerframe_quaternion[1],
                                             world_markerframe_quaternion[2],
                                             world_markerframe_quaternion[3])

        marker = Marker(
            type=Marker.MESH_RESOURCE,
            action=Marker.ADD,
            id=0,
            pose=marker_pose,
            scale=Vector3(0.001, .001, .001),
            header=Header(frame_id="world"),
            color=ColorRGBA(.70, .70, .70, 1),
            mesh_resource=
            "package://rockwalk_kinematics/object_model/rockwalk_cone.dae")

        self._pub_kinematics._object_marker_publisher.publish(marker)
def _get_dmp_plan(robot, group, dmp_model, goal, goal_modification_info=None):
    list_of_postfix = get_eval_postfix(dmp_cmd_fields, 'pose')

    current_pose = group.get_current_pose().pose
    start = numpy.array(cook_array_from_object_using_postfixs(list_of_postfix, current_pose))
    end = numpy.array(cook_array_from_object_using_postfixs(list_of_postfix, goal))

    if goal_modification_info is not None:
        gmst = rospy.Time.now().to_sec()
        new_goal = copy.deepcopy(end)
        if 'translation' in goal_modification_info:
            pxyz_idx = goal_modification_info['translation']['index'] 
            new_goal[pxyz_idx] = end[pxyz_idx]+goal_modification_info['translation']['value']
        if 'quaternion_rotation' in goal_modification_info:
            qxyzw_idx = goal_modification_info['quaternion_rotation']['index']
            rot_q = goal_modification_info['quaternion_rotation']['value']
            new_goal[qxyzw_idx] = quaternion_multiply(rot_q, end[qxyzw_idx])
        end = new_goal
        gmet = rospy.Time.now().to_sec()
        rospy.logdebug("Took %s seconds to modify goal"%(gmet-gmst))

    st = rospy.Time.now().to_sec()
    command_matrix = generalize_via_dmp(start, end, dmp_model)
    rospy.logdebug("Took %s seconds to call generalize_via_dmp"%(rospy.Time.now().to_sec()-st))
    st = rospy.Time.now().to_sec()
    command_matrix = interpolate_pose_using_slerp(command_matrix, dmp_cmd_fields)
    rospy.logdebug("Took %s seconds to call interpolate_pose_using_slerp"%(rospy.Time.now().to_sec()-st))

    if goal_modification_info is None:
        update_goal_vector(numpy.asarray(command_matrix[-1]).reshape(-1).tolist())
    
    st = rospy.Time.now().to_sec()
    plan, fraction = _get_moveit_plan(robot, group, command_matrix, dmp_cmd_fields, 'pose')
    rospy.logdebug("Took %s seconds to call _get_moveit_plan"%(rospy.Time.now().to_sec()-st))

    rospy.loginfo('moveit plan success rate %s'%fraction)
    return plan, fraction
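A minimal usage sketch for the goal_modification_info dictionary consumed above, assuming the goal array is laid out as [px, py, pz, qx, qy, qz, qw] (the indices and offsets here are illustrative):

import numpy
from tf.transformations import quaternion_about_axis

goal_modification_info = {
    'translation': {
        'index': numpy.array([0, 1, 2]),                   # px, py, pz entries of the goal array
        'value': numpy.array([0.0, 0.05, 0.0]),            # shift the goal 5 cm along y
    },
    'quaternion_rotation': {
        'index': numpy.array([3, 4, 5, 6]),                # qx, qy, qz, qw entries of the goal array
        'value': quaternion_about_axis(0.1, (0, 0, 1)),    # extra 0.1 rad yaw applied to the goal
    },
}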
def got_accel_mag(msg_accel, msg_mag, msg_gyro):
    global q  # orientation estimate persists across callbacks
    alpha = 0.1

    ###Change this to actual time####
    dt = 0.001  # random timestep
    #################################

    ## GET ROLL AND PITCH FROM THE ACCELEROMETER
    ax, ay, az = msg_accel.data
    gx, gy, gz = msg_gyro.data
    mx, my, mz = msg_mag.data

    ## NORMALIZE THE ACCELEROMETER DATA
    norm_accel = m.sqrt(ax**2 + ay**2 + az**2)
    ax = ax / norm_accel
    ay = ay / norm_accel
    az = az / norm_accel
    a_roll = m.atan2(ay, az)
    a_pitch = m.atan2(-ax, m.sqrt(ay**2 + az**2))

    ## GET YAW FROM THE MAGNETOMETER (tilt-compensated heading);
    ## the accelerometer alone cannot observe yaw.
    ## Check signs/axes against the sensor mounting.
    mag_x = (mx * m.cos(a_pitch) + my * m.sin(a_roll) * m.sin(a_pitch)
             + mz * m.cos(a_roll) * m.sin(a_pitch))
    mag_y = my * m.cos(a_roll) - mz * m.sin(a_roll)
    a_yaw = m.atan2(-mag_y, mag_x)

    ## Process GyroScope data
    current_gyro_quat = quaternion_from_euler(gx * dt, gy * dt, gz * dt)
    q_check = quaternion_multiply(current_gyro_quat, q)
    g_roll, g_pitch, g_yaw = euler_from_quaternion(q_check)

    ##Combine Gyro and Accelerometer Roll pitch and yaw
    roll = (1 - alpha) * g_roll + alpha * a_roll
    pitch = (1 - alpha) * g_pitch + alpha * a_pitch
    yaw = (1 - alpha) * g_yaw + alpha * a_yaw

    ## CONVERT TO quaternion_from_euler
    q = quaternion_from_euler(roll, pitch, yaw)

    ## LETS PUBLISH THIS TO THE BROADCASTER
    tf_br.sendTransform((1, 1, 1), (q[0], q[1], q[2], q[3]), Time.now(),
                        'phone', 'world')
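The blend above is a complementary filter: the integrated gyro provides the short-term estimate and the accelerometer/magnetometer angles correct its drift. A quick numeric check of the weighting with the alpha = 0.1 used in the callback (the angle values are made up):

alpha = 0.1
g_roll, a_roll = 0.52, 0.48                   # gyro-integrated vs. accelerometer roll (rad)
roll = (1 - alpha) * g_roll + alpha * a_roll
print(roll)                                   # 0.516: dominated by the gyro, nudged toward the accelerometer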
Ejemplo n.º 44
0
    def within_acceptance_margins(self):
        # create quats from msg
        goal_quat_list = [
            self.controller_setpoint.orientation.x,
            self.controller_setpoint.orientation.y,
            self.controller_setpoint.orientation.z,
            -self.controller_setpoint.orientation.w,  # invert goal quat
        ]
        current_quat_list = [
            self.current_pose.orientation.x,
            self.current_pose.orientation.y,
            self.current_pose.orientation.z,
            self.current_pose.orientation.w,
        ]
        q_r = quaternion_multiply(current_quat_list, goal_quat_list)

        # convert relative quat to euler
        (roll_diff, pitch_diff, yaw_diff) = euler_from_quaternion(q_r)

        # check if close to goal
        diff_list = [
            abs(self.controller_setpoint.position.x - self.current_pose.position.x),
            abs(self.controller_setpoint.position.y - self.current_pose.position.y),
            abs(self.controller_setpoint.position.z - self.current_pose.position.z),
            abs(roll_diff),
            abs(pitch_diff),
            abs(yaw_diff),
        ]
        is_close = True
        for i in range(len(diff_list)):
            if diff_list[i] > self.acceptance_margins[i]: 
                is_close = False
            elif abs(self.current_velocity_list[i]) > self.acceptance_velocities[i]:
                is_close = False
                
        return is_close
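The orientation check above forms the relative quaternion q_r = q_current * q_goal^-1 and reads it back as Euler angles. A self-contained sketch of the same idea (tf.transformations; the setpoint and pose here are illustrative):

from tf.transformations import (quaternion_from_euler, quaternion_inverse,
                                quaternion_multiply, euler_from_quaternion)

q_goal = quaternion_from_euler(0.0, 0.0, 0.30)      # setpoint: 0.30 rad yaw
q_current = quaternion_from_euler(0.0, 0.0, 0.25)   # current attitude: 0.25 rad yaw
q_r = quaternion_multiply(q_current, quaternion_inverse(q_goal))
roll_diff, pitch_diff, yaw_diff = euler_from_quaternion(q_r)
print(yaw_diff)                                     # ~ -0.05 rad of yaw error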
    def handle_odometry(self, robot_odom):
        """Compute the relative motion of the robot from the previous odometry measurement
        to the current odometry measurement."""
        self.last_robot_odom = self.robot_odom
        self.robot_odom = robot_odom

        if self.last_robot_odom:

            p_map_currbaselink = np.array([self.robot_odom.pose.pose.position.x,
                                           self.robot_odom.pose.pose.position.y,
                                           self.robot_odom.pose.pose.position.z])

            p_map_lastbaselink = np.array([self.last_robot_odom.pose.pose.position.x,
                                           self.last_robot_odom.pose.pose.position.y,
                                           self.last_robot_odom.pose.pose.position.z])

            q_map_lastbaselink = np.array([self.last_robot_odom.pose.pose.orientation.x,
                                           self.last_robot_odom.pose.pose.orientation.y,
                                           self.last_robot_odom.pose.pose.orientation.z,
                                           self.last_robot_odom.pose.pose.orientation.w])

            q_map_currbaselink = np.array([self.robot_odom.pose.pose.orientation.x,
                                           self.robot_odom.pose.pose.orientation.y,
                                           self.robot_odom.pose.pose.orientation.z,
                                           self.robot_odom.pose.pose.orientation.w])
            
            R_map_lastbaselink = tr.quaternion_matrix(q_map_lastbaselink)[0:3,0:3]
            
            p_lastbaselink_currbaselink = R_map_lastbaselink.transpose().dot(p_map_currbaselink - p_map_lastbaselink)
            q_lastbaselink_currbaselink = tr.quaternion_multiply(tr.quaternion_inverse(q_map_lastbaselink), q_map_currbaselink)
            
            _, _, yaw_diff = tr.euler_from_quaternion(q_lastbaselink_currbaselink) 

            self.dyaw += yaw_diff
            self.dx += p_lastbaselink_currbaselink[0]
            self.dy += p_lastbaselink_currbaselink[1]
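A small check of the relative-motion computation above: composing the previous orientation with the relative quaternion should reproduce the current one (illustrative planar poses, tf.transformations imported as tr):

import numpy as np
import tf.transformations as tr

q_map_lastbaselink = tr.quaternion_from_euler(0, 0, 0.5)
q_map_currbaselink = tr.quaternion_from_euler(0, 0, 0.7)
q_rel = tr.quaternion_multiply(tr.quaternion_inverse(q_map_lastbaselink),
                               q_map_currbaselink)
q_check = tr.quaternion_multiply(q_map_lastbaselink, q_rel)
print(np.allclose(q_check, q_map_currbaselink))      # True: last * relative == current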
Ejemplo n.º 46
0
def get_target_transform(transform):
    ee_target = geometry_msgs.msg.Pose()
    # Copy position and orientation so the caller's transform is not mutated below
    ee_target.position = geometry_msgs.msg.Point(transform.position.x,
                                                 transform.position.y,
                                                 transform.position.z)
    ee_target.orientation = geometry_msgs.msg.Quaternion(
        transform.orientation.x, transform.orientation.y,
        transform.orientation.z, transform.orientation.w)
    q = [transform.orientation.x, transform.orientation.y, transform.orientation.z, transform.orientation.w]

    pos = qr.quaternion_rotate_vec(q, [0.0, 0.0, -0.16])  # offset from link_6 to ee
    ee_target.position.x += pos[0]
    ee_target.position.y += pos[1]
    ee_target.position.z += pos[2]

    qx = tt.quaternion_about_axis(math.pi, (1, 0, 0))
    q = tt.quaternion_multiply(q, qx)

    ee_target.orientation.x = q[0]
    ee_target.orientation.y = q[1]
    ee_target.orientation.z = q[2]
    ee_target.orientation.w = q[3]

    return ee_target
Ejemplo n.º 47
0
    def _calculate_angular_velocity(self):
        previous_orientation = QuaternionArray(
                self.last_pose_msg.pose.orientation.x,
                self.last_pose_msg.pose.orientation.y,
                self.last_pose_msg.pose.orientation.z,
                self.last_pose_msg.pose.orientation.w)

        # Calculate difference in orientation between previous and current position as a quaternion.
        diff_quat = quaternion_multiply(quaternion_inverse(previous_orientation), self.orientation)

        diff_euler = [i for i in euler_from_quaternion(diff_quat)]

        self.angular_diff_queue.append((diff_euler, self.dt))

        self.angular_velocity = Vector3Array(0, 0, 0)
        for item in self.angular_diff_queue:
            if item[1] != 0.0:
                self.angular_velocity[0] += item[0][0] / item[1]
                self.angular_velocity[1] += item[0][1] / item[1]
                self.angular_velocity[2] += item[0][2] / item[1]

        self.angular_velocity[0] /= len(self.angular_diff_queue)
        self.angular_velocity[1] /= len(self.angular_diff_queue)
        self.angular_velocity[2] /= len(self.angular_diff_queue)
Ejemplo n.º 48
0
def calculate_rot(start_orientation, rot):
    """Apply rotation to a quaternion

    Args:
        start_orientation (Quaternion): Initial orientation
        rot (Vector3): Angular speed to apply

    Returns:
        Quaternion: Result
    """
    rot_q = quaternion_from_euler(rot.x, rot.y, rot.z)
    orig_q = [
        start_orientation.x, start_orientation.y, start_orientation.z,
        start_orientation.w
    ]
    res_q = quaternion_multiply(rot_q, orig_q)

    res_msg = Quaternion()
    res_msg.x = res_q[0]
    res_msg.y = res_q[1]
    res_msg.z = res_q[2]
    res_msg.w = res_q[3]

    return res_msg
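A minimal usage sketch for calculate_rot, assuming geometry_msgs Quaternion/Vector3 messages (the 0.1 rad yaw step is illustrative):

from geometry_msgs.msg import Quaternion, Vector3

start = Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)    # identity orientation
step = Vector3(x=0.0, y=0.0, z=0.1)               # 0.1 rad yaw increment
new_orientation = calculate_rot(start, step)
print(new_orientation)                            # quaternion for a 0.1 rad yaw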
Ejemplo n.º 49
0
    def update_const_tf(self, ref_obj, const_name, tf_name, xyz, quat):
        obj = copy.deepcopy(ref_obj)
        target_const = obj.assemConsts[const_name]
        cri = tf_name.split("_")[-1]
        re_coord = utils.get_tf_matrix(xyz, quat)
        target_const[cri + "Coordinate"] = re_coord
        const_length = target_const["length"]
        if cri == "end":
            target_const["entryCoordinate"] = utils.get_translated_origin(
                xyz, quat, [0, 0, -const_length])
        else:
            target_const["endCoordinate"] = utils.get_translated_origin(
                xyz, quat, [0, 0, const_length])

        if const_name + "_spare" in obj.assemConsts.keys():
            spare_const = obj.assemConsts[const_name + "_spare"]
            spare_xyz = xyz
            rot_pi_x = tf.quaternion_from_euler(m.pi, 0, 0)
            spare_quat = tf.quaternion_multiply(rot_pi_x, quat)
            spare_const["endCoordinate"] = utils.get_translated_origin(
                spare_xyz, spare_quat, [0, 0, const_length])
            spare_const["entryCoordinate"] = utils.get_translated_origin(
                spare_xyz, spare_quat, [0, 0, -const_length])
        return obj
    def imu_callback(self, data):
        if data.header.frame_id == "imu_link":

            z_diff = float("-inf")
            try:
                old_z = self.tf_buffer.lookup_transform(
                    "world", "imu_link", rospy.Time()
                ).transform.translation.z
                for foot in self.feet:
                    trans = self.tf_buffer.lookup_transform("world", foot, rospy.Time())
                    z_diff = max(z_diff, old_z - trans.transform.translation.z)

                self.transform_imu.header.stamp = rospy.Time.now()
                self.transform_imu.transform.translation.z = z_diff

                imu_rotation = quaternion_multiply(
                    [
                        -data.orientation.x,
                        -data.orientation.y,
                        data.orientation.z,
                        data.orientation.w,
                    ],
                    quaternion_from_euler(0, -0.5 * pi, 0),
                )
                self.transform_imu.transform.rotation.x = imu_rotation[0]
                self.transform_imu.transform.rotation.y = imu_rotation[1]
                self.transform_imu.transform.rotation.z = imu_rotation[2]
                self.transform_imu.transform.rotation.w = imu_rotation[3]

                self._imu_broadcaster.sendTransform(self.transform_imu)
            except tf2_ros.TransformException as e:
                rospy.logdebug(
                    "Cannot calculate imu transform, because tf frames are not available, {0}".format(
                        e
                    )
                )
Ejemplo n.º 51
0
    def apply_transform_to_pose(self, pose_msg, transform_msg):
        '''
            Receive a pose_msg and a transform_msg and return the
            transformed pose as a PoseStamped.
        '''
        pose_transformed = PoseStamped()
        pose_transformed.header = pose_msg.header
        try:
            transform_scale = float(transform_msg.child_frame_id)
        except (ValueError, TypeError):
            transform_scale = -1
        quat_multiplied = quaternion_multiply(
            self.quat_to_vect(pose_msg.pose.orientation),
            tf.transformations.quaternion_conjugate(
                self.quat_to_vect(transform_msg.transform.rotation)))
        pose_transformed.pose.orientation = Quaternion(quat_multiplied[0],
                                                       quat_multiplied[1],
                                                       quat_multiplied[2],
                                                       quat_multiplied[3])

        rot = transform_msg.transform.rotation
        q = [rot.x, rot.y, rot.z, rot.w]
        pos = pose_msg.pose.position
        v = [pos.x, pos.y, pos.z]
        # print("v = {}".format(v))

        new_v = self.qv_mult(q, v)
        # print("new_v = {}".format(new_v))
        pose_transformed.pose.position.x = new_v[
            0] * transform_scale + transform_msg.transform.translation.x
        pose_transformed.pose.position.y = new_v[
            1] * transform_scale + transform_msg.transform.translation.y
        pose_transformed.pose.position.z = new_v[
            2] * transform_scale + transform_msg.transform.translation.z

        return pose_transformed
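The helper qv_mult used above is not shown; a common implementation rotates a vector by a quaternion as q * (v, 0) * q_conjugate. A sketch under that assumption (tf.transformations):

from tf.transformations import quaternion_multiply, quaternion_conjugate

def qv_mult(q, v):
    # Rotate vector v by quaternion q and return the vector part of the result
    v_quat = list(v) + [0.0]
    rotated = quaternion_multiply(quaternion_multiply(q, v_quat), quaternion_conjugate(q))
    return rotated[:3]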
Ejemplo n.º 52
0
    def _get_goal(self):
        if self._command is None or rospy.get_time(
        ) - self._last_reference_update > 0.1:
            return self._last_goal

        next_goal = deepcopy(self._last_goal)
        next_goal.p += PyKDL.Vector(self._command[0], self._command[1],
                                    self._command[2])

        q_step = trans.quaternion_from_euler(self._command[3],
                                             self._command[4],
                                             self._command[5])
        q_last = trans.quaternion_from_euler(*self._last_goal.M.GetRPY())
        q_next = trans.quaternion_multiply(q_last, q_step)

        next_goal.M = PyKDL.Rotation.Quaternion(*q_next)

        g_pos = [next_goal.p.x(), next_goal.p.y(), next_goal.p.z()]
        g_quat = next_goal.M.GetQuaternion()
        if self._arm_interface.inverse_kinematics(g_pos, g_quat) is not None:
            return next_goal
        else:
            print 'Next goal could not be resolved by the inv. kinematics solver.'
            return self._last_goal
Ejemplo n.º 53
0
    def get_command(self, msg):
        '''
        Publishes the wrench for this instant.
        (Note: this is called get_command because it used to be used for
        getting the actual thruster values, but now it is only being
        used for getting the wrench which is then later mapped elsewhere).

        '''
        if self.p_ref is None:
            return  # C3 is killed

        # Compute timestep from interval between this message's stamp and last's
        if self.last_odom is None:
            self.last_odom = msg
        else:
            self.timestep = (msg.header.stamp - self.last_odom.header.stamp).to_sec()
            self.last_odom = msg

        # ROS read-in
        position = msg.pose.pose.position
        orientation = msg.pose.pose.orientation
        lin_vel_body = msg.twist.twist.linear
        ang_vel_body = msg.twist.twist.angular

        # ROS unpack
        self.position = np.array([position.x, position.y])
        self.orientation = np.array([orientation.x, orientation.y, orientation.z, orientation.w])

        # Frame management quantities
        R = np.eye(3)
        R[:2, :2] = trns.quaternion_matrix(self.orientation)[:2, :2]
        y = trns.euler_from_quaternion(self.orientation)[2]

        # More ROS unpacking, converting body frame twist to world frame lin_vel and ang_vel
        self.lin_vel = R.dot(np.array([lin_vel_body.x, lin_vel_body.y, lin_vel_body.z]))[:2]
        self.ang_vel = R.dot(np.array([ang_vel_body.x, ang_vel_body.y, ang_vel_body.z]))[2]

        # Convert body PD gains to world frame
        kp = R.dot(self.kp_body).dot(R.T)
        kd = R.dot(self.kd_body).dot(R.T)

        # Compute error components (reference - true)
        p_err = self.p_ref - self.position
        y_err = trns.euler_from_quaternion(trns.quaternion_multiply(
            self.q_ref, trns.quaternion_inverse(self.orientation)))[2]
        v_err = self.v_ref - self.lin_vel
        w_err = self.w_ref - self.ang_vel

        # Combine error components into error vectors
        err = np.concatenate((p_err, [y_err]))
        errdot = np.concatenate((v_err, [w_err]))

        # Compute "anticipation" feedforward based on the boat's inertia
        inertial_feedforward = np.concatenate(
            (self.a_ref, [self.aa_ref])) * [self.mass_ref, self.mass_ref, self.inertia_ref]

        # Compute the "learning" matrix
        drag_regressor = np.array([
            [
                self.lin_vel[0] * np.cos(y)**2 + self.lin_vel[1] * np.sin(y) * np.cos(y),
                self.lin_vel[0] / 2 - (self.lin_vel[0] * np.cos(2 * y)) / 2 - (self.lin_vel[1] * np.sin(2 * y)) / 2,
                -self.ang_vel * np.sin(y),
                -self.ang_vel * np.cos(y),
                0
            ],
            [
                self.lin_vel[1] / 2 - (self.lin_vel[1] * np.cos(2 * y)) / 2 + (self.lin_vel[0] * np.sin(2 * y)) / 2,
                self.lin_vel[1] * np.cos(y)**2 - self.lin_vel[0] * np.cos(y) * np.sin(y),
                self.ang_vel * np.cos(y),
                -self.ang_vel * np.sin(y),
                0
            ],
            [
                0,
                0,
                self.lin_vel[1] * np.cos(y) - self.lin_vel[0] * np.sin(y),
                - self.lin_vel[0] * np.cos(y) - self.lin_vel[1] * np.sin(y),
                self.ang_vel
            ]])

        # wrench = PD + feedforward + I + adaptation
        if self.only_PD:
            wrench = (kp.dot(err)) + (kd.dot(errdot))
            self.drag_effort = [0, 0, 0]
            self.dist_est = [0, 0, 0]
        else:
            self.drag_effort = np.clip(drag_regressor.dot(self.drag_est), -self.drag_limit, self.drag_limit)
            wrench = (kp.dot(err)) + (kd.dot(errdot)) + inertial_feedforward + self.dist_est + self.drag_effort
            # Update disturbance estimate, drag estimates
            if self.learn and (npl.norm(p_err) < self.learning_radius):
                self.dist_est = np.clip(self.dist_est + (self.ki * err * self.timestep), -
                                        self.dist_limit, self.dist_limit)
                self.drag_est = self.drag_est + (self.kg * (drag_regressor.T.dot(err + errdot)) * self.timestep)

        # Update model reference for the next call
        if not self.use_external_tgen:
            self.increment_reference()

        # convert wrench to body frame
        wrench_body = R.T.dot(wrench)

        # NOT NEEDED SINCE WE ARE USING A DIFFERENT NODE FOR ACTUAL THRUSTER MAPPING
        # # Compute world frame thruster matrix (B) from thruster geometry, and then map wrench to thrusts
        # B = np.concatenate((R.dot(self.thruster_directions.T), R.dot(self.lever_arms.T)))
        # B_3dof = np.concatenate((B[:2, :], [B[5, :]]))
        # command = self.thruster_mapper(wrench, B_3dof)

        # Give wrench to ROS
        wrench_msg = WrenchStamped()
        wrench_msg.header.frame_id = "/base_link"
        wrench_msg.wrench.force.x = wrench_body[0]
        wrench_msg.wrench.force.y = wrench_body[1]
        wrench_msg.wrench.torque.z = wrench_body[2]
        self.wrench_pub.publish(wrench_msg)

        # Publish reference pose for examination
        self.pose_ref_pub.publish(PoseStamped(
            header=Header(
                frame_id='/enu',
                stamp=msg.header.stamp,
            ),
            pose=Pose(
                position=Point(self.p_ref[0], self.p_ref[1], 0),
                orientation=Quaternion(*self.q_ref),
            ),
        ))

        # Publish adaptation (Y*theta) for plotting
        adaptation_msg = WrenchStamped()
        adaptation_msg.header.frame_id = "/base_link"
        adaptation_msg.wrench.force.x = (self.dist_est + self.drag_effort)[0]
        adaptation_msg.wrench.force.y = (self.dist_est + self.drag_effort)[1]
        adaptation_msg.wrench.torque.z = (self.dist_est + self.drag_effort)[2]
        self.adaptation_pub.publish(adaptation_msg)

        try:
            self.theta_pub.publish(Float64MultiArray(data=self.drag_est))
        except BaseException:
            traceback.print_exc()
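The disturbance and drag updates near the end of get_command follow a simple integral / gradient adaptation law. Isolated for clarity, with illustrative gains and errors (mirroring the dist_est and drag_est updates above, without the clipping):

import numpy as np

ki, kg, dt = 0.1, 0.05, 0.02                    # integral gain, adaptation gain, timestep (illustrative)
err = np.array([0.3, -0.1, 0.05])               # [x, y, yaw] pose error
errdot = np.array([0.02, 0.0, 0.01])            # [x, y, yaw] velocity error
drag_regressor = np.zeros((3, 5))               # regressor Y built from the current velocities

dist_est = np.zeros(3)
drag_est = np.zeros(5)
dist_est = dist_est + ki * err * dt                                   # integral-style disturbance estimate
drag_est = drag_est + kg * drag_regressor.T.dot(err + errdot) * dt    # gradient update on the drag parameters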
Ejemplo n.º 54
0
def quat_rot(p, q):
    p.append(0)
    pqinv = tft.quaternion_multiply(p, [-q[0], -q[1], -q[2], q[3]])
    qpqinv = tft.quaternion_multiply(q, pqinv)
    return qpqinv
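A usage sketch for quat_rot: rotating the x unit vector by a 90-degree yaw should land on the y axis (note that the function appends a 0 to the vector list in place):

import math
import tf.transformations as tft

q_yaw90 = tft.quaternion_about_axis(math.pi / 2, (0, 0, 1))   # 90 deg rotation about z
rotated = quat_rot([1.0, 0.0, 0.0], q_yaw90)
print(rotated[:3])                                            # approximately [0, 1, 0]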
Ejemplo n.º 55
0
 def yaw_left(self, angle, unit='rad'):
     return self.set_orientation(
         transformations.quaternion_multiply(
             transformations.quaternion_about_axis(angle * UNITS[unit], UP),
             self.orientation))
Ejemplo n.º 56
0
    def create_grasp(self, pose, grasp_id):
        """
        :type pose: Pose
            pose of the gripper for the grasp
        :type grasp_id: str
            name for the grasp
        :rtype: Grasp
        """
        g = Grasp()
        g.id = grasp_id

        pre_grasp_posture = JointTrajectory()
        pre_grasp_posture.header.frame_id = self._grasp_postures_frame_id
        pre_grasp_posture.joint_names = [
            name for name in self._gripper_joint_names.split()]
        jtpoint = JointTrajectoryPoint()
        jtpoint.positions = [
            float(pos) for pos in self._gripper_pre_grasp_positions.split()]
        jtpoint.time_from_start = rospy.Duration(self._time_pre_grasp_posture)
        pre_grasp_posture.points.append(jtpoint)

        grasp_posture = copy.deepcopy(pre_grasp_posture)
        grasp_posture.points[0].time_from_start = rospy.Duration(
            self._time_pre_grasp_posture + self._time_grasp_posture)
        jtpoint2 = JointTrajectoryPoint()
        jtpoint2.positions = [
            float(pos) for pos in self._gripper_grasp_positions.split()]
        jtpoint2.time_from_start = rospy.Duration(
            self._time_pre_grasp_posture +
            self._time_grasp_posture + self._time_grasp_posture_final)
        grasp_posture.points.append(jtpoint2)

        g.pre_grasp_posture = pre_grasp_posture
        g.grasp_posture = grasp_posture

        header = Header()
        header.frame_id = self._grasp_pose_frame_id  # base_footprint
        q = [pose.orientation.x, pose.orientation.y,
             pose.orientation.z, pose.orientation.w]
        # Fix orientation from gripper_link to parent_link (tool_link)
        fix_tool_to_gripper_rotation_q = quaternion_from_euler(
            math.radians(self._fix_tool_frame_to_grasping_frame_roll),
            math.radians(self._fix_tool_frame_to_grasping_frame_pitch),
            math.radians(self._fix_tool_frame_to_grasping_frame_yaw)
        )
        q = quaternion_multiply(q, fix_tool_to_gripper_rotation_q)
        fixed_pose = copy.deepcopy(pose)
        fixed_pose.orientation = Quaternion(*q)

        g.grasp_pose = PoseStamped(header, fixed_pose)
        g.grasp_quality = self._grasp_quality

        g.pre_grasp_approach = GripperTranslation()
        g.pre_grasp_approach.direction.vector.x = self._pre_grasp_direction_x  # NOQA
        g.pre_grasp_approach.direction.vector.y = self._pre_grasp_direction_y  # NOQA
        g.pre_grasp_approach.direction.vector.z = self._pre_grasp_direction_z  # NOQA
        g.pre_grasp_approach.direction.header.frame_id = self._grasp_postures_frame_id  # NOQA
        g.pre_grasp_approach.desired_distance = self._grasp_desired_distance  # NOQA
        g.pre_grasp_approach.min_distance = self._grasp_min_distance
        g.post_grasp_retreat = GripperTranslation()
        g.post_grasp_retreat.direction.vector.x = self._post_grasp_direction_x  # NOQA
        g.post_grasp_retreat.direction.vector.y = self._post_grasp_direction_y  # NOQA
        g.post_grasp_retreat.direction.vector.z = self._post_grasp_direction_z  # NOQA
        g.post_grasp_retreat.direction.header.frame_id = self._grasp_postures_frame_id  # NOQA
        g.post_grasp_retreat.desired_distance = self._grasp_desired_distance  # NOQA
        g.post_grasp_retreat.min_distance = self._grasp_min_distance

        g.max_contact_force = self._max_contact_force
        g.allowed_touch_objects = self._allowed_touch_objects

        return g
Ejemplo n.º 57
0
 def roll_right(self, angle):
     return self.set_orientation(transformations.quaternion_multiply(
         self.orientation,
         transformations.quaternion_about_axis(angle, [1, 0, 0]),
     ))
Ejemplo n.º 58
0
def qmul(b,a):
    """ Quaternion Multiply """
    return tx.quaternion_multiply(b, a)
            if gX == 0:
                gX = -1
            if gY == 0:
                gY = -1
            if gZ == 0:
                gZ = -1
            for i in range(0, 240):
                nextX = gX*math.exp(-1*bX*i)*math.sin(aX*math.pi*i)/damper + 0.65
                nextY = gY*math.exp(-1*bY*i)*math.sin(aY*math.pi*i)/damper + 0.0
                nextZ = gZ*math.exp(-1*bZ*i)*math.sin(aZ*math.pi*i)/damper + 0.5

                roll  = (nextX - self.prevX)*4
                pitch = (nextY - self.prevY)*2.25
                q_base = quaternion_from_euler(0, math.pi/2, 0)
                q = quaternion_from_euler(0, pitch, -roll)
                q = quaternion_multiply(q_base, q)

                self.prevX = nextX
                self.prevY = nextY

                newPose = PoseStamped()
                newPose.header = Header(stamp=rospy.Time.now(), frame_id='base')
                newPose.pose.position.x = nextX
                newPose.pose.position.y = nextY
                newPose.pose.position.z = nextZ
                newPose.pose.orientation.x = q[0]
                newPose.pose.orientation.y = q[1]
                newPose.pose.orientation.z = q[2]
                newPose.pose.orientation.w = q[3]

                wpt_opts = MotionWaypointOptions(max_linear_speed=7.0, max_linear_accel=5.0, corner_distance=0.085)
Ejemplo n.º 60
0
 def pitch_down(self, angle):
     return self.set_orientation(transformations.quaternion_multiply(
         transformations.quaternion_about_axis(angle, self.zero_roll().left_vector),
         self.orientation,
     ))