Code example #1
    def pbvs(self, kp, tols_p, tols_r, abort_force, max_iters = 25, no_z = False, z_offset = 0):
        """Run position-based visual servoing until the pose error is within tolerance.

        Each iteration captures an image, computes a gripper target pose and
        the remaining error, and executes a planned trajectory toward it.

        Returns (err_p, err_r) once both the translational and rotational
        errors fall below tols_p / tols_r.  Raises Exception on abort or when
        max_iters iterations pass without converging.
        """
        for _ in range(max_iters + 1):
            if self._aborted:
                raise Exception("Operation aborted")

            img = self.receive_image()
            target_pose, err = self.compute_step_gripper_target_pose(img, kp, no_z = no_z, z_offset = z_offset)

            # Translational error; ignore the z component when no_z is set.
            if no_z:
                err_p = np.linalg.norm(err.p[0:2])
            else:
                err_p = np.linalg.norm(err.p)
            err_r = np.abs(rox.R2rot(err.R)[1])

            if err_p < tols_p and err_r < tols_r:
                return err_p, err_r

            # NOTE: a small y-offset was once applied here to avoid panel overlap.
            plan = self.planner.trajopt_plan(target_pose, json_config_name = "panel_placement")

            if self._aborted:
                raise Exception("Operation aborted")
            self.controller_commander.set_controller_mode(self.controller_commander.MODE_AUTO_TRAJECTORY,0.7,[], abort_force)
            self.controller_commander.execute_trajectory(plan)

        raise Exception("Placement controller timeout")
def test_rot2q():
    """Check rox.rot2q against a precomputed quaternion for a fixed rotation."""
    R = np.array([[-0.5057639, -0.1340537, 0.8521928],
                  [0.6456962, -0.7139224, 0.2709081],
                  [0.5720833, 0.6872731, 0.4476342]])
    axis, angle = rox.R2rot(R)
    expected_q = np.array([0.2387194, 0.4360402, 0.2933459, 0.8165967])
    np.testing.assert_allclose(expected_q, rox.rot2q(axis, angle), atol=1e-6)
Code example #3
    def compute_step_gripper_target_pose(self,
                                         img,
                                         Kp,
                                         no_z=False,
                                         z_offset=0):
        """Compute the next world-frame target pose for the vacuum gripper.

        The marker-based error transform from `img` is scaled by the
        proportional gains in Kp (rotation gains Kp[0:3], translation gains
        Kp[3:6]) and mapped through the gripper/camera TF chain.

        Returns (target_pose, error_transform); error_transform is the
        unscaled error (with its z component zeroed or offset).
        """
        fixed_marker_transform, payload_marker_transform, error_transform = \
            self.compute_error_transform(img)

        # Suppress or bias the vertical component of the error.
        if no_z:
            error_transform.p[2] = 0
        else:
            error_transform.p[2] -= z_offset

        gripper_to_camera_tf = self.tf_listener.lookupTransform(
            "vacuum_gripper_tool", "gripper_camera_2", rospy.Time(0))
        world_to_vacuum_gripper_tool_tf = self.tf_listener.lookupTransform(
            "world", "vacuum_gripper_tool", rospy.Time(0))

        # Proportional scaling of the rotational error via its axis-angle form.
        axis, angle = rox.R2rot(error_transform.R)
        scaled_rvec = np.multiply(axis * angle, Kp[0:3])
        scaled_angle = np.linalg.norm(scaled_rvec)
        if scaled_angle < 1e-6:
            # Rotation too small to define an axis reliably; use identity.
            scaled_R = np.eye(3)
        else:
            scaled_R = rox.rot(scaled_rvec / scaled_angle, scaled_angle)
        scaled_p = np.multiply(error_transform.p, Kp[3:6])

        scaled_error = rox.Transform(scaled_R, scaled_p)

        # Express the scaled error at the fixed marker as seen from the gripper.
        gripper_to_fixed_marker_tf = gripper_to_camera_tf * fixed_marker_transform
        gripper_to_desired_fixed_marker_tf = gripper_to_fixed_marker_tf * scaled_error

        correction = (gripper_to_desired_fixed_marker_tf *
                      gripper_to_fixed_marker_tf.inv()).inv()
        target_pose = world_to_vacuum_gripper_tool_tf * correction

        return target_pose, error_transform
def _R2rot_test(k1, theta1):
    """Round-trip check: rox.rot(k1, theta1) -> rox.R2rot must recover (k1, theta1).

    The axis-angle pair is only unique up to (k, theta) ~ (-k, -theta), and at
    theta = pi the axis sign itself is ambiguous, so both cases are normalized
    before comparing.
    """
    R = rox.rot(k1, theta1)
    k2, theta2 = rox.R2rot(R)
    # R2rot may return the equivalent (-k, -theta) representation; flip back.
    if abs(theta1 - theta2) > (theta1 + theta2):
        k2 = -k2
        theta2 = -theta2

    np.testing.assert_allclose(theta1, theta2, atol=1e-6)
    if (abs(theta1) < 1e-9):
        # Near-zero rotation: the axis is undefined, nothing more to check.
        return

    # Fix: compare |theta1| to pi with an absolute difference.  The original
    # condition `(np.abs(theta1) - np.pi) < 1e-9` was true for every
    # theta1 <= pi (R2rot returns theta in [0, pi]), so the general-case
    # assertion below was unreachable.
    if abs(np.abs(theta1) - np.pi) < 1e-9:
        # At theta = pi both +k and -k describe the same rotation.
        if np.linalg.norm(k1 + k2) < 1e-6:
            np.testing.assert_allclose(k1, -k2, atol=1e-6)
            return
        np.testing.assert_allclose(k1, k2, atol=1e-6)
        return

    np.testing.assert_allclose(k1, k2, atol=1e-6)
Code example #5
def compute_step_gripper_target_pose(img, fixed_marker, payload_marker, desired_transform, \
                 camera_info, aruco_dict, Kp, tf_listener):
    """Compute the next world-frame target pose for the vacuum gripper.

    Module-level (Python 2) variant: obtains the fixed-marker transform and
    the pose error from `img` (presumably via ArUco detection inside
    compute_error_transform -- confirm against that helper), scales the error
    by the proportional gains in Kp (rotation gains Kp[0:3], translation
    gains Kp[3:6]), and maps it through the gripper/camera TF chain to a
    desired world pose for the vacuum gripper tool.
    """
    fixed_marker_transform, error_transform = compute_error_transform(img, fixed_marker, payload_marker, \
                                              desired_transform, camera_info, aruco_dict)

    gripper_to_camera_tf = tf_listener.lookupTransform("vacuum_gripper_tool",
                                                       "gripper_camera_2",
                                                       rospy.Time(0))

    world_to_vacuum_gripper_tool_tf = tf_listener.lookupTransform(
        "world", "vacuum_gripper_tool", rospy.Time(0))

    #Scale by Kp
    k, theta = rox.R2rot(error_transform.R)
    r = np.multiply(k * theta, Kp[0:3])
    r_norm = np.linalg.norm(r)
    # Rotation too small to define an axis reliably; fall back to identity.
    if (r_norm < 1e-6):
        error_transform2_R = np.eye(3)
    else:
        error_transform2_R = rox.rot(r / r_norm, r_norm)
    error_transform2_p = np.multiply(error_transform.p, (Kp[3:6]))

    error_transform2 = rox.Transform(error_transform2_R, error_transform2_p)

    # Express the scaled error at the fixed marker as seen from the gripper.
    gripper_to_fixed_marker_tf = gripper_to_camera_tf * fixed_marker_transform
    gripper_to_desired_fixed_marker_tf = gripper_to_fixed_marker_tf * error_transform2

    #print gripper_to_fixed_marker_tf

    # World target = current tool pose composed with the inverse correction.
    ret = world_to_vacuum_gripper_tool_tf * (
        gripper_to_desired_fixed_marker_tf *
        gripper_to_fixed_marker_tf.inv()).inv()

    #print world_to_vacuum_gripper_tool_tf
    #print ret

    print error_transform

    return ret
Code example #6
    def pbvs_jacobian(self):
        """Jacobian-based visual servoing with compliance force control.

        Loop: capture image -> compute marker pose error in the camera frame
        -> map it to a robot spatial velocity -> adjust the vertical
        component from force/torque feedback -> solve a QP for joint
        velocities -> execute a short trapezoidal joint move.  Runs until
        the height, lateral, and orientation errors are within tolerance,
        then saves the collected telemetry to a .mat file.

        Raises:
            Exception: if the FT-sensor callback has not fired yet, or a
                commanded trajectory returns a nonzero error code.
        """
        self.controller_commander.set_controller_mode(
            self.controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [], [])
        # Sentinel errors so the first loop iteration always runs.
        tvec_err = [100, 100, 100]
        rvec_err = [100, 100, 100]

        # Bias reading of the force/torque sensor taken at servoing start.
        self.FTdata_0 = self.FTdata

        error_transform = rox.Transform(rox.rpy2R([2, 2, 2]),
                                        np.array([100, 100, 100]))

        # Telemetry buffers, dumped to a .mat file after the loop exits.
        FT_data_ori = []
        FT_data_biased = []
        err_data_p = []
        err_data_rpy = []
        joint_data = []
        time_data = []
        #TODO: should listen to stage_3_tol_r not 1 degree
        print self.desired_camera2_transform
        #R_desired_cam = self.desired_camera2_transform.R.dot(self.desired_transform.R)
        #R_desired_cam = R_desired_cam.dot(self.desired_camera2_transform.R.transpose())
        #t_desired_cam = -self.desired_camera2_transform.R.dot(self.desired_transform.p)

        # Iterate until height (z), lateral position, and orientation are
        # all within tolerance.
        while (error_transform.p[2] > 0.01 or np.linalg.norm(
            [error_transform.p[0], error_transform.p[1]]) > self.stage3_tol_p
               or
               np.linalg.norm(rox.R2rpy(error_transform.R)) > np.deg2rad(1)):

            img = self.receive_image()

            fixed_marker_transform, payload_marker_transform, error_transform = self.compute_error_transform(
                img)
            #print self.desired_transform.R.T, -fixed_marker_transform.R.dot(self.desired_transform.p)

            # Desired relative rotation/translation expressed in the camera frame.
            R_desired_cam = fixed_marker_transform.R.dot(
                self.desired_transform.R)
            R_desired_cam = R_desired_cam.dot(
                fixed_marker_transform.R.transpose())
            t_desired_cam = -fixed_marker_transform.R.dot(
                self.desired_transform.p)

            # Compute error directly in the camera frame
            observed_R_difference = np.dot(
                payload_marker_transform.R,
                fixed_marker_transform.R.transpose())
            k, theta = rox.R2rot(
                np.dot(observed_R_difference, R_desired_cam.transpose())
            )  #np.array(rox.R2rpy(rvec_err1))
            rvec_err1 = k * theta

            observed_tvec_difference = fixed_marker_transform.p - payload_marker_transform.p
            tvec_err1 = -fixed_marker_transform.R.dot(
                self.desired_transform.p) - observed_tvec_difference
            #print 'SPOT: ',tvec_err1, rvec_err1
            # Map error to the robot spatial velocity
            world_to_camera_tf = self.tf_listener.lookupTransform(
                "world", "gripper_camera_2", rospy.Time(0))
            camera_to_link6_tf = self.tf_listener.lookupTransform(
                "gripper_camera_2", "link_6", rospy.Time(0))

            # Cross-product (hat) term coupling the camera-frame rotation
            # error into a translation at link 6.
            t21 = -np.dot(
                rox.hat(
                    np.dot(world_to_camera_tf.R,
                           (camera_to_link6_tf.p -
                            payload_marker_transform.p.reshape((1, 3))).T)),
                world_to_camera_tf.R)  #np.zeros((3,3))#

            # v = R_oc(vc)c + R_oc(omeega_c)_c x (r_pe)_o = R_oc(vc)c - (r_pe)_o x R_oc(omeega_c)_c
            tvec_err = t21.dot(rvec_err1).reshape(
                (3, )) + world_to_camera_tf.R.dot(tvec_err1).reshape((3, ))
            # omeega = R_oc(omeega_c)_c
            rvec_err = world_to_camera_tf.R.dot(rvec_err1).reshape((3, ))

            # Saturate the commanded error to keep each step small.
            tvec_err = np.clip(tvec_err, -0.2, 0.2)
            rvec_err = np.clip(rvec_err, -np.deg2rad(5), np.deg2rad(5))

            # Close to contact: stop commanding z; force control takes over.
            if tvec_err[2] < 0.03:
                rospy.loginfo("Only Compliance Control")
                tvec_err[2] = 0

            rot_err = rox.R2rpy(error_transform.R)
            rospy.loginfo("tvec difference: %f, %f, %f", error_transform.p[0],
                          error_transform.p[1], error_transform.p[2])
            rospy.loginfo("rvec difference: %f, %f, %f", rot_err[0],
                          rot_err[1], rot_err[2])

            # Proportional spatial-velocity command (rotation part first).
            dx = -np.concatenate((rvec_err, tvec_err)) * self.K_pbvs

            print np.linalg.norm([error_transform.p[0], error_transform.p[1]])
            print np.linalg.norm(rox.R2rpy(error_transform.R))

            # Compliance Force Control
            if (not self.ft_flag):
                raise Exception("havent reached FT callback")
            # Compute the exteranl force
            FTread = self.FTdata - self.FTdata_0  # (F)-(F0)
            print '================ FT1 =============:', FTread
            print '================ FT2 =============:', self.FTdata

            # Select the force setpoint from the measured vertical force.
            if FTread[-1] > (self.F_d_set1 + 50):
                F_d = self.F_d_set1
            else:
                F_d = self.F_d_set2

            if (self.FTdata == 0).all():
                # All-zero reading treated as sensor overflow; nudge downward.
                rospy.loginfo("FT data overflow")
                dx[-1] += self.K_pbvs * 0.004
            else:
                # Extra descent correction when torque about x is large.
                tx_correct = 0
                if abs(self.FTdata[0]) > 80:
                    tx_correct = 0.0002 * (abs(self.FTdata[0]) - 80)

                Vz = self.Kc * (F_d - FTread[-1]) + tx_correct
                dx[-1] = dx[-1] + Vz

            print "dx:", dx

            # QP maps the spatial velocity to joint velocities for the
            # ABB IRB6640 under its constraints.
            current_joint_angles = self.controller_commander.get_current_joint_values(
            )
            joints_vel = QP_abbirb6640(
                np.array(current_joint_angles).reshape(6, 1), np.array(dx))
            goal = self.trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(1),
                np.array(current_joint_angles), 0.008, 0.008,
                0.015)  #acc,dcc,vmax)

            print "joints_vel:", joints_vel

            # Execute the short joint move and check the result code.
            self.client.wait_for_server()
            self.client.send_goal(goal)
            self.client.wait_for_result()
            res = self.client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

            # Log telemetry for this iteration.
            FT_data_ori.append(self.FTdata)
            FT_data_biased.append(FTread)
            err_data_p.append(error_transform.p)
            err_data_rpy.append(rot_err)
            joint_data.append(current_joint_angles)
            time_data.append(time.time())

        # Persist the run's telemetry for offline analysis.
        filename_pose = "/home/rpi-cats/Desktop/YC/Data/Panel2_Placement_In_Nest_Pose_" + str(
            time.time()) + ".mat"
        scipy.io.savemat(filename_pose,
                         mdict={
                             'FT_data_ori': FT_data_ori,
                             'FT_data_biased': FT_data_biased,
                             'err_data_p': err_data_p,
                             'err_data_rpy': err_data_rpy,
                             'joint_data': joint_data,
                             'time_data': time_data
                         })

        rospy.loginfo("End  ====================")
Code example #7
def update_ik_info(R_d, p_d):
    """Damped-least-squares iterative inverse kinematics toward (R_d, p_d).

    Uses the module-level `robot` model and `d_q` joint angles as the start
    guess, iterating a damped least-squares update in the end-effector frame
    for at most 200 steps.

    Args:
        R_d: desired 3x3 orientation matrix.
        p_d: desired position vector.

    Returns:
        (q_cur, converged): joint column vector (num_joints, 1) and a bool
        indicating whether the error dropped below 1e-4 within the limit.
    """
    # R_d, p_d: Desired orientation and position
    global robot
    global d_q  # Get Current Joint angles in radian ndarray
    global num_joints

    q_cur = d_q  # initial guess on the current joint angles
    q_cur = q_cur.reshape((num_joints, 1))

    epsilon = 0.001  # Damping Constant
    # Small diagonal term keeps (Kq + J^T J) positive definite (damped LS).
    Kq = epsilon * np.eye(len(q_cur))

    max_steps = 200  # iteration cap for convergence

    itr = 0
    converged = False
    while itr < max_steps and not converged:

        pose = rox.fwdkin(robot, q_cur)
        R_cur = pose.R
        p_cur = pose.p

        # Current Jacobian, transformed from the base frame to the
        # end-effector frame.
        J0T = rox.robotjacobian(robot, q_cur)
        Tr = np.zeros((6, 6))
        Tr[:3, :3] = R_cur.T
        Tr[3:, 3:] = R_cur.T
        J0T = Tr @ J0T

        # Orientation and position error expressed in the end-effector frame.
        ER = np.matmul(np.transpose(R_d), R_cur)
        EP = R_cur.T @ (p_cur - p_d)

        # Decompose ER to an (axis, angle) pair and form the eR2 error vector.
        k, theta = rox.R2rot(ER)
        s = np.sin(theta / 2) * np.array(k)  # eR2

        alpha = 1  # Step size
        # Damped least squares update for iterative inverse kinematics.
        delta = alpha * (np.linalg.inv(Kq + J0T.T @ J0T) @ J0T.T @ np.hstack(
            (s, EP)).T)

        q_cur = q_cur - delta.reshape((num_joints, 1))

        # Fix: test the error MAGNITUDE.  The original compared signed values
        # (`np.hstack((s, EP)) < 0.0001`), so any sufficiently negative error
        # component reported convergence prematurely.  The sibling
        # update_ik_info3 already applies np.abs here.
        converged = (np.abs(np.hstack((s, EP))) < 0.0001).all()

        itr += 1  # Increase the iteration

    joints_text = ""
    for i in q_cur:
        joints_text += "(%.3f, %.3f) " % (np.rad2deg(i), i)
    print_div_ik_info(
        str(rox.Transform(R_d, p_d)) + "<br>" + joints_text + "<br>" +
        str(converged) + ", itr = " + str(itr))
    return q_cur, converged
Code example #8
def update_ik_info3(
        robot_rox, T_desired,
        q_current):  # inverse kinematics that uses Least Square solver
    """Iterative inverse kinematics using scipy's bounded least-squares solver.

    Each step computes the pose error in the end-effector frame, forms a
    desired twist b = [wd, vd], solves J qdot = b with lsq_linear, and takes
    a fixed fractional step until the error is below 1e-4 or 200 iterations
    elapse.

    Args:
        robot_rox: rox.Robot kinematic model.
        T_desired: rox.Transform target pose (fields R and p).
        q_current: initial joint-angle guess in radians.

    Returns:
        (q, converged): squeezed 1-D joint vector and convergence flag.
    """
    # R_d, p_d: Desired orientation and position
    R_d = T_desired.R
    p_d = T_desired.p
    d_q = q_current

    num_joints = len(robot_rox.joint_type)

    q_cur = d_q  # initial guess on the current joint angles
    q_cur = q_cur.reshape((num_joints, 1))

    max_steps = 200  # number of steps to for convergence

    # print_div( "<br> q_cur " + str(q_cur) ) # DEBUG

    # History of desired-twist vectors, kept only for debug inspection.
    hist_b = []

    itr = 0  # Iterations
    converged = False
    while itr < max_steps and not converged:

        pose = rox.fwdkin(robot_rox, q_cur.flatten())
        R_cur = pose.R
        p_cur = pose.p

        #calculate current Jacobian
        J0T = rox.robotjacobian(robot_rox, q_cur.flatten())

        # Transform Jacobian to End effector frame from the base frame
        Tr = np.zeros((6, 6))
        Tr[:3, :3] = R_cur.T
        Tr[3:, 3:] = R_cur.T
        J0T = Tr @ J0T

        # Jp=J0T[3:,:]
        # JR=J0T[:3,:]                      #decompose to position and orientation Jacobian

        # Error in position and orientation
        # ER = np.matmul(R_cur, np.transpose(R_d))
        ER = np.matmul(np.transpose(R_d), R_cur)
        #print_div( "<br> ER " + str(ER) ) # DEBUG

        # EP = p_cur - p_d
        EP = R_cur.T @ (p_cur - p_d)
        #print_div( "<br> EP " + str(EP) ) # DEBUG

        #decompose ER to (k,theta) pair
        k, theta = rox.R2rot(ER)
        # print_div( "<br> k " + str(k) ) # DEBUG
        # print_div( "<br> theta " + str(theta) ) # DEBUG

        ## set up s for different norm for ER
        # s=2*np.dot(k,np.sin(theta)) #eR1
        # s = np.dot(k,np.sin(theta/2))         #eR2
        s = np.sin(theta / 2) * np.array(k)  #eR2
        # s=2*theta*k              #eR3
        # s=np.dot(J_phi,phi)              #eR4
        # print_div( "<br> s " + str(s) ) # DEBUG

        Kp = np.eye(3)
        KR = np.eye(3)  #gains for position and orientation error

        # Desired linear and angular velocity that reduce the error.
        vd = -Kp @ EP
        wd = -KR @ s

        b = np.concatenate([wd, vd])
        # Guard against NaN from the axis-angle decomposition at singularities.
        np.nan_to_num(b, copy=False, nan=0.0, posinf=None, neginf=None)
        # print(b)
        # print(J0T)

        # DEBUG --------------
        hist_b.append(b)
        if itr > 0:
            error_cur = np.linalg.norm(hist_b[itr - 1]) - np.linalg.norm(
                hist_b[itr])
            #print("Error= " + str(error_cur))
        # DEBUG --------------

        # Solve J0T @ qdot = b in the least-squares sense.
        res = lsq_linear(J0T, b)

        if res.success:
            qdot_star = res.x
        else:
            # Fall back to a tiny step so the iteration can continue.
            print("Any solution could not found")
            qdot_star = np.finfo(float).eps * np.ones(num_joints)

        # find best step size to take
        # alpha=fminbound(min_alpha,0,1,args=(q_cur,qdot_star,Sawyer_def,Rd,pd,w,Kp))
        alpha = 0.3  # Step size    # 1.0
        delta = alpha * qdot_star
        # print_div( "<br> delta " + str(delta) ) # DEBUG

        # Convergence Check
        converged = (np.abs(np.hstack((s, EP))) < 0.0001).all()

        if not converged:
            # Update for next iteration
            q_cur = q_cur + delta.reshape((num_joints, 1))

            # Normalize angles betweeen -pi to pi
            q_cur = normalizeAngles(q_cur)

        # print_div( "<br> converged? " + str(converged) ) # DEBUG
        # print( "converged? " + str(converged) ) # DEBUG

        itr += 1  # Increase the iteration
        #print(itr)
        #print(converged)
        # print(delta)
        # print(q_cur)

    # joints_text=""
    # for i in q_cur:
    #     joints_text+= "(%.3f, %.3f) " % (np.rad2deg(i), i)
    # print_div_ik_info(str(rox.Transform(R_d,p_d)) +"<br>"+ joints_text +"<br>"+ str(converged) + ", itr = " + str(itr))
    return np.squeeze(q_cur), converged
Code example #9
    def _generate_movel_trajectory(self, robot_client, rox_robot,
                                   initial_q_or_T, T_final, speed_perc,
                                   q_final_seed):
        """Generate a time-parameterized joint trajectory for a straight-line
        Cartesian move ending at T_final.

        Parameters
        ----------
        robot_client : Robot Raconteur client used to resolve trajectory types.
        rox_robot : rox.Robot kinematic model of the manipulator.
        initial_q_or_T : start as either a joint vector or a rox.Transform pose.
        T_final : rox.Transform goal pose.
        speed_perc : joint-velocity scaling (percent of limit).
        q_final_seed : optional IK seed for the goal pose (may be None).

        Returns
        -------
        A populated JointTrajectory structure, or None if TOPPRA fails to
        time-parameterize the path.
        """

        JointTrajectoryWaypoint = RRN.GetStructureType(
            "com.robotraconteur.robotics.trajectory.JointTrajectoryWaypoint",
            robot_client)
        JointTrajectory = RRN.GetStructureType(
            "com.robotraconteur.robotics.trajectory.JointTrajectory",
            robot_client)

        dof = len(rox_robot.joint_type)

        # Fix: the start may be a pose, so test against rox.Transform -- the
        # original tested isinstance(..., rox.Robot), which can never match a
        # pose.  Also fixed: update_ik_info3 returns a (q, converged) tuple
        # (the original bound the whole tuple to q_initial), and
        # np.random.randn takes separate int dimensions, not a shape tuple.
        if isinstance(initial_q_or_T, rox.Transform):
            T_initial = initial_q_or_T
            q_initial, _ = invkin.update_ik_info3(rox_robot, initial_q_or_T,
                                                  np.random.randn(dof))
        else:
            q_initial = initial_q_or_T
            T_initial = rox.fwdkin(rox_robot, q_initial)

        p_diff = T_final.p - T_initial.p
        R_diff = np.transpose(T_initial.R) @ T_final.R
        k, theta = rox.R2rot(R_diff)

        # Sample density scales with translation and rotation magnitude,
        # never fewer than 20 samples.  (A duplicated computation with a
        # 0.25 rotation factor was removed; the 0.2 value took effect.)
        N_samples_translate = np.linalg.norm(p_diff) * 100.0
        N_samples_rot = np.abs(theta) * 0.2 * 180 / np.pi

        N_samples = math.ceil(np.max((N_samples_translate, N_samples_rot, 20)))

        ss = np.linspace(0, 1, N_samples)

        if q_final_seed is None:
            q_final, res = invkin.update_ik_info3(rox_robot, T_final,
                                                  q_initial)
        else:
            q_final, res = invkin.update_ik_info3(rox_robot, T_final,
                                                  q_final_seed)
        #assert res, "Inverse kinematics failed"

        # Interpolate the pose along the path and solve IK at each sample,
        # seeding with a joint-space blend of the endpoint solutions to keep
        # the solver on a consistent branch.
        way_pts = np.zeros((N_samples, dof), dtype=np.float64)
        way_pts[0, :] = q_initial
        for i in range(1, N_samples):
            s = float(i) / (N_samples - 1)
            R_i = T_initial.R @ rox.rot(k, theta * s)
            p_i = (p_diff * s) + T_initial.p
            T_i = rox.Transform(R_i, p_i)
            ik_seed = (1.0 - s) * q_initial + s * q_final
            q, res = invkin.update_ik_info3(rox_robot, T_i, ik_seed)
            assert res, "Inverse kinematics failed"
            way_pts[i, :] = q

        # Time-parameterize with TOPPRA under velocity/acceleration limits;
        # the velocity limit is scaled by the requested speed percentage.
        vlims = rox_robot.joint_vel_limit
        alims = rox_robot.joint_acc_limit

        path = ta.SplineInterpolator(ss, way_pts)
        pc_vel = constraint.JointVelocityConstraint(vlims * 0.95 * speed_perc /
                                                    100.0)
        pc_acc = constraint.JointAccelerationConstraint(alims)

        instance = algo.TOPPRA([pc_vel, pc_acc],
                               path,
                               parametrizer="ParametrizeConstAccel")
        jnt_traj = instance.compute_trajectory()

        if jnt_traj is None:
            return None

        # Resample the timed trajectory and pack it into RR waypoints.
        ts_sample = np.linspace(0, jnt_traj.duration, N_samples)
        qs_sample = jnt_traj(ts_sample)

        waypoints = []

        for i in range(N_samples):
            wp = JointTrajectoryWaypoint()
            wp.joint_position = qs_sample[i, :]
            wp.time_from_start = ts_sample[i]
            waypoints.append(wp)

        traj = JointTrajectory()
        traj.joint_names = rox_robot.joint_names
        traj.waypoints = waypoints

        return traj