def _test_last_configuration(robot, theta, last_theta):

    pose_1 = rox.fwdkin(robot, theta)
    theta2 = rox.robot6_sphericalwrist_invkin(robot, pose_1, last_theta)
    pose_2 = rox.fwdkin(robot, theta2[0])
    if pose_1 != pose_2:
        return False
    if not np.allclose(theta2[0], last_theta, atol=np.deg2rad(10)):
        return False
    return True
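A minimal sketch of driving this round-trip check, assuming a 6R spherical-wrist arm built with rox.Robot (the axis and link vectors below are illustrative placeholders, not a real robot, so the inverse kinematics is only expected to work for reachable poses of this made-up geometry):

import numpy as np
import general_robotics_toolbox as rox

# Illustrative spherical-wrist geometry: the last three axes intersect (P45 = P56 = 0)
ex, ey, ez = np.eye(3)
H = np.array([ez, ey, ey, ex, ey, ex]).T                # joint axes, 3 x 6
P = np.array([0.3 * ez, np.zeros(3), 0.4 * ez, 0.35 * ex,
              np.zeros(3), np.zeros(3), 0.05 * ex]).T   # link vectors, 3 x 7
robot = rox.Robot(H, P, np.zeros(6))

theta = np.deg2rad([10.0, -30.0, 25.0, 5.0, 40.0, -15.0])
print(_test_last_configuration(robot, theta, theta + np.deg2rad(2.0)))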
Example #2
def _test_configuration(robot, theta):

    pose_1 = rox.fwdkin(robot, theta)
    theta2 = rox.robot6_sphericalwrist_invkin(robot, pose_1)

    if len(theta2) == 0:
        return False
    for theta2_i in theta2:
        pose_2 = rox.fwdkin(robot, theta2_i)
        if pose_1 != pose_2:
            return False
    return True
Example #3
    def runTest(self):

        #TODO: other joint types

        #Home configuration (See Page 2-2 of Puma 260 manual)
        puma = puma260b_robot()
        pose = rox.fwdkin(puma, np.zeros(6))
        np.testing.assert_allclose(pose.R, np.identity(3))
        np.testing.assert_allclose(pose.p,
                                   np.array([10, -4.9, 4.25]) * in_2_m,
                                   atol=1e-6)

        #Another right-angle configuration
        joints2 = np.array([180, -90, -90, 90, 90, 90]) * np.pi / 180.0
        pose2 = rox.fwdkin(puma, joints2)
        np.testing.assert_allclose(pose2.R,
                                   rox.rot([0, 0, 1], np.pi).dot(
                                       rox.rot([0, 1, 0], -np.pi / 2)),
                                   atol=1e-6)
        np.testing.assert_allclose(pose2.p,
                                   np.array([-0.75, 4.9, 31]) * in_2_m)

        #Random configuration
        joints3 = np.array([50, -105, 31, 4, 126, -184]) * np.pi / 180
        pose3 = rox.fwdkin(puma, joints3)

        pose3_R_t = np.array([[0.4274, 0.8069, -0.4076],
                              [0.4455, -0.5804, -0.6817],
                              [-0.7866, 0.1097, -0.6076]])
        pose3_p_t = [0.2236, 0.0693, 0.4265]

        np.testing.assert_allclose(pose3.R, pose3_R_t, atol=1e-4)
        np.testing.assert_allclose(pose3.p, pose3_p_t, atol=1e-4)

        puma_tool = puma260b_robot_tool()

        pose4 = rox.fwdkin(puma_tool, joints3)
        pose4_R_t = np.array([[0.4076, 0.8069, 0.4274],
                              [0.6817, -0.5804, 0.4455],
                              [0.6076, 0.1097, -0.7866]])

        pose4_p_t = [0.2450, 0.0916, 0.3872]

        np.testing.assert_allclose(pose4.R, pose4_R_t, atol=1e-4)
        np.testing.assert_allclose(pose4.p, pose4_p_t, atol=1e-4)
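The puma260b_robot() fixture used above is not shown; as a hedged sketch, a product-of-exponentials definition of this kind is typically assembled with rox.Robot from joint axes H and link vectors P (the numbers below are placeholders, not the actual Puma 260 dimensions from the manual):

import numpy as np
import general_robotics_toolbox as rox

in_2_m = 0.0254  # the test above converts inches to meters the same way

def example_puma_like_robot():
    ex, ey, ez = np.eye(3)
    H = np.array([ez, ey, ey, ez, ey, ez]).T            # joint axes, 3 x 6
    P = np.array([13.0 * ez, np.zeros(3), 8.0 * ex, np.zeros(3),
                  8.0 * ex, np.zeros(3), 2.5 * ez]).T * in_2_m
    return rox.Robot(H, P, np.zeros(6))

pose = rox.fwdkin(example_puma_like_robot(), np.zeros(6))
print(pose.p)  # home position of the placeholder arm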
Example #4
    def update_qdot2(
            self, R_axis, P_axis, speed_perc
    ):  # inverse velocity kinematics that uses LSQ Linear solver

        # Get the corresponding joint angles at that time
        d_q = self.get_current_joint_positions()
        q_cur = d_q.reshape((self.num_joints, 1))

        # Update the end effector pose info
        pose = rox.fwdkin(self.robot_rox, q_cur.flatten())
        R_cur = pose.R
        p_cur = pose.p

        #calculate current Jacobian
        J0T = rox.robotjacobian(self.robot_rox, q_cur.flatten())

        # Transform Jacobian to End effector frame from the base frame
        Tr = np.zeros((6, 6))
        Tr[:3, :3] = R_cur.T
        Tr[3:, 3:] = R_cur.T
        #J0T = Tr @ J0T

        # Normalize R_axis and P_axis
        #R_axis = R_axis/(np.linalg.norm(R_axis))
        #P_axis = P_axis/(np.linalg.norm(P_axis))

        # Create the corresponding velocities
        w = R_axis  #* self.rotate_angle
        v = P_axis  #* self.move_distance

        b = np.concatenate([w, v]) * 0.01 * speed_perc
        np.nan_to_num(b, copy=False, nan=0.0, posinf=None, neginf=None)
        # print(b)
        # print(J0T)

        joint_vel_limits = 0.01 * speed_perc * self.joint_vel_limits
        res = lsq_linear(J0T,
                         b,
                         bounds=(-1.0 * joint_vel_limits, joint_vel_limits))

        if res.success:
            qdot_star = res.x
        else:
            print("Any solution could not found")
            qdot_star = np.zeros(self.num_joints)

        print("qdot_star:")
        print(qdot_star)
        # print("self.joint_vel_limits")
        # print(self.joint_vel_limits)

        # q_dot = self.normalize_dq(qdot_star)
        q_dot = qdot_star

        return q_dot
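A self-contained sketch of the bounded least-squares step above, with a made-up Jacobian standing in for rox.robotjacobian; scipy.optimize.lsq_linear minimizes ||J qdot - b|| subject to the velocity bounds:

import numpy as np
from scipy.optimize import lsq_linear

J = np.random.default_rng(0).normal(size=(6, 6))  # stand-in for the robot Jacobian
b = np.array([0.0, 0.0, 0.1, 0.05, 0.0, 0.0])     # desired twist [w; v]
limits = np.full(6, 0.5)                          # assumed joint velocity limits
res = lsq_linear(J, b, bounds=(-limits, limits))
qdot = res.x if res.success else np.zeros(6)
print(qdot)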
Example #5
async def update_end_info():
    global robot
    global d_q

    pose = rox.fwdkin(robot, d_q)

    print_div_end_info(str(pose))
    # print_div_end_info( np.array_str(pose.R, precision=4, suppress_small=True).replace('\n', '\n' + ' '*4))

    # Calculate Euler ZYX angles from the pose and format them for display:
    x, y, z = euler_angles_from_rotation_matrix(pose.R)
    str_rx = "%.2f" % (np.rad2deg(x))
    str_ry = "%.2f" % (np.rad2deg(y))
    str_rz = "%.2f" % (np.rad2deg(z))
    str_px = "%.3f" % (pose.p[0])
    str_py = "%.3f" % (pose.p[1])
    str_pz = "%.3f" % (pose.p[2])
    xyz_orient_str = "[" + str_rx + ", " + str_ry + ", " + str_rz + "]"
    xyz_positi_str = "[" + str_px + ", " + str_py + ", " + str_pz + "]"
    print_div_cur_pose(xyz_orient_str, xyz_positi_str)
    return pose
Example #6
def robot_get_end_pose(frame):
    """
    Returns the end pose of a robot. This end pose is reported by the
    robot driver. It is typically defined as the flange of the robot,
    but may be the tool if the driver is configured to report
    the tool pose.

    Parameters:

    * frame (str): The frame to return the pose in. May be `ROBOT` or `WORLD`.

    Return (Pose): The robot end pose in the requested frame
    """
    robot_name = _get_active_robot_name()

    device_manager = PyriSandboxContext.device_manager
    robot = device_manager.get_device_client(robot_name,1)
    robot_state, _ = robot.robot_state.PeekInValue()

    robot_util = RobotUtil(client_obj = robot)
    geom_util = GeometryUtil(client_obj = robot)

    # TODO: cache robot_info
    rox_robot = robot_util.robot_info_to_rox_robot(robot.robot_info,0)
    T1 = rox.fwdkin(rox_robot,robot_state.joint_position)

    if frame == "ROBOT":
        return geom_util.rox_transform_to_pose(T1)
    elif frame == "WORLD":
        var_storage = device_manager.get_device_client("variable_storage")
        # TODO: don't hard code robot origin
        robot_origin_pose = var_storage.getf_variable_value("globals",_get_robot_origin_calibration()).data
        T_rob = geom_util.named_pose_to_rox_transform(robot_origin_pose.pose)
        T2 = T_rob * T1
        return geom_util.rox_transform_to_pose(T2)
    else:
        assert False, "Invalid frame"
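A minimal sketch of the world-frame composition used above: rox.Transform objects compose by multiplication, so the flange pose in world coordinates is T_world_base * T_base_flange (the numbers here are arbitrary):

import numpy as np
import general_robotics_toolbox as rox

T_rob = rox.Transform(rox.rot([0., 0., 1.], np.pi / 2), [1.0, 0.0, 0.0])  # robot origin in world
T1 = rox.Transform(np.eye(3), [0.5, 0.0, 0.3])                            # flange in robot frame
T2 = T_rob * T1
print(T2.p)  # flange position in world coordinates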
Example #7
    def compute_joint_vel_cmd_qp(self, joint_position,
                                 spatial_velocity_command):
        pp, RR = self.fwdkin_alljoints(joint_position)

        ## desired eef orientation ##
        if self._eq_orien == True:
            self._R_des = RR[:, :, -1]
            self._eq_orien = False

        ## if the self-collision has higher priority than collision with environment
        #if self._self_min_dist <= self._min_dist:
        #	Closest_Pt = self._self_Closest_Pt_1
        #	Closest_Pt_env = self._self_Closest_Pt_2
        #else:
        #	Closest_Pt = self._Closest_Pt
        #	Closest_Pt_env = self._Closest_Pt_env

        #Closest_Pt = self._Closest_Pt
        #Closest_Pt_env = self._Closest_Pt_env

        # where is the closest joint to the closest point
        #J2C_Joint = self.Joint2Collision(Closest_Pt, pp)

        # jacobian of end-effector
        J_eef = rox.robotjacobian(self._robot, joint_position)

        #v_tmp = Closest_Pt - pp[:, [-1]]

        #v_tmp2 = (pp[:, [-1]] - pp[:, [-3]])
        #p_norm2 = norm(v_tmp2)
        #v_tmp2 = v_tmp2/p_norm2

        # desired rotational velocity
        vr = spatial_velocity_command[0:3]
        vr = vr.reshape(3, 1)

        # desired linear velocity
        vp = spatial_velocity_command[3:None]
        vp = vp.reshape(3, 1)

        #J = self.getJacobian3(joint_position, Closest_Pt)
        ##### change 6 #####
        #J, _ = self.getJacobian2(joint_position, Closest_Pt, 6)

        ### change J to J_eef ###
        Q = self.getqp_H(J_eef, vr, vp)

        # make sure Q is symmetric
        Q = 0.5 * (Q + Q.T)

        f = self.getqp_f()
        f = f.reshape((8, ))

        ### change the velocity scale ###
        LB = np.vstack((-self._robot.joint_vel_limit.reshape(6, 1), 0, 0))
        UB = np.vstack((self._robot.joint_vel_limit.reshape(6, 1), 1, 1))

        # inequality constrains A and b
        #self._h[0:6] = joint_position.reshape(6, 1) - self._robot.joint_lower_limit.reshape(6, 1)
        #self._h[6:12] = self._robot.joint_upper_limit.reshape(6, 1) - joint_position.reshape(6, 1)

        #dx = Closest_Pt_env[0] - Closest_Pt[0]
        #dy = Closest_Pt_env[1] - Closest_Pt[1]
        #dz = Closest_Pt_env[2] - Closest_Pt[2]

        #dist = np.sqrt(dx**2 + dy**2 + dz**2)
        #dist = norm(Closest_Pt-Closest_Pt_env)

        # derivative of dist w.r.t time
        #der = np.array([dx*(dx**2 + dy**2 + dz**2)**(-0.5), dy*(dx**2 + dy**2 + dz**2)**(-0.5), dz*(dx**2 + dy**2 + dz**2)**(-0.5)])

        #print dist
        #dmin = 0.03#0.115
        #self._h[12] = dist - dmin
        #self._h[12] = 0.5*(dist*dist - dmin*dmin)

        ## change here ##
        #self._dhdq[12, 0:6] = np.dot(-der.T, J[3:6,:])
        #self._dhdq[12, 0:6] = np.dot(-dist*der.T, J[3:6,:])
        #self._dhdq[12, 0:6] = np.dot(-Closest_Pt_env.T+Closest_Pt.T, J[3:6,:])

        #self._sigma[0:12] = self.inequality_bound(self._h[0:12])
        #self._sigma[12] = self.inequality_bound(self._h[12])
        #print self._h[12]
        #print self._sigma[12]

        #A = self._dhdq
        #b = self._sigma

        #A = np.vstack((self._dhdq, np.eye(8), -np.eye(8)))
        #b = np.vstack((self._sigma, LB, -UB))
        #b = b.reshape((29, ))

        A = np.vstack((np.eye(8), -np.eye(8)))
        b = np.vstack((LB, -UB))
        b = b.reshape((16, ))

        # equality constraints for maintaining end-effector orientation (pure translation)
        #A_eq = np.hstack((J_eef[0:3,:], np.zeros((3, 2))))
        #w_skew = logm(np.dot(RR[:,:,-1], self._R_des.T))
        #w = np.array([w_skew[2, 1], w_skew[0, 2], w_skew[1, 0]])
        ######### change -0.05 ##########
        #b_eq = -0.001*self._Ke*w

        # stack equality constrains on top of the inequality constraints
        #A = np.vstack((A_eq, A))
        #b = np.concatenate((b_eq.reshape((3, 1)), b.reshape((29, 1))), axis=0)
        #b = b.reshape((32, ))

        # the last argument specifies the number of equality constraints

        #sc = norm(Q,'fro')

        #dq_sln = quadprog.solve_qp(Q/sc, -f/sc, A.T, b, A_eq.shape[0])[0]

        #A = np.delete(A, [0, 1, 2], axis=0)
        #b = np.delete(b, [0, 1, 2])

        # solve the quadprog problem
        ## scale the matrix to avoid numerical errors of solver
        sc = norm(Q, 'fro')
        dq_sln = quadprog.solve_qp(Q / sc, -f / sc, A.T, b)[0]
        #dq_sln = quadprog.solve_qp(Q, -f, A.T, b)[0]

        if len(dq_sln) < self._n:
            qdot = np.zeros((self._n, 1))
            V_scaled = 0
            print('No solution')
        else:
            qdot = dq_sln[0:self._n]
            V_scaled = dq_sln[-1] * vp
            #vr_scaled = dq_sln[-2]*vr.reshape(3,1)

        V_linear = np.dot(J_eef[3:6, :], qdot)
        V_rot = np.dot(J_eef[0:3, :], qdot)

        qdot = qdot.reshape((6, ))

        #print('desired angular velocity')
        print(vr)
        #print('actual angular velocity')
        print(V_rot)
        #print('desired linear velocity')
        print(vp)
        #print('actual linear velocity')
        print(V_linear)

        #print('solved joint velocity')
        #print(qdot)

        ####### for Rviz visualization ######
        ##### 2
        br = tf.TransformBroadcaster()
        pub_d = rospy.Publisher('desired_velocity',
                                TwistStamped,
                                queue_size=10)
        pub_a = rospy.Publisher('actual_velocity', TwistStamped, queue_size=10)
        msg_d = TwistStamped()
        msg_a = TwistStamped()

        transform_0T = rox.fwdkin(self._robot, joint_position)
        #p = transform_0T.p
        #print p

        #q_orig1 = quaternion_from_euler(spatial_velocity_command[0], spatial_velocity_command[1], spatial_velocity_command[2])

        #q = quaternion_from_euler(-euler[0], -euler[1], -euler[2])
        # first element is w
        q_orig2 = Quaternion(matrix=transform_0T.R)
        #print q_orig2
        # convert q_orig2 into (x, y, z, w) order
        # invert the rotation by negating w
        q_orig3 = np.array([q_orig2[1], q_orig2[2], q_orig2[3], -q_orig2[0]])

        #q_orig = np.array([0, 1, 0, 0])

        #q_orig5 = quaternion_multiply(q_orig3, q_orig4)

        # create a frame for velocity
        br.sendTransform((0.0, 0.0, -0.2), q_orig3, rospy.Time.now(), "v_d",
                         "vacuum_gripper_tool")
        br.sendTransform((0.0, 0.2, 0.0), (0, 0, 0, 1), rospy.Time.now(),
                         "v_a", "v_d")

        msg_d.header.seq = 1
        msg_d.header.stamp = rospy.Time.now()
        msg_d.header.frame_id = "v_d"

        msg_d.twist.linear.x = vp[0]  # + p[0]
        msg_d.twist.linear.y = vp[1]  # + p[1]
        msg_d.twist.linear.z = vp[2]  # + p[2]

        # the last one should be w
        #q_orig = quaternion_multiply(q_orig1, q_orig5)
        msg_d.twist.angular.x = vr[0]  #q_orig[0]
        msg_d.twist.angular.y = vr[1]  #q_orig[1]
        msg_d.twist.angular.z = vr[2]  #q_orig[2]

        msg_a.header.seq = 1
        msg_a.header.stamp = rospy.Time.now()
        msg_a.header.frame_id = "v_a"

        msg_a.twist.linear.x = V_linear[0]  # + p[0]
        msg_a.twist.linear.y = V_linear[1]  # + p[1]
        msg_a.twist.linear.z = V_linear[2]  # + p[2]

        # the last one should be w
        #q_orig = quaternion_multiply(q_orig1, q_orig5)
        msg_a.twist.angular.x = V_rot[0]  #q_orig[0]
        msg_a.twist.angular.y = V_rot[1]  #q_orig[1]
        msg_a.twist.angular.z = V_rot[2]  #q_orig[2]
        #msg.pose.orientation.w = q_orig[3]

        pub_d.publish(msg_d)
        pub_a.publish(msg_a)
        ###################

        return qdot
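A self-contained sketch of the quadprog call pattern above: quadprog.solve_qp(G, a, C, b) minimizes (1/2) x^T G x - a^T x subject to C^T x >= b, so passing -f as a yields the (1/2) x^T Q x + f^T x objective built by getqp_H/getqp_f:

import numpy as np
import quadprog

G = np.eye(2)                 # must be symmetric positive definite
a = np.array([1.0, 0.5])
C = np.eye(2)                 # columns are the constraint normals
b = np.array([-1.0, -1.0])    # enforces x >= -1 elementwise
x = quadprog.solve_qp(G, a, C, b)[0]
print(x)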
Example #8
    def compute_quadprog(self, joint_position):
        self._tic = timeit.default_timer()

        with self._lock:
            print('-------force-------')
            print(self._F)
            print('-------distance between eef and panel-------')
            print(self._dist)

            self._F_new = self._F

            force_err = self._F - self._fd
            self._force_inte += force_err
            self._force_der = (self._F_new - self._F_old) / (
                self._tic - self._toc)  # derivative part

            # P control to approach the target
            vz = -self._kp * force_err  # - self._ki*self._force_inte-self._kd*self._force_der; # velocity in tool0 frame

            # change velocity in world frame
            try:
                # rot is a quaternion (x, y, z, w)
                (trans,
                 rot) = self._listener.lookupTransform('/base_link',
                                                       '/gripper',
                                                       rospy.Time(0))
            except (LookupException, ConnectivityException,
                    ExtrapolationException):
                # assumed fallback (identity rotation) so rot is always defined;
                # the original `pass` would leave rot unbound on the first failure
                rot = (0.0, 0.0, 0.0, 1.0)

            # convert quaternion into (w, x, y, z)
            # convert from quaternion to rotation matrix (4 by 4)
            R_rot = quaternion_matrix([rot[3], rot[0], rot[1], rot[2]])

            R_rot = R_rot[0:3, 0:3]

            # if there is no contact, only move toward the target
            if self._F == 0:
                self._kp = 0.000004
                # 3 by 1 linear velocity in tool0 frame
                v_l = np.array([0, 0, vz])
            # if there is contact, also move in x direction in sensor frame
            else:
                # increase Kp gain when in contact with the target
                self._kp = 0.000007
                # PID control
                vz = -self._kp * force_err - self._ki * self._force_inte - self._kd * self._force_der
                # 3 by 1 linear velocity in tool0 frame
                v_l = np.array([-0.003, 0, vz])

            # transform into base frame
            v_l = np.dot(R_rot, v_l)

            # make a complete 6 by 1 velocity command with zero angular velocity
            spatial_velocity_command = np.array(
                [0, 0, 0, v_l[0], v_l[1], v_l[2]])

            ####### for Rviz visualization ######
            ##### 2
            br = tf.TransformBroadcaster()
            pub_v = rospy.Publisher('resolved_velocity',
                                    TwistStamped,
                                    queue_size=10)
            msg_v = TwistStamped()

            transform_0T = rox.fwdkin(self._robot, joint_position)
            #p = transform_0T.p
            #print p

            #q_orig1 = quaternion_from_euler(spatial_velocity_command[0], spatial_velocity_command[1], spatial_velocity_command[2])

            #q = quaternion_from_euler(-euler[0], -euler[1], -euler[2])
            # first element is w
            q_orig2 = Quaternion(matrix=transform_0T.R)
            #print q_orig2
            # convert q_orig2 into (x, y, z, w) order
            # invert the rotation by negating w
            q_orig3 = np.array(
                [q_orig2[1], q_orig2[2], q_orig2[3], -q_orig2[0]])

            #q_orig = np.array([0, 1, 0, 0])

            #q_orig5 = quaternion_multiply(q_orig3, q_orig4)

            # create a frame for velocity
            br.sendTransform((0.0, 0.0, -0.2), q_orig3, rospy.Time.now(), "v",
                             "gripper")
            #br.sendTransform((0.0, 0.2, 0.0),(0,0,0,1),rospy.Time.now(),"v_a","v_d")

            msg_v.header.seq = 1
            msg_v.header.stamp = rospy.Time.now()
            msg_v.header.frame_id = "v"

            msg_v.twist.linear.x = v_l[0]  # + p[0]
            msg_v.twist.linear.y = v_l[1]  # + p[1]
            msg_v.twist.linear.z = v_l[2]  # + p[2]

            # the last one should be w
            #q_orig = quaternion_multiply(q_orig1, q_orig5)
            msg_v.twist.angular.x = 0  #q_orig[0]
            msg_v.twist.angular.y = 0  #q_orig[1]
            msg_v.twist.angular.z = 0  #q_orig[2]

            pub_v.publish(msg_v)
            ##############################################
            ##############################################

            # compute joint velocity by QP
            if self.safe_mode == True:
                joints_vel = self.compute_joint_vel_cmd_qp(
                    joint_position, spatial_velocity_command)
                joints_vel = np.clip(joints_vel, -self._robot.joint_vel_limit,
                                     self._robot.joint_vel_limit)
            # compute joint velocity by jacobian inverse
            else:
                joints_vel = self.compute_joint_vel_cmd_jacobian_inverse(
                    joint_position, spatial_velocity_command)
                joints_vel = np.clip(joints_vel, -self._robot.joint_vel_limit,
                                     self._robot.joint_vel_limit)

            self._F_old = self._F_new
            self._toc = self._tic
            return joints_vel
Example #9
    vel_ctrl.set_velocity_command(np.array(qdot))

    cognex_wire = detection_wire.TryGetInValue()
    if cognex_wire[1][key].detected and cognex_wire[0] and cognex_wire[2] != timestamp:
        timestamp = cognex_wire[2]
        joint_angles.append(state_w.InValue.joint_position)
        cam_coordinates.append([detection_wire.InValue[key].x, detection_wire.InValue[key].y])
    
vel_ctrl.set_velocity_command(np.zeros((num_joints,)))
vel_ctrl.disable_velocity_mode() 

#process data
eef = []
num_samples = len(cam_coordinates)
print("num samples: ", num_samples)
for i in range(num_samples):
    if robot_name == 'ur':
        joint_angles[i][0] += np.pi
    transform = fwdkin(robot_def, joint_angles[i])
    p = transform.p
    eef.append(p.tolist()[:2])
H = calibrate(cam_coordinates, eef)
H[2][-1] = robot_yaml['height']
print(H)
dict_file={'H':H.tolist()}
# directory='/home/rpi/RR_Project/calibration'
# os.chdir(directory)
with open('/home/rpi/RR_Project/calibration/'+robot_name+'.yaml', 'w') as file:
    yaml.dump(dict_file, file)
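The calibrate helper used above is not shown; as a hypothetical stand-in, a planar camera-to-robot map of the same shape (a 3x3 matrix whose last row's final entry is later overwritten with the height) could be fit by least squares:

import numpy as np

def calibrate(cam_xy, eef_xy):
    # Fit an affine map H so that H @ [x_cam, y_cam, 1] ~ [x_eef, y_eef, 1]
    A = np.hstack([np.asarray(cam_xy), np.ones((len(cam_xy), 1))])
    B = np.hstack([np.asarray(eef_xy), np.ones((len(eef_xy), 1))])
    X, *_ = np.linalg.lstsq(A, B, rcond=None)
    return X.T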

Example #10
    def _do_grab_place_object_planar(self, robot_local_device_name,
                                     tool_local_device_name,
                                     robot_origin_calib_global_name,
                                     reference_pose_global_name, object_x,
                                     object_y, object_theta, z_offset_before,
                                     z_offset_grab, speed_perc, grab):

        var_storage = self.device_manager.get_device_client(
            "variable_storage", 0.1)

        robot = self.device_manager.get_device_client(robot_local_device_name)
        tool = self.device_manager.get_device_client(tool_local_device_name)

        reference_pose = var_storage.getf_variable_value(
            "globals", reference_pose_global_name)
        robot_origin_calib = var_storage.getf_variable_value(
            "globals", robot_origin_calib_global_name)

        T_robot = self._geom_util.named_pose_to_rox_transform(
            robot_origin_calib.data.pose)

        robot_info = robot.robot_info
        rox_robot = self._robot_util.robot_info_to_rox_robot(robot_info, 0)

        # The object's pose in world coordinates
        T_object = rox.Transform(rox.rot([0., 0., 1.], object_theta),
                                 [object_x, object_y, 0.])

        # The robot's reference pose in its own frame
        T_reference_pose_r = rox.fwdkin(rox_robot,
                                        np.deg2rad(reference_pose.data))

        # The robot's reference pose in world coordinates
        T_reference_pose = T_robot * T_reference_pose_r

        # The nominal grab pose copy
        T_grab_pose_n = copy.deepcopy(T_reference_pose)

        # Translate the reference pose in x and y to the nominal grab point
        T_grab_pose_n.p[0] = T_object.p[0]
        T_grab_pose_n.p[1] = T_object.p[1]

        # Rotate the reference pose to align with the object
        T_grab_pose_n.R = T_object.R @ T_grab_pose_n.R

        # Before pose
        T_grab_pose_before = copy.deepcopy(T_grab_pose_n)
        T_grab_pose_before.p[2] += z_offset_before
        # Transform to robot frame
        T_grab_pose_before_r = T_robot.inv() * T_grab_pose_before

        # Grab pose
        T_grab_pose = copy.deepcopy(T_grab_pose_n)
        T_grab_pose.p[2] += z_offset_grab
        # Transform to robot frame
        T_grab_pose_r = T_robot.inv() * T_grab_pose

        robot_state, _ = robot.robot_state.PeekInValue()
        q_current = robot_state.joint_position

        ## Compute inverse kinematics
        # q_grab_before, res = invkin.update_ik_info3(rox_robot, T_grab_pose_before_r, q_current)
        # assert res, "Invalid setpoint: invkin did not converge"
        # q_grab, res = invkin.update_ik_info3(rox_robot, T_grab_pose_r, q_current)
        # assert res, "Invalid setpoint: invkin did not converge"

        # print(f"q_grab_before: {q_grab_before}")
        # print(f"q_grab: {q_grab}")
        # print()

        final_seed = np.deg2rad(reference_pose.data)
        traj_before = self._generate_movel_trajectory_tool_j_range(
            robot, rox_robot, q_current, T_grab_pose_before_r, speed_perc,
            final_seed)

        q_init_grab = traj_before.waypoints[-1].joint_position
        traj_grab = self._generate_movel_trajectory_tool_j_range(
            robot, rox_robot, q_init_grab, T_grab_pose_r, speed_perc,
            q_init_grab)

        q_init_after = traj_grab.waypoints[-1].joint_position
        traj_after = self._generate_movel_trajectory_tool_j_range(
            robot, rox_robot, q_init_after, T_grab_pose_before_r, speed_perc,
            q_init_grab)

        gen = PickPlaceMotionGenerator(robot, rox_robot, tool, traj_before,
                                       traj_grab, traj_after, grab, self._node)

        # robot.jog_freespace(q_grab_before,max_velocity,True)
        # time.sleep(0.5)
        # robot.jog_freespace(q_grab,max_velocity*.25,True)
        # time.sleep(0.5)
        # robot.jog_freespace(q_grab_before,max_velocity*.25,True)
        return gen
Example #11
    def _compute_joint_command(self, controller_state=None):
        if controller_state is None:
            controller_state = self._state

        controller_state.controller_time = self._clock.now
        step_ts = controller_state.ts * controller_state.speed_scalar

        if controller_state.mode.value < 0:
            if controller_state.mode == ControllerMode.ROBOT_INVALID_STATE \
                or controller_state.mode == ControllerMode.ROBOT_COMMUNICATION_ERROR \
                or controller_state.mode == ControllerMode.ROBOT_FAULT:

                controller_state.joint_setpoint = None
                controller_state.joint_command = None

            controller_state.ft_wrench = None
            while True:
                self._update_active_trajectory(controller_state)
                if controller_state.active_trajectory is not None:
                    controller_state.active_trajectory.abort(
                        controller_state.mode)
                    controller_state.active_trajectory = None
                else:
                    break

            return controller_state.mode
        else:

            self._update_active_trajectory(controller_state)

        if controller_state.joint_setpoint is None:
            controller_state.joint_setpoint = controller_state.joint_position

        if controller_state.ft_wrench_threshold is not None \
          and np.shape(controller_state.ft_wrench_threshold) == (6,) \
          and controller_state.ft_wrench is None:
            controller_state.mode = ControllerMode.SENSOR_FAULT
            return ControllerMode.SENSOR_FAULT

        if controller_state.joystick_command is not None \
          and controller_state.joystick_command.halt_command:
            controller_state.mode = ControllerMode.HALT

        if controller_state.mode.value > 0 and not self._check_ft_threshold(
                controller_state.ft_wrench, controller_state.ft_wrench_bias):
            if controller_state.active_trajectory is not None:
                controller_state.active_trajectory.abort(ControllerMode.FT_THRESHOLD_VIOLATION, \
                                                         "Force/Torque Threshold Violated")
                controller_state.active_trajectory = None
            controller_state.mode = ControllerMode.FT_THRESHOLD_VIOLATION
            return ControllerMode.FT_THRESHOLD_VIOLATION
        try:
            if controller_state.mode == ControllerMode.HALT:
                pass
            elif controller_state.mode.value < ControllerMode.HALT.value:
                if controller_state.active_trajectory is not None:
                    controller_state.active_trajectory.abort(
                        controller_state.mode)
                    controller_state.active_trajectory = None
            elif controller_state.mode == ControllerMode.JOINT_TELEOP:
                if controller_state.joystick_command is not None \
                  and controller_state.joystick_command.joint_velocity_command is not None:
                    #controller_state.joint_setpoint += \
                    #controller_state.joystick_command.joint_velocity_command.dot(step_ts)
                    ####################################################################
                    J = rox.robotjacobian(self._robot,
                                          controller_state.joint_position)
                    cmd_vel = np.dot(
                        J, controller_state.joystick_command.
                        joint_velocity_command)
                    controller_state.joint_setpoint += self._quadprog.compute_quadprog(
                        controller_state.joint_position).dot(step_ts * 5)
                    ####################################################################
                    #self._clip_joint_angles(controller_state)
            elif controller_state.mode == ControllerMode.CARTESIAN_TELEOP:
                if controller_state.joystick_command is not None \
                  and controller_state.joystick_command.spatial_velocity_command is not None:
                    #self._compute_joint_vel_from_spatial_vel(controller_state, step_ts, controller_state.joystick_command.spatial_velocity_command)
                    ####################################################
                    ########## change step_ts*5 #################
                    controller_state.joint_setpoint += self._quadprog.compute_quadprog(
                        controller_state.joint_position).dot(step_ts * 2)
                    #self._clip_joint_angles(controller_state)
                    ####################################################
            elif controller_state.mode == ControllerMode.CYLINDRICAL_TELEOP:
                if controller_state.joystick_command is not None \
                  and controller_state.joystick_command.spatial_velocity_command is not None:
                    if (not all(self._robot.H[:, 0] == (0, 0, 1))
                            or self._robot.joint_type[0] != 0):
                        controller_state.mode = ControllerMode.INVALID_OPERATION
                    else:
                        cmd_vel = controller_state.joystick_command.spatial_velocity_command
                        transform_0T = rox.fwdkin(
                            self._robot, controller_state.joint_position)
                        d = np.linalg.norm(
                            (transform_0T.p[0], transform_0T.p[1]))
                        d = np.max((d, 0.05))
                        theta = np.arctan2(transform_0T.p[1],
                                           transform_0T.p[0])
                        s_0 = transform_0T.p[1] / d
                        c_0 = transform_0T.p[0] / d
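                        # Sketch of the assumed derivation: with x = d*cos(theta)
                        # and y = d*sin(theta), differentiating gives
                        #   v_x = -d*sin(theta)*theta_dot + cos(theta)*d_dot
                        #   v_y =  d*cos(theta)*theta_dot + sin(theta)*d_dot
                        # where cmd_vel[3] is theta_dot and cmd_vel[4] is d_dot.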
                        v_x = -d * s_0 * cmd_vel[3] + c_0 * cmd_vel[4]
                        v_y = d * c_0 * cmd_vel[3] + s_0 * cmd_vel[4]
                        v_z = cmd_vel[5]
                        w_x = cmd_vel[0]
                        w_y = cmd_vel[1]
                        w_z = cmd_vel[2] + cmd_vel[3]
                        cmd_vel2 = np.array([w_x, w_y, w_z, v_x, v_y, v_z])
                        #self._compute_joint_vel_from_spatial_vel(controller_state, step_ts, cmd_vel2)
                        ########## change step_ts*5 #################
                        controller_state.joint_setpoint += self._quadprog.compute_quadprog(
                            controller_state.joint_position).dot(step_ts * 5)
                    ####################################################

            elif controller_state.mode == ControllerMode.SPHERICAL_TELEOP:
                if controller_state.joystick_command is not None \
                  and controller_state.joystick_command.spatial_velocity_command is not None:
                    if (not all(self._robot.H[:, 0] == (0, 0, 1))
                            or self._robot.joint_type[0] != 0):
                        controller_state.mode = ControllerMode.INVALID_OPERATION
                    else:
                        #TODO: more clever solution that doesn't require trigonometry?
                        cmd_vel = controller_state.joystick_command.spatial_velocity_command
                        transform_0T = rox.fwdkin(
                            self._robot, controller_state.joint_position)
                        d = np.linalg.norm(transform_0T.p)
                        d = np.max((d, 0.05))
                        theta_phi_res = rox.subproblem2(
                            np.dot([1, 0, 0], d), transform_0T.p, [0, 0, 1],
                            [0, 1, 0])
                        if (len(theta_phi_res) == 0):
                            controller_state.mode = ControllerMode.ROBOT_SINGULARITY
                        else:
                            theta_dot = cmd_vel[3]
                            phi_dot = cmd_vel[4]
                            d_dot = cmd_vel[5]
                            if (len(theta_phi_res) == 1):
                                theta, phi = theta_phi_res[0]
                            elif (np.abs(theta_phi_res[0][1]) <
                                  np.deg2rad(90)):
                                theta, phi = theta_phi_res[0]
                            else:
                                theta, phi = theta_phi_res[1]

                            s_theta = np.sin(theta)
                            c_theta = np.cos(theta)
                            s_phi = np.sin(phi)
                            c_phi = np.cos(phi)
                            v_x = -d * s_phi * c_theta * phi_dot - d * s_theta * c_theta * theta_dot + c_phi * c_theta * d_dot
                            v_y = -d * s_phi * s_theta * phi_dot + d * c_phi * c_theta * theta_dot + s_theta * c_phi * d_dot
                            v_z = -d * c_phi * phi_dot - s_phi * d_dot
                            w_x = cmd_vel[0] - phi_dot * s_theta
                            w_y = cmd_vel[1] + phi_dot * c_theta
                            w_z = cmd_vel[2] + theta_dot
                            cmd_vel2 = np.array([w_x, w_y, w_z, v_x, v_y, v_z])
                            #self._compute_joint_vel_from_spatial_vel(controller_state, step_ts, cmd_vel2)
                            ########## change step_ts*5 ###################
                            controller_state.joint_setpoint += self._quadprog.compute_quadprog(
                                controller_state.joint_position).dot(step_ts *
                                                                     5)
                            ###############################################
            elif controller_state.mode == ControllerMode.SHARED_TRAJECTORY:
                if controller_state.joystick_command is not None \
                  and controller_state.joystick_command.trajectory_velocity_command is not None:
                    active_trajectory = controller_state.active_trajectory
                    if active_trajectory is not None and active_trajectory.trajectory_valid:
                        res, setpoint = active_trajectory.increment_trajectory_time(
                            step_ts * controller_state.joystick_command.
                            trajectory_velocity_command, controller_state)
                        if res == ControllerMode.SUCCESS:
                            controller_state.joint_setpoint = setpoint
            elif controller_state.mode == ControllerMode.AUTO_TRAJECTORY:
                active_trajectory = controller_state.active_trajectory
                if active_trajectory is not None and active_trajectory.trajectory_valid:
                    res, setpoint = active_trajectory.increment_trajectory_time(
                        step_ts, controller_state)
                    if res == ControllerMode.SUCCESS:
                        controller_state.joint_setpoint = setpoint
            elif controller_state.mode.value >= ControllerMode.EXTERNAL_SETPOINT_0.value \
                and controller_state.mode.value <= ControllerMode.EXTERNAL_SETPOINT_7.value:
                i = controller_state.mode.value - ControllerMode.EXTERNAL_SETPOINT_0.value
                setpoint = controller_state.external_joint_setpoints[i]
                if setpoint is not None:
                    controller_state.joint_setpoint = np.copy(setpoint)
                    self._clip_joint_angles(controller_state)
            else:
                self._compute_setpoint_custom_mode(controller_state)
        except:
            traceback.print_exc()
            controller_state.mode = ControllerMode.INTERNAL_ERROR
            controller_state.joint_command = controller_state.joint_position

        #TODO: add in joint command filter
        controller_state.joint_command = controller_state.joint_setpoint

        return controller_state.mode
Example #12
def update_ik_info(R_d, p_d):
    # R_d, p_d: Desired orientation and position
    global robot
    global d_q  # Get Current Joint angles in radian ndarray
    global num_joints

    q_cur = d_q  # initial guess on the current joint angles
    q_cur = q_cur.reshape((num_joints, 1))

    epsilon = 0.001  # Damping Constant
    Kq = epsilon * np.eye(
        len(q_cur)
    )  # small value to make sure positive definite used in Damped Least Square
    # print_div( "<br> Kq " + str(Kq) ) # DEBUG

    max_steps = 200  # maximum number of iterations allowed for convergence

    # print_div( "<br> q_cur " + str(q_cur) ) # DEBUG

    itr = 0  # Iterations
    converged = False
    while itr < max_steps and not converged:

        pose = rox.fwdkin(robot, q_cur)
        R_cur = pose.R
        p_cur = pose.p

        #calculate current Jacobian
        J0T = rox.robotjacobian(robot, q_cur)

        # Transform Jacobian to End effector frame from the base frame
        Tr = np.zeros((6, 6))
        Tr[:3, :3] = R_cur.T
        Tr[3:, 3:] = R_cur.T
        J0T = Tr @ J0T
        # print_div( "<br> J0T " + str(J0T) ) # DEBUG

        # Error in position and orientation
        # ER = np.matmul(R_cur, np.transpose(R_d))
        ER = np.matmul(np.transpose(R_d), R_cur)
        #print_div( "<br> ER " + str(ER) ) # DEBUG
        EP = R_cur.T @ (p_cur - p_d)
        #print_div( "<br> EP " + str(EP) ) # DEBUG

        #decompose ER to (k,theta) pair
        k, theta = rox.R2rot(ER)
        # print_div( "<br> k " + str(k) ) # DEBUG
        # print_div( "<br> theta " + str(theta) ) # DEBUG

        ## set up s for different norm for ER
        # s=2*np.dot(k,np.sin(theta)) #eR1
        # s = np.dot(k,np.sin(theta/2))         #eR2
        s = np.sin(theta / 2) * np.array(k)  #eR2
        # s=2*theta*k              #eR3
        # s=np.dot(J_phi,phi)              #eR4
        # print_div( "<br> s " + str(s) ) # DEBUG

        alpha = 1  # Step size
        # Damped least squares for iterative inverse kinematics
        delta = alpha * (np.linalg.inv(Kq + J0T.T @ J0T) @ J0T.T @ np.hstack(
            (s, EP)).T)
        # print_div( "<br> delta " + str(delta) ) # DEBUG

        q_cur = q_cur - delta.reshape((num_joints, 1))

        # Convergence Check
        converged = (np.abs(np.hstack((s, EP))) < 0.0001).all()
        # print_div( "<br> converged? " + str(converged) ) # DEBUG

        itr += 1  # Increase the iteration

    joints_text = ""
    for i in q_cur:
        joints_text += "(%.3f, %.3f) " % (np.rad2deg(i), i)
    print_div_ik_info(
        str(rox.Transform(R_d, p_d)) + "<br>" + joints_text + "<br>" +
        str(converged) + ", itr = " + str(itr))
    return q_cur, converged
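A self-contained sketch of the damped least-squares update above, with a made-up Jacobian and stacked error e = [s; EP]; the damping term keeps the normal matrix invertible near singularities:

import numpy as np

J0T = np.random.default_rng(1).normal(size=(6, 6))  # stand-in for the Jacobian
e = np.array([0.01, 0.0, 0.0, 0.005, 0.0, 0.0])     # stacked [s; EP] error
epsilon = 0.001
delta = np.linalg.solve(epsilon * np.eye(6) + J0T.T @ J0T, J0T.T @ e)
print(delta)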
Example #13
def first_half(input, num_iter):

    # stop the active RAPID program
    #rapid.stop()
    # reset the pointer to main
    #rapid.resetpp()
    print('first half')
    print('reset PP to main')
    time.sleep(5)
    # start the RAPID program
    #rapid.start()

    # determine if robot has reached the initial configuration
    tag = True
    while tag:
        res, state = egm.receive_from_robot(.1)
        if res:
            #print np.fabs(sum(np.rad2deg(state.joint_angles)) - sum(init))
            if np.fabs(sum(np.rad2deg(state.joint_angles)) - sum(init)) < 1e-4:
                tag = False

    time.sleep(1)

    # out is composed of 5 velocity and 1 force in z
    out = np.zeros((6, n))
    force_out = np.zeros((6, n))
    # pos of eef
    eef_pos = np.zeros((3, n))
    # orientation of eef (quaternion)
    eef_orien = np.zeros((4, n))
    # timestamp
    tim = np.zeros((1, n))

    ############### change ################ otherwise q_pre would be referenced before assignment
    #q_pre = np.deg2rad(state.joint_angles)
    q_hat = np.zeros((6, 1))
    qhat_dot = np.zeros((6, 1))
    # for observer k should be symmetric and positive definite
    kl = 0.1

    ### drain the force sensor buffer ###
    count = 0
    while count < 1000:
        flag, ft = netft.read_ft_streaming(.1)
        #print ft[5]
        count = count + 1
    ### drain the EGM buffer ###
    for i in range(1000):
        res, state = egm.receive_from_robot(.1)
    # subtract the initial force for bias
    flag, ft = netft.read_ft_streaming(.1)
    F0 = ft[5]
    print(F0)
    time.sleep(3)

    cnt = 0
    step_done = False
    while cnt < n:
        #t_pre = time.time()
        # receive EGM feedback
        res, state = egm.receive_from_robot(.1)

        if not res:
            continue

        q_new = state.joint_angles

        # step-over
        if not step_done:
            print('--------start step-over motion--------')
            # do step-over of 0.25 mm in +x in world frame
            # current eef pose
            pose_cur = rox.fwdkin(abb_robot, q_new)
            pose_cur.p[0] = pose_cur.p[0] + num_iter * 2 * step_over
            # solve for inverse kinematics and pick the one that is closest to current configuration
            sol = rox.robot6_sphericalwrist_invkin(abb_robot, pose_cur, q_new)
            try:
                tar = sol[0]  # raise exception if no solution
            except:
                tar = q_new
            # move to the new position after step-over
            egm.send_to_robot(tar)
            step_done = True

            q_new = tar
            ### drain the EGM buffer, otherwise obsolete EGM feedback will be used ###
            for i in range(1000):
                res, state = egm.receive_from_robot(.1)

            print('--------step-over motion done--------')
            time.sleep(2)

        # forward kinematics to calculate current position of eef
        pose = rox.fwdkin(abb_robot, q_new)
        R = pose.R
        flag, ft = netft.read_ft_streaming(.1)
        # map torque/force from sensor frame to base frame
        T_b = np.matmul(R, ft[0:3])
        F_b = np.matmul(R, ft[3:None])
        F = F_b[2]  # - F0# first three torques and then three forces

        # start motion in y direction when robot barely touches coupon
        Fd0 = 50  # this should be equal to the Fdz[0]
        if F < Fd0 - 0.1 and cnt == 0:
            z = pose.p[2]
            # account for the robot base and length of tool
            z = z + 0.026 - 0.18 + 0.00353
            # will shake if gain too large, here use 0.0002
            v_z = Kp * 10 * (F - Fd0)  #-Ki*(z-z_ref)-Kd*acce#-Ki*pos
            # assemble the full twist command
            spatial_velocity_command = np.array([0, 0, 0, 0, 0, v_z])
        else:
            # assemble the full twist command
            # nominal input composed of F and v
            spatial_velocity_command = input[:, cnt]  #np.array([0, 0, 0, vdx, 0, Fd])

            z = pose.p[2]
            # account for the robot base and length of tool
            z = z + 0.026 - 0.18 + 0.00353
            v_z = Kp * (F - spatial_velocity_command[5])  #-Ki*(z-z_ref)-Kd*acce#-Ki*pos
            # nominal input only contains v
            spatial_velocity_command[5] = v_z
            # approximation of joint velocity
            #q_new = np.deg2rad(state.joint_angles)

            ######### change here, use observer instead of approximation to calculate q_dot ########
            if cnt == 0:
                q_hat = q_new
            qhat_dot = joints_vel + kl * (q_new - q_hat)

            #q_dot = (q_new - q_pre)/delta_t

            J = rox.robotjacobian(abb_robot, q_new)
            # estimate velocity
            v_est = J.dot(qhat_dot)
            # assemble the nominal output composed of F and v
            out[:, cnt] = np.append(v_est[0:5], F)

            force_out[:, cnt] = np.concatenate((T_b, F_b), axis=0)

            eef_pos[:, cnt] = pose.p
            #eef_pos[2, cnt] = z

            R = pose.R
            quat = rox.R2q(R)
            eef_orien[:, cnt] = quat
            tim[0, cnt] = time.time()
            cnt = cnt + 1

        print(F)
        # solve for joint velocity
        # Jacobian inverse
        #J = rox.robotjacobian(abb_robot, q_new)
        #joints_vel = np.linalg.pinv(J).dot(spatial_velocity_command)

        # emergency stop if force too large
        if abs(F) > 2000:
            spatial_velocity_command = np.array([0, 0, 0, 0, 0, 0])
            print "force too large, stop..."

        # QP
        joints_vel = quadprog.compute_joint_vel_cmd_qp(
            q_new, spatial_velocity_command)

        # commanded joint position setpoint to EGM
        q_c = q_new + joints_vel * delta_t
        egm.send_to_robot(q_c)
        # joint angle at previous time step
        #q_pre = q_new

        ############ change here ##############
        # make the time interval 0.004 s
        #t_new = time.time()
        #if t_new - t_pre < delta_t:
        #    time.sleep(delta_t - t_new + t_pre)

        ######### change here ########
        q_hat = q_hat + qhat_dot * delta_t

    # interpolate to filter the output
    t_inter = np.arange(0, n * delta_t, delta_t)
    # interpolate each row of output
    for i in range(6):
        y = out[i, :]
        tck = interpolate.splrep(t_inter, y, s=0.01)  # s = 0 would mean no smoothing
        ynew = interpolate.splev(t_inter, tck, der=0)
        out[i, :] = ynew

    error = out - desired
    # flip the error
    err_flip = np.fliplr(error)
    print(np.linalg.norm(error, 'fro'))

    return out, err_flip, np.linalg.norm(
        error, 'fro'), force_out, eef_pos, eef_orien, tim
Example #14
def update_ik_info3(
        robot_rox, T_desired,
        q_current):  # inverse kinematics that uses Least Square solver

    # R_d, p_d: Desired orientation and position
    R_d = T_desired.R
    p_d = T_desired.p
    d_q = q_current

    num_joints = len(robot_rox.joint_type)

    q_cur = d_q  # initial guess on the current joint angles
    q_cur = q_cur.reshape((num_joints, 1))

    max_steps = 200  # maximum number of iterations allowed for convergence

    # print_div( "<br> q_cur " + str(q_cur) ) # DEBUG

    hist_b = []

    itr = 0  # Iterations
    converged = False
    while itr < max_steps and not converged:

        pose = rox.fwdkin(robot_rox, q_cur.flatten())
        R_cur = pose.R
        p_cur = pose.p

        #calculate current Jacobian
        J0T = rox.robotjacobian(robot_rox, q_cur.flatten())

        # Transform Jacobian to End effector frame from the base frame
        Tr = np.zeros((6, 6))
        Tr[:3, :3] = R_cur.T
        Tr[3:, 3:] = R_cur.T
        J0T = Tr @ J0T

        # Jp=J0T[3:,:]
        # JR=J0T[:3,:]                      #decompose to position and orientation Jacobian

        # Error in position and orientation
        # ER = np.matmul(R_cur, np.transpose(R_d))
        ER = np.matmul(np.transpose(R_d), R_cur)
        #print_div( "<br> ER " + str(ER) ) # DEBUG

        # EP = p_cur - p_d
        EP = R_cur.T @ (p_cur - p_d)
        #print_div( "<br> EP " + str(EP) ) # DEBUG

        #decompose ER to (k,theta) pair
        k, theta = rox.R2rot(ER)
        # print_div( "<br> k " + str(k) ) # DEBUG
        # print_div( "<br> theta " + str(theta) ) # DEBUG

        ## set up s for different norm for ER
        # s=2*np.dot(k,np.sin(theta)) #eR1
        # s = np.dot(k,np.sin(theta/2))         #eR2
        s = np.sin(theta / 2) * np.array(k)  #eR2
        # s=2*theta*k              #eR3
        # s=np.dot(J_phi,phi)              #eR4
        # print_div( "<br> s " + str(s) ) # DEBUG

        Kp = np.eye(3)
        KR = np.eye(3)  #gains for position and orientation error

        vd = -Kp @ EP
        wd = -KR @ s

        b = np.concatenate([wd, vd])
        np.nan_to_num(b, copy=False, nan=0.0, posinf=None, neginf=None)
        # print(b)
        # print(J0T)

        # DEBUG --------------
        hist_b.append(b)
        if itr > 0:
            error_cur = np.linalg.norm(hist_b[itr - 1]) - np.linalg.norm(
                hist_b[itr])
            #print("Error= " + str(error_cur))
        # DEBUG --------------

        res = lsq_linear(J0T, b)

        if res.success:
            qdot_star = res.x
        else:
            print("Any solution could not found")
            qdot_star = np.finfo(float).eps * np.ones(num_joints)

        # find best step size to take
        # alpha=fminbound(min_alpha,0,1,args=(q_cur,qdot_star,Sawyer_def,Rd,pd,w,Kp))
        alpha = 0.3  # Step size    # 1.0
        delta = alpha * qdot_star
        # print_div( "<br> delta " + str(delta) ) # DEBUG

        # Convergence Check
        converged = (np.abs(np.hstack((s, EP))) < 0.0001).all()

        if not converged:
            # Update for next iteration
            q_cur = q_cur + delta.reshape((num_joints, 1))

            # Normalize angles between -pi and pi
            q_cur = normalizeAngles(q_cur)

        # print_div( "<br> converged? " + str(converged) ) # DEBUG
        # print( "converged? " + str(converged) ) # DEBUG

        itr += 1  # Increase the iteration
        #print(itr)
        #print(converged)
        # print(delta)
        # print(q_cur)

    # joints_text=""
    # for i in q_cur:
    #     joints_text+= "(%.3f, %.3f) " % (np.rad2deg(i), i)
    # print_div_ik_info(str(rox.Transform(R_d,p_d)) +"<br>"+ joints_text +"<br>"+ str(converged) + ", itr = " + str(itr))
    return np.squeeze(q_cur), converged
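The normalizeAngles helper is not shown; a minimal stand-in that wraps each joint angle into (-pi, pi], which is all this loop needs, could be:

import numpy as np

def normalizeAngles(q):
    # atan2(sin, cos) wraps every element into the (-pi, pi] range
    return np.arctan2(np.sin(q), np.cos(q))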
Example #15
print(jog_service.device_info.device.name)

jog = jog_service.get_jog("robot")

jog.setf_jog_mode()

#for x in range(100):
#jog.jog_joints3(1,1)
#jog.setf_halt_mode()

robot = d.get_device_client("robot", 1)

robot_state, _ = robot.robot_state.PeekInValue()
q_current = robot_state.joint_position
robot_util = RobotUtil(client_obj=robot)
rox_robot = robot_util.robot_info_to_rox_robot(robot.robot_info, 0)
geom_util = GeometryUtil(client_obj=jog_service)
T = rox.fwdkin(rox_robot, q_current)
print(f"Current xyz = {T.p}, rpy = {np.rad2deg(rox.R2rpy(T.R))}")
T2 = copy.deepcopy(T)
T2.p[1] += 0.1
T3 = copy.deepcopy(T)
T3.p[1] -= 0.1
pose_des = geom_util.rox_transform_to_pose(T2)
pose_des2 = geom_util.rox_transform_to_pose(T3)

for i in range(10):
    jog.jog_joints_to_pose(pose_des, 50)
    jog.jog_joints_to_pose(pose_des2, 50)
Example #16
    def get_current_pose(self):
        d_q = self.get_current_joint_positions()
        # Returns pose.R and pose.p; see rox.fwdkin for details
        pose = rox.fwdkin(self.robot_rox, d_q)
        return pose
Example #17
def main():
    # initialize EGM interface instance
    egm = rpi_abb_irc5.EGM()

    # initialize a robot instance
    abb_robot = abb_irb6640_180_255_robot()

    # desired force
    Fd = -1000

    # desired velocity in x
    vdx = 1

    # spring constant
    k = 50000

    # position of object in z
    pos_obj = 1.5

    # feedback gain
    Kp = 0.005
    Kd = 0.2
    Ki = 0.001

    # time step
    delta_t = 0.004 * 4

    # initial configuration in degree
    init = [0, 0, 0, 0, 90, 0]

    # quadprog to solve for joint velocity
    quadprog = qp.QuadProg(abb_robot)

    # determine if robot has reached the initial configuration init
    tag = True
    while tag:
        res, state = egm.receive_from_robot(.1)
        if res:
            #print np.fabs(sum(state.joint_angles) - sum(b))
            if np.fabs(sum(state.joint_angles) - sum(init)) < 1e-2:
                tag = False

    time.sleep(0.5)
    print('--------start force control--------')

    pose = rox.fwdkin(abb_robot, np.deg2rad(state.joint_angles))

    while pose.p[0] < 2:
        # receive EGM feedback
        res, state = egm.receive_from_robot(.1)

        if not res:
            continue

        # forward kinematics to calculate current position of eef
        pose = rox.fwdkin(abb_robot, np.deg2rad(state.joint_angles))

        if pose.p[2] >= pos_obj:
            F = 0
            v_l = np.array([0, 0, -Kp * (F - Fd)])
        else:
            F = k * (pose.p[2] - pos_obj)
            v_l = np.array([vdx, 0, -Kp * (F - Fd)])

        print(F)

        # assemble the full twist command
        spatial_velocity_command = np.array([0, 0, 0, v_l[0], v_l[1], v_l[2]])

        # solve for joint velocity
        # Jacobian inverse
        #J = rox.robotjacobian(abb_robot, np.deg2rad(state.joint_angles))
        #joints_vel = np.linalg.pinv(J).dot(spatial_velocity_command)

        # QP
        joints_vel = quadprog.compute_joint_vel_cmd_qp(
            np.deg2rad(state.joint_angles), spatial_velocity_command)

        # commanded joint position setpoint to EGM
        q_c = np.deg2rad(state.joint_angles) + joints_vel * delta_t

        egm.send_to_robot(q_c)
Example #18
def calibrate(images, joint_poses, aruco_dict, aruco_id, aruco_markersize,
              flange_to_marker, mtx, dist, cam_pose, rox_robot,
              robot_local_device_name):
    """ Perform extrinsic camera calibration from the given images using
    OpenCV aruco marker detection, the corresponding robot joint poses and
    flange-to-marker transform, and the given intrinsic camera parameters."""

    assert aruco_dict.startswith("DICT_"), "Invalid aruco dictionary name"

    aruco_dict = getattr(aruco, aruco_dict)  # convert string to python
    aruco_dict = cv2.aruco.Dictionary_get(aruco_dict)
    aruco_params = cv2.aruco.DetectorParameters_create()

    i = 0

    imgs_out = []

    geom_util = GeometryUtil()
    image_util = ImageUtil()

    object_points = []
    image_points = []

    for img, joints in zip(images, joint_poses):

        # Find the aruco tag corners
        # corners, ids, rejected = cv2.aruco.detectMarkers(img, aruco_dict, parameters=aruco_params,cameraMatrix=mtx, distCoeff=dist)
        corners, ids, rejected = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=aruco_params)

        # #debug
        # print(str(type(corners))) # <class 'list'>
        # print(str(corners))  # list of numpy arrays of corners
        # print(str(type(ids))) # <class 'numpy.ndarray'>
        # print(str(ids))

        if len(corners) > 0:
            # Only find the id that we desire
            indexes = np.flatnonzero(ids.flatten() == aruco_id).tolist()
            corners = [corners[index] for index in indexes]
            ids = np.asarray([ids[index] for index in indexes])

            # #debug
            # print(str(type(corners))) # <class 'list'>
            # print(str(corners))  # list of numpy arrays of corners
            # print(str(type(ids))) # <class 'numpy.ndarray'>
            # print(str(ids))

            if len(ids) > 0:  # if there exist at least one id that we desire
                # Select the first detected one, discard the others
                corners = corners[0]  # now corners is 1 by 4

                # # extract the marker corners (which are always returned
                # # in top-left, top-right, bottom-right, and bottom-left
                # # order)
                # corners = corners.reshape((4, 2))
                # (topLeft, topRight, bottomRight, bottomLeft) = corners

                # Estimate the pose of the detected marker in camera frame
                rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                    corners, aruco_markersize, mtx, dist)

                # # Debug: Show the detected tag and axis in the image
                # # # cv2.aruco.drawDetectedMarkers(img, corners)  # Draw A square around the markers (Does not work)
                img1 = img.copy()
                img_out = cv2.aruco.drawAxis(img1, mtx, dist, rvec, tvec,
                                             aruco_markersize *
                                             0.75)  # Draw Axis
                imgs_out.append(img_out)

                transform_base_2_flange = rox.fwdkin(rox_robot, joints)
                transform_flange_2_marker = geom_util.pose_to_rox_transform(
                    flange_to_marker)
                transform_base_2_marker = transform_base_2_flange * transform_flange_2_marker
                transform_base_2_marker_corners = _marker_corner_poses(
                    transform_base_2_marker, aruco_markersize)
                # Structure of this dictionary is "filename":[[R_base2marker],[T_base2marker],[R_cam2marker],[T_cam2marker]]
                for j in range(4):
                    object_points.append(transform_base_2_marker_corners[j].p)
                    image_points.append(corners[0, j])
                #pose_pairs_dict[i] = (transform_base_2_marker_corners, corners)
                i += 1

    object_points_np = np.array(object_points, dtype=np.float64)
    image_points_np = np.array(image_points, dtype=np.float32)

    # Solve the PnP problem: recover the camera pose from the 3D base-frame
    # corner points and their 2D image projections
    retval, rvec, tvec = cv2.solvePnP(object_points_np, image_points_np, mtx,
                                      dist)
    R_cam2base = cv2.Rodrigues(rvec)[0]
    T_cam2base = tvec

    # Add another display image of marker at robot base
    img_out = cv2.aruco.drawAxis(img, mtx, dist,
                                 cv2.Rodrigues(R_cam2base)[0], T_cam2base,
                                 aruco_markersize * 0.75)  # Draw Axis
    imgs_out.append(img_out)

    rox_transform_cam2base = rox.Transform(R_cam2base, T_cam2base,
                                           cam_pose.parent_frame_id,
                                           robot_local_device_name)
    rox_transform_world2base = cam_pose * rox_transform_cam2base

    #R_base2cam = R_cam2base.T
    #T_base2cam = - R_base2cam @ T_cam2base

    transform_base2cam = rox_transform_world2base.inv()
    R_base2cam = transform_base2cam.R
    T_base2cam = transform_base2cam.p

    #debug
    print("FINAL RESULTS: ")
    print("str(R_cam2base)")
    # print(str(type(R_cam2base)))
    print(str(R_cam2base))
    print("str(T_cam2base)")
    # print(str(type(T_cam2base.flatten())))
    print(str(T_cam2base))

    print("str(R_base2cam)")
    # print(str(type(R_base2cam)))
    print(str(R_base2cam))
    print("str(T_base2cam)")
    # print(str(type(T_base2cam.flatten())))
    print(str(T_base2cam))

    pose_res = geom_util.rox_transform_to_named_pose(rox_transform_world2base)
    cov = np.eye(6) * 1e-5

    imgs_out2 = [
        image_util.array_to_compressed_image_jpg(i, 70) for i in imgs_out
    ]

    return pose_res, cov, imgs_out2, 0.0
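
# A minimal sketch (an assumption, not the original helper) of what the
# _marker_corner_poses() function used above might look like. It offsets the
# marker-center transform to the four corner positions, in the same top-left,
# top-right, bottom-right, bottom-left order that cv2.aruco.detectMarkers()
# reports image corners, so object_points and image_points pair up
# index-by-index for cv2.solvePnP().
import numpy as np
import general_robotics_toolbox as rox

def _marker_corner_poses_sketch(transform_marker, marker_size):
    half = marker_size / 2.0
    # corner offsets in the marker's own frame (marker plane at z = 0)
    corner_offsets = [
        np.array([-half, half, 0.0]),   # top-left
        np.array([half, half, 0.0]),    # top-right
        np.array([half, -half, 0.0]),   # bottom-right
        np.array([-half, -half, 0.0]),  # bottom-left
    ]
    return [rox.Transform(transform_marker.R,
                          transform_marker.p + transform_marker.R.dot(c))
            for c in corner_offsets]
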
Beispiel #19
0
def main():
    # initialize EGM interface instance
    egm = rpi_abb_irc5.EGM()

    # initialize a robot instance
    abb_robot = abb_irb6640_180_255_robot()

    # desired force
    #Fd = 1000

    # desired velocity in y
    #vdy = -0.5

    # feedback gain
    Kp = 0.0002
    Kd = 0.0008
    Ki = 0.0004

    # time step
    delta_t = 0.004

    # initial configuration in degree
    init = [-91.85, 2.53, 38.20, 0.00, 49.27, -1.85]

    n = 2000

    # quadprog to solve for joint velocity
    quadprog = qp.QuadProg(abb_robot)

    # force sensor interface
    try:
        if (len(sys.argv) < 2):
            raise Exception('IP address of ATI Net F/T sensor required')
        host = sys.argv[1]
        netft = rpi_ati_net_ft.NET_FT(host)
        netft.set_tare_from_ft()

        netft.start_streaming()
    except KeyboardInterrupt:
        pass

    ####### trapezoidal desired force in z #######
    tt = np.linspace(0, 4 * np.pi, n)
    desired_f = np.zeros((1, n))
    vdy = np.zeros((1, n))

    # form trap force and trap motion
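    # ramp up on [0, pi), hold on [pi, 3*pi), ramp down on [3*pi, 4*pi]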
    for i in range(n):
        if tt[i] >= 0 and tt[i] < np.pi:
            desired_f[0, i] = 50 + 302 * tt[i]
            vdy[0, i] = -0.2 * tt[i]
        elif tt[i] >= np.pi and tt[i] < 3 * np.pi:
            desired_f[0, i] = 50 + 302 * np.pi
            vdy[0, i] = -0.2 * np.pi
        else:
            desired_f[0, i] = 50 + 302 * np.pi - 302 * (tt[i] - 3 * np.pi)
            vdy[0, i] = -0.2 * np.pi + 0.2 * (tt[i] - 3 * np.pi)
    #plt.plot(vdy[0, :])
    #plt.show()

    ######## change here ########
    #pos = 0
    #acce = 0
    #v_l_pre = 0
    #########

    # output force
    force_out = np.zeros((6, n))

    # pos of eef
    eef_pos = np.zeros((3, n))
    eef_orien = np.zeros((4, n))

    # timestamp
    tim = np.zeros((1, n))

    # referece height of coupon that achieves desired force
    z_ref = 0.89226
    x_ref = 0
    y_ref = -1.35626

    # determine if robot has reached the initial configuration init
    tag = True
    while tag:
        res, state = egm.receive_from_robot(.1)
        if res:
            #print sum(np.rad2deg(state.joint_angles))- sum(init)
            if np.fabs(sum(np.rad2deg(state.joint_angles)) - sum(init)) < 1e-3:
                tag = False

    time.sleep(1)
    print('--------start force control--------')

    ### drain the force sensor buffer ###
    for i in range(1000):
        flag, ft = netft.read_ft_streaming(.1)

    ### drain the EGM buffer ###
    for i in range(1000):
        res, state = egm.receive_from_robot(.1)

    flag, ft = netft.read_ft_streaming(.1)
    F0 = ft[5]
    print(F0)
    time.sleep(2)

    cnt = 0
    while cnt < n:
        # receive EGM feedback
        res, state = egm.receive_from_robot(.1)

        if not res:
            continue

        # forward kinematics
        pose = rox.fwdkin(abb_robot, state.joint_angles)
        R = pose.R
        flag, ft = netft.read_ft_streaming(.1)

        # map torque/force from sensor frame to base frame
        # (ft is ordered as three torques followed by three forces)
        T_b = np.matmul(R, ft[0:3])
        F_b = np.matmul(R, ft[3:])
        F = F_b[2]  # force along base z

        Fd0 = 50
        # do not start motion in y until robot barely touches coupon (50 N)
        if F < Fd0 - 0.1 and cnt == 0:
            z = pose.p[2]
            # account for the robot base and length of tool
            z = z + 0.026 - 0.18 + 0.00353
            # will shake if gain too large, here use 0.0002
            v_z = Kp * 10 * (F - Fd0)
            v_l = np.array([0, 0, v_z])
        else:
            # deadzone for Fx: ignore small forces, otherwise pass through
            F_x = F_b[0] if abs(F_b[0]) >= 30 else 0

            # deadzone for Fy
            F_y = F_b[1] if abs(F_b[1]) >= 30 else 0

            v_x = Kp / 2 * (F_x - 0)
            v_y = vdy[0, cnt] + Kp / 2 * (F_y - 0)
            z = pose.p[2]

            #print desired_f[0, cnt]
            # account for the robot base and length of tool
            z = z + 0.026 - 0.18 + 0.00353
            v_z = Kp * (F - desired_f[0, cnt])
            v_l = np.array([v_x, v_y, v_z])

            force_out[:, cnt] = np.concatenate((T_b, F_b), axis=0)

            eef_pos[:, cnt] = pose.p

            quat = rox.R2q(R)
            eef_orien[:, cnt] = quat
            tim[0, cnt] = time.time()

            cnt += 1

        print(F)

        #### change here ####
        #pos = pos + v_l[2]*delta_t
        #acce = (v_l[2]-v_l_pre)/delta_t

        # assemble the full twist command
        spatial_velocity_command = np.array([0, 0, 0, v_l[0], v_l[1], v_l[2]])

        # emergency stop if force too large
        if abs(F) > 2000:
            spatial_velocity_command = np.array([0, 0, 0, 0, 0, 0])
            print "force too large, stop..."

        # solve for joint velocity
        # Jacobian inverse
        #J = rox.robotjacobian(abb_robot, state.joint_angles)
        #joints_vel = np.linalg.pinv(J).dot(spatial_velocity_command)

        # QP
        joints_vel = quadprog.compute_joint_vel_cmd_qp(
            state.joint_angles, spatial_velocity_command)

        # commanded joint position setpoint to EGM
        q_c = state.joint_angles + joints_vel * delta_t

        egm.send_to_robot(q_c)

        ####### change here ########
        #v_l_pre = v_l[2]

        #if t_new - t_pre < delta_t:
        #    time.sleep(delta_t - t_new + t_pre)

        if cnt == n:
            csv_dat = np.hstack((desired_f.T, vdy.T, force_out.T, eef_pos.T,
                                 eef_orien.T, tim.T))
            np.savetxt(
                'trap_force_trap_motion_020520.csv',
                csv_dat,
                fmt='%6.5f',
                delimiter=',')  #, header='desired joint, optimal input')
            print "done"
def first_half(input, num_iter):

    # stop the active RAPID program
    rapid.stop()
    # reset the pointer to main
    rapid.resetpp()
    print('first half')
    print('reset PP to main')
    time.sleep(2)
    # start the RAPID program
    rapid.start()

    # determine if robot has reached the initial configuration
    tag = True
    while tag:
        res, state = egm.receive_from_robot(.1)
        if res:
            #print np.fabs(sum(np.rad2deg(state.joint_angles)) - sum(init))
            if np.fabs(sum(state.joint_angles) - sum(init)) < 1e-4:
                tag = False

    time.sleep(1)

    # out is composed of 5 velocity components and 1 force in z
    out = np.zeros((6, n))
    force_out = np.zeros((1, n))
    # pos of eef
    eef_pos = np.zeros((3, n))
    # orientation of eef (quaternion)
    eef_orien = np.zeros((4, n))
    # timestamp
    tim = np.zeros((1, n))

    q_hat = np.zeros((6, 1))
    qhat_dot = np.zeros((6, 1))
    # commanded joint velocity; initialized here because the observer update
    # below reads it on the first pass
    joints_vel = np.zeros((6,))
    # for the observer, the gain kl should be symmetric and positive definite
    # (a positive scalar here)
    kl = 0.5

    ### drain the EGM buffer ###
    for i in range(1000):
        res, state = egm.receive_from_robot(.1)

    time.sleep(2)

    cnt = 0
    step_done = False
    while cnt < n:
        # receive EGM feedback
        res, state = egm.receive_from_robot(.1)

        if not res:
            continue

        q_new = np.deg2rad(state.joint_angles)

        # step-over
        if not step_done:
            print('--------start step-over motion--------')
            # do step-over of 0.25 mm in +x in world frame
            # current eef pose
            pose_cur = rox.fwdkin(abb_robot, q_new)
            pose_cur.p[0] = pose_cur.p[0] + num_iter * 2 * step_over
            # solve for inverse kinematics and pick the one that is closest to current configuration
            sol = rox.robot6_sphericalwrist_invkin(abb_robot, pose_cur, q_new)
            try:
                tar = sol[0]  # IndexError if no solution
            except IndexError:
                tar = q_new
            # move to the new position after step-over
            egm.send_to_robot(tar)
            step_done = True

            q_new = tar
            ### drain the EGM buffer, or stale EGM feedback will be used ###
            for i in range(1000):
                res, state = egm.receive_from_robot(.1)

            print('--------step-over motion done--------')
            time.sleep(2)

        # forward kinematics to calculate current position of eef
        pose = rox.fwdkin(abb_robot, q_new)
        R = pose.R
        Fd0 = 50
        if pose.p[2] >= pos_obj:
            F = 0
            v_z = Kp * 30 * (F - Fd0)  #-Ki*(z-z_ref)-Kd*acce#-Ki*pos
            # assemble the full twist command
            spatial_velocity_command = np.array([0, 0, 0, 0, 0, v_z])
        else:
            F = -k * (pose.p[2] - pos_obj)
            if F < Fdz - 0.5 and cnt == 0:
                v_z = Kp * (F - Fdz)  #-Ki*(z-z_ref)-Kd*acce#-Ki*pos
                # assemble the full twist command
                spatial_velocity_command = np.array([0, 0, 0, 0, 0, v_z])
            else:
                # assemble the full twist command; the nominal input is
                # composed of F and v
                spatial_velocity_command = input[:, cnt]

                v_z = Kp * (F - spatial_velocity_command[5])
                # nominal input only contains v
                spatial_velocity_command[5] = v_z
                # approximation of joint velocity
                #q_new = np.deg2rad(state.joint_angles)

                ######### change here, use observer instead of approximation to calculate q_dot ########
                if cnt == 0:
                    q_hat = q_new
                qhat_dot = joints_vel + kl * (q_new - q_hat)

                #q_dot = (q_new - q_pre)/delta_t

                J = rox.robotjacobian(abb_robot, q_new)
                # estimate velocity
                v_est = J.dot(qhat_dot)
                # assemble the nominal output composed of F and v
                out[:, cnt] = np.append(v_est[0:5], F)

                force_out[:, cnt] = F

                eef_pos[:, cnt] = pose.p

                R = pose.R
                quat = rox.R2q(R)
                eef_orien[:, cnt] = quat
                tim[0, cnt] = time.time()
                cnt = cnt + 1

        print(F)
        # solve for joint velocity
        # Jacobian inverse
        J = rox.robotjacobian(abb_robot, q_new)
        joints_vel = np.linalg.pinv(J).dot(spatial_velocity_command)

        # QP
        #joints_vel = quadprog.compute_joint_vel_cmd_qp(q_new, spatial_velocity_command)

        # commanded joint position setpoint to EGM
        q_c = q_new + joints_vel * delta_t
        egm.send_to_robot(q_c)
        # joint angle at previous time step

        ######### change here ########
        q_hat = q_hat + qhat_dot * delta_t

    error = out - desired
    # flip the error
    err_flip = np.fliplr(error)
    print(np.linalg.norm(error, 'fro'))

    return out, err_flip, np.linalg.norm(
        error, 'fro'), force_out, eef_pos, eef_orien, tim
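
# Hedged sketch (illustrative): the joint-velocity observer embedded in
# first_half() above, isolated as a class. qhat_dot = q_dot_cmd + kl*(q - q_hat)
# is a Luenberger-style observer; the gain (a matrix in general) should be
# symmetric positive definite, here a positive scalar as in the script above.
import numpy as np

class JointVelocityObserver(object):
    def __init__(self, q0, kl=0.5):
        self.q_hat = np.asarray(q0, dtype=float)
        self.kl = kl

    def update(self, q_meas, q_dot_cmd, delta_t):
        # innovation term pulls the estimate toward the measured joint angles
        qhat_dot = q_dot_cmd + self.kl * (q_meas - self.q_hat)
        # integrate the estimate forward one control period
        self.q_hat = self.q_hat + qhat_dot * delta_t
        return qhat_dot
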
    def compute_fk(self, joint=None):

        if joint is None:
            joint = self.moveit_group.get_current_joint_values()

        return rox.fwdkin(self.rox_robot, joint)
Beispiel #22
0
    def _generate_movel_trajectory(self, robot_client, rox_robot,
                                   initial_q_or_T, T_final, speed_perc,
                                   q_final_seed):

        JointTrajectoryWaypoint = RRN.GetStructureType(
            "com.robotraconteur.robotics.trajectory.JointTrajectoryWaypoint",
            robot_client)
        JointTrajectory = RRN.GetStructureType(
            "com.robotraconteur.robotics.trajectory.JointTrajectory",
            robot_client)

        dof = len(rox_robot.joint_type)

        if isinstance(initial_q_or_T, rox.Transform):
            T_initial = initial_q_or_T
            q_initial, res = invkin.update_ik_info3(rox_robot, initial_q_or_T,
                                                    np.random.randn(dof))
        else:
            q_initial = initial_q_or_T
            T_initial = rox.fwdkin(rox_robot, q_initial)

        p_diff = T_final.p - T_initial.p
        R_diff = np.transpose(T_initial.R) @ T_final.R
        k, theta = rox.R2rot(R_diff)

        N_samples_translate = np.linalg.norm(p_diff) * 100.0
        N_samples_rot = np.abs(theta) * 0.2 * 180 / np.pi

        N_samples = math.ceil(np.max((N_samples_translate, N_samples_rot, 20)))
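        # (heuristic: roughly one waypoint per cm of translation and one per
        # 5 degrees of rotation, with a floor of 20 samples)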

        ss = np.linspace(0, 1, N_samples)

        if q_final_seed is None:
            q_final, res = invkin.update_ik_info3(rox_robot, T_final,
                                                  q_initial)
        else:
            q_final, res = invkin.update_ik_info3(rox_robot, T_final,
                                                  q_final_seed)
        #assert res, "Inverse kinematics failed"

        way_pts = np.zeros((N_samples, dof), dtype=np.float64)
        way_pts[0, :] = q_initial
        for i in range(1, N_samples):
            s = float(i) / (N_samples - 1)
            R_i = T_initial.R @ rox.rot(k, theta * s)
            p_i = (p_diff * s) + T_initial.p
            T_i = rox.Transform(R_i, p_i)
            #try:
            #    q, res = invkin.update_ik_info3(rox_robot, T_i, way_pts[i-1,:])
            #except AssertionError:
            ik_seed = (1.0 - s) * q_initial + s * q_final
            q, res = invkin.update_ik_info3(rox_robot, T_i, ik_seed)
            assert res, "Inverse kinematics failed"
            way_pts[i, :] = q

        vlims = rox_robot.joint_vel_limit
        alims = rox_robot.joint_acc_limit

        path = ta.SplineInterpolator(ss, way_pts)
        pc_vel = constraint.JointVelocityConstraint(vlims * 0.95 * speed_perc /
                                                    100.0)
        pc_acc = constraint.JointAccelerationConstraint(alims)

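        # TOPPRA retimes the geometric joint-space path to be as fast as
        # possible subject to the velocity and acceleration constraints above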
        instance = algo.TOPPRA([pc_vel, pc_acc],
                               path,
                               parametrizer="ParametrizeConstAccel")
        jnt_traj = instance.compute_trajectory()

        if jnt_traj is None:
            return None

        ts_sample = np.linspace(0, jnt_traj.duration, N_samples)
        qs_sample = jnt_traj(ts_sample)

        waypoints = []

        for i in range(N_samples):
            wp = JointTrajectoryWaypoint()
            wp.joint_position = qs_sample[i, :]
            wp.time_from_start = ts_sample[i]
            waypoints.append(wp)

        traj = JointTrajectory()
        traj.joint_names = rox_robot.joint_names
        traj.waypoints = waypoints

        return traj
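
# Hedged sketch (illustrative): the constant-screw Cartesian interpolation used
# in the waypoint loop above, isolated as a helper. For s in [0, 1] it blends
# translation linearly and rotation along a single axis-angle (k, theta), which
# is what the R_i / p_i lines inside the loop compute.
import numpy as np
import general_robotics_toolbox as rox

def interpolate_pose(T_initial, T_final, s):
    # relative rotation from initial to final orientation, as axis-angle
    k, theta = rox.R2rot(np.transpose(T_initial.R) @ T_final.R)
    R_i = T_initial.R @ rox.rot(k, theta * s)
    p_i = T_initial.p + s * (T_final.p - T_initial.p)
    return rox.Transform(R_i, p_i)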