Code example #1
    def runTest(self):

        q_1 = np.array([0.63867877, 0.52251797, 0.56156573, 0.06089615])
        q_2 = np.array([0.35764716, 0.61051424, 0.11540801, 0.69716703])
        R_t = rox.q2R(q_1).dot(rox.q2R(q_2))
        q_t = rox.R2q(R_t)

        q = rox.quatproduct(q_1).dot(q_2).reshape((4, ))

        np.testing.assert_allclose(q, q_t, atol=1e-6)
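The test relies on the identity R(q1)·R(q2) = R(q1 ⊗ q2) for unit quaternions: rox.quatproduct(q_1) returns the 4x4 left-multiplication matrix of the Hamilton product, so quatproduct(q_1).dot(q_2) is the quaternion of the composed rotation. Below is a minimal self-contained sketch of the same check, assuming the scalar-first [w, x, y, z] convention used elsewhere in these examples (the helper names are illustrative, not part of rox):

import numpy as np

def quat_to_R(q):
    # Rotation matrix of a quaternion in [w, x, y, z] order (normalized first).
    w, x, y, z = np.asarray(q, dtype=float) / np.linalg.norm(q)
    return np.array([
        [1 - 2*(y*y + z*z), 2*(x*y - z*w),     2*(x*z + y*w)],
        [2*(x*y + z*w),     1 - 2*(x*x + z*z), 2*(y*z - x*w)],
        [2*(x*z - y*w),     2*(y*z + x*w),     1 - 2*(x*x + y*y)]])

def quat_left_matrix(q):
    # 4x4 matrix L(q1) such that L(q1).dot(q2) is the Hamilton product q1 * q2.
    w, x, y, z = q
    return np.array([
        [w, -x, -y, -z],
        [x,  w, -z,  y],
        [y,  z,  w, -x],
        [z, -y,  x,  w]])

q_1 = np.array([0.63867877, 0.52251797, 0.56156573, 0.06089615])
q_2 = np.array([0.35764716, 0.61051424, 0.11540801, 0.69716703])

q = quat_left_matrix(q_1).dot(q_2)          # quaternion of the composed rotation
R_t = quat_to_R(q_1).dot(quat_to_R(q_2))    # product of the two rotation matrices
np.testing.assert_allclose(quat_to_R(q), R_t, atol=1e-6)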
Code example #2

    def plan_pickup_raise(self, goal=None):

        #TODO: check change state and target
        self._begin_step(goal)
        try:
            rospy.loginfo("Begin pickup_raise for payload %s",
                          self.current_payload)

            #Just use gripper position for now, think up a better way in future
            object_target = self.tf_listener.lookupTransform(
                "world", "vacuum_gripper_tool", rospy.Time(0))
            pose_target2 = copy.deepcopy(object_target)
            pose_target2.p[2] += 0.3
            pose_target2.p = np.array([-0.02285, -1.840, 1.0])
            pose_target2.R = rox.q2R([0.0, 0.707, 0.707, 0.0])

            path = self._plan(pose_target2, config="transport_panel_short")

            self.state = "plan_pickup_raise"
            self.plan_dictionary['pickup_raise'] = path
            rospy.loginfo("Finish pickup_raise for payload %s",
                          self.current_target)
            self._step_complete(goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)
Code example #3

 def get_object_gripper_target_pose(self, key):
     
     object_pose=self.get_object_pose(key)
             
     (trans1,rot1) = self.listener.lookupTransform(key, key + "_gripper_target", rospy.Time(0))
     tag_rel_pose=rox.Pose(rox.q2R([rot1[3], rot1[0], rot1[1], rot1[2]]), trans1)
     return object_pose * tag_rel_pose
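The index shuffle [rot1[3], rot1[0], rot1[1], rot1[2]] is needed because tf's lookupTransform returns quaternions in (x, y, z, w) order, while rox.q2R expects the scalar-first [w, x, y, z] order. A small sketch that makes the reordering explicit (the helper name is hypothetical):

def tf_quat_to_wxyz(rot):
    # tf returns (x, y, z, w); rox.q2R expects [w, x, y, z]
    x, y, z, w = rot
    return [w, x, y, z]

# equivalent to the construction above:
# tag_rel_pose = rox.Pose(rox.q2R(tf_quat_to_wxyz(rot1)), trans1)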
Code example #4
    def runTest(self):
        rot_t=np.array([[-0.5057639,-0.1340537,0.8521928], \
                      [0.6456962,-0.7139224,0.2709081], \
                      [0.5720833,0.6872731,0.4476342]])

        q = np.array([0.2387194, 0.4360402, 0.2933459, 0.8165967])
        rot = rox.q2R(q)
        np.testing.assert_allclose(rot, rot_t, atol=1e-6)
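A quick way to sanity-check a fixture like rot_t is a round trip through rox.R2q plus an orthonormality test; a minimal sketch, assuming rox is the general_robotics_toolbox package (tolerances are loose because the fixture quaternion is only unit to about 1e-6):

import numpy as np
import general_robotics_toolbox as rox

q = np.array([0.2387194, 0.4360402, 0.2933459, 0.8165967])
R = rox.q2R(q)
q_back = rox.R2q(R)
# R2q may return q or -q for the same rotation, so compare |<q, q_back>| to 1
np.testing.assert_allclose(np.abs(np.dot(q, q_back)), 1.0, atol=1e-5)
# R should be a proper rotation: orthonormal with determinant +1
np.testing.assert_allclose(R.dot(R.T), np.eye(3), atol=1e-5)
assert np.linalg.det(R) > 0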
Code example #5
    def convert_to_world(self, robot_state, robot_name):
        #get robot pose
        robot_model = self.w.get_models(robot_name)
        robot_pose = robot_model.world_pose.PeekInValue()[0]
        robot_R = q2R(np.array(robot_pose['orientation'].tolist()[0]))
        robot_P = np.array(robot_pose['position'].tolist()[0]).reshape((3, 1))
        robot_H = np.vstack((np.hstack((robot_R, robot_P)), self.H4))

        #get robot ee pose
        ee_R = q2R(
            np.array(
                robot_state.InValue.kin_chain_tcp['orientation'].tolist()[0]))
        ee_P = np.array(
            robot_state.InValue.kin_chain_tcp['position'].tolist()[0]).reshape(
                (3, 1))
        ee_H = np.vstack((np.hstack((ee_R, ee_P)), self.H4))

        ee_world_H = np.dot(robot_H, ee_H)
        k, ee_world_angle = R2rot(ee_world_H[:3, :3])

        return ee_world_H[0][-1], ee_world_H[1][-1], ee_world_angle
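convert_to_world stacks R (3x3) and P (3x1) into a 4x4 homogeneous transform (self.H4 is presumably the bottom row [0, 0, 0, 1]) and chains the robot base pose with the tool-flange pose by matrix multiplication. A minimal sketch of the same composition with a hypothetical helper name:

import numpy as np

def make_H(R, p):
    # 4x4 homogeneous transform from a 3x3 rotation and a 3-vector translation
    H = np.eye(4)
    H[:3, :3] = R
    H[:3, 3] = np.asarray(p).flatten()
    return H

# (world <- base) composed with (base <- end effector) gives (world <- end effector):
#   ee_world_H = make_H(robot_R, robot_P).dot(make_H(ee_R, ee_P))
#   ee_world_p = ee_world_H[:3, 3]    # end-effector position in the world frame
#   ee_world_R = ee_world_H[:3, :3]   # end-effector orientation in the world frame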
Code example #6
 def release_suction_cups(self):
     ################## RELEASE SUCTION CUPS AND LIFT THE GRIPPER ################## 	
     rospy.loginfo("============ Release Suction Cups...")
     self.controller_commander.set_controller_mode(self.controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
     self.rapid_node.set_digital_io("Vacuum_enable", 0)
     #g = ProcessStepGoal('plan_place_set_second_step',"")
     #process_client.send_goal(g)
     time.sleep(0.5)
 
     Cur_Pose = self.controller_commander.get_current_pose_msg()
     rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
     trans_current = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]
     pose_target2 = rox.Transform(rox.q2R([rot_current[0], rot_current[1], rot_current[2], rot_current[3]]), trans_current)
     pose_target2.p[2] += 0.25
 
     rospy.loginfo("============ Lift gripper...")
     #TODO: Change to trajopt planning
     self.controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
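release_suction_cups rebuilds a rox.Transform from the current geometry_msgs pose before offsetting its z coordinate, again reordering the message's (x, y, z, w) quaternion fields into the [w, x, y, z] order q2R expects. The same conversion as a compact helper (hypothetical name, shown only as a sketch):

def pose_msg_to_transform(pose_msg):
    # geometry_msgs orientation fields are x, y, z, w; rox.q2R takes [w, x, y, z]
    o = pose_msg.pose.orientation
    p = pose_msg.pose.position
    return rox.Transform(rox.q2R([o.w, o.x, o.y, o.z]), [p.x, p.y, p.z])

# usage sketch:
# pose_target2 = pose_msg_to_transform(self.controller_commander.get_current_pose_msg())
# pose_target2.p[2] += 0.25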
Code example #7

    def plan_reset_position(self, goal=None):

        self._begin_step(goal)
        try:
            Q = [0.02196692, -0.10959773, 0.99369529, -0.00868731]
            P = [1.8475985, -0.04983688, 0.82486047]

            rospy.loginfo("Planning to reset position")
            self.state = "reset_position"
            self.process_index = 0

            pose_target = rox.Transform(rox.q2R(Q), np.copy(P))

            path = self._plan(pose_target, config="reposition_robot")

            self.plan_dictionary['reset_position'] = path

            self._step_complete(goal)

        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)
Code example #8

    def plan_pickup_raise(self):

        #TODO: check change state and target

        rospy.loginfo("Begin pickup_raise for payload %s",
                      self.current_payload)

        #Just use gripper position for now, think up a better way in future
        object_target = self.tf_listener.lookupTransform(
            "world", "vacuum_gripper_tool", rospy.Time(0))
        pose_target2 = copy.deepcopy(object_target)
        pose_target2.p[2] += 0.8
        pose_target2.p = np.array([-0.02285, -1.840, 1.0])
        pose_target2.R = rox.q2R([0.0, 0.707, 0.707, 0.0])

        path = self.controller_commander.compute_cartesian_path(
            pose_target2, avoid_collisions=False)

        self.state = "plan_pickup_raise"
        self.plan_dictionary['pickup_raise'] = path
        rospy.loginfo("Finish pickup_raise for payload %s",
                      self.current_target)
        self.publish_process_state()
Code example #9
    def robot_info_to_rox_robot(self, robot_info, chain_number):
        _check_list(robot_info.chains,
                    f"could not find kinematic chain number {chain_number}")
        if chain_number >= len(robot_info.chains):
            raise RR.InvalidArgumentException(
                f"invalid kinematic chain number {chain_number}")

        chain = robot_info.chains[chain_number]
        joint_count = len(chain.joint_numbers)
        for i in range(1, joint_count):
            if chain.joint_numbers[i - 1] >= chain.joint_numbers[i]:
                raise RR.InvalidArgumentException(
                    f"joint numbers must be increasing in chain number {chain_number}"
                )

            if chain.joint_numbers[i] >= len(robot_info.joint_info):
                raise RR.InvalidArgumentException(
                    f"joint number out of bounds in chain number {chain_number}"
                )

        _check_list(chain.H,
                    f"invalid shape for H in chain number {chain_number}",
                    joint_count)
        _check_list(chain.P,
                    f"invalid shape for P in chain number {chain_number}",
                    joint_count + 1)

        H = np.zeros((3, joint_count), dtype=np.float64)
        for i in range(joint_count):
            H[0, i] = chain.H[i]["x"]
            H[1, i] = chain.H[i]["y"]
            H[2, i] = chain.H[i]["z"]

        P = np.zeros((3, joint_count + 1), dtype=np.float64)
        for i in range(joint_count + 1):
            P[0, i] = chain.P[i]["x"]
            P[1, i] = chain.P[i]["y"]
            P[2, i] = chain.P[i]["z"]

        joint_type = [0] * joint_count
        joint_lower_limit = np.zeros((joint_count, ), dtype=np.float64)
        joint_upper_limit = np.zeros((joint_count, ), dtype=np.float64)
        joint_vel_limit = np.zeros((joint_count, ), dtype=np.float64)
        joint_acc_limit = np.zeros((joint_count, ), dtype=np.float64)
        joint_names = [None] * joint_count

        for i in range(joint_count):
            j = robot_info.joint_info[i]
            if j.joint_type == 1:
                # Revolute joint
                joint_type[i] = 0
            elif j.joint_type == 3:
                # Prismatic joint
                joint_type[i] = 1
            else:
                raise RR.InvalidArgumentException(
                    f"invalid joint type: {j.joint_type}")

            if j.joint_limits is None:
                raise RR.InvalidArgumentException(
                    "joint_limits must not be null")
            joint_lower_limit[i] = j.joint_limits.lower
            joint_upper_limit[i] = j.joint_limits.upper
            joint_vel_limit[i] = j.joint_limits.velocity
            joint_acc_limit[i] = j.joint_limits.acceleration
            if j.joint_identifier is not None:
                joint_names[i] = j.joint_identifier.name
            else:
                joint_names[i] = ""

        root_link_name = None
        if chain.link_identifiers is not None and len(
                chain.link_identifiers
        ) > 0 and chain.link_identifiers[0] is not None:
            root_link_name = chain.link_identifiers[0].name

        tip_link_name = None
        if chain.flange_identifier is not None:
            tip_link_name = chain.flange_identifier.name

        flange_q = chain.flange_pose["orientation"]
        flange_p = chain.flange_pose["position"]

        r_tool = rox.q2R(self._node.NamedArrayToArray(flange_q).flatten())
        p_tool = np.array(self._node.NamedArrayToArray(flange_p).flatten())

        rox_robot = rox.Robot(H, P, joint_type, joint_lower_limit,
                              joint_upper_limit, joint_vel_limit,
                              joint_acc_limit, None, r_tool, p_tool,
                              joint_names, root_link_name, tip_link_name)

        return rox_robot
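The H (3 x n joint axes) and P (3 x (n+1) link offsets) arrays built above form the product-of-exponentials description that rox.Robot consumes. A minimal sketch of constructing such a robot directly for a made-up two-joint planar arm, assuming rox is general_robotics_toolbox and that the later constructor arguments (limits, tool, names) are optional:

import numpy as np
import general_robotics_toolbox as rox

# two revolute joints about z, links of 0.5 m and 0.4 m along x (hypothetical geometry)
H = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0]])
P = np.array([[0.0, 0.5, 0.4], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
joint_type = [0, 0]  # 0 = revolute, 1 = prismatic, same encoding as above

planar_arm = rox.Robot(H, P, joint_type)
print(rox.fwdkin(planar_arm, np.deg2rad([30.0, 45.0])))  # end-effector Transform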
Code example #10
 def tf_lookup(target_frame, source_frame):
     (trans1, rot1) = listener.lookupTransform(target_frame, source_frame,
                                               rospy.Time(0))
     return rox.Transform(rox.q2R([rot1[3], rot1[0], rot1[1], rot1[2]]),
                          trans1)
Code example #11

def main():

    #Start timer to measure execution time
    t1 = time.time()

    #Subscribe to controller_state
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)

    #Force-torque
    if not "disable-ft" in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    #Initialize ros node of this process
    rospy.init_node('Placement_DJ_1', anonymous=True)

    #print "============ Starting setup"

    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()

    object_commander = ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()

    #subscribe to Gripper camera node for image acquisition
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)
    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/continuous_trigger', CameraTrigger)

    #Set controller command mode
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, ft_threshold_place, [])
    time.sleep(0.5)

    #Set location above the panel where the end effector goes first (in the world/base frame); this is the ideal location of the panel.
    tran = np.array([2.20120663258, 1.2145882986, 0.0798989466944])
    rot = np.array([[-0.9971185, 0.0071821, 0.0755188],
                    [0.0056502, 0.9997743, -0.0204797],
                    [-0.0756488, -0.0199939, -0.9969341]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target3 = copy.deepcopy(pose_target2)
    pose_target2.p[2] += 0.35  #35 cm above ideal location of panel

    #Execute movement to set location
    print "Executing initial path"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    #Initialize aruco boards and parameters
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters_create()
    parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    board_ground = cv2.aruco.GridBoard_create(5, 4, 0.04, 0.01, aruco_dict, 20)
    board_panel = cv2.aruco.GridBoard_create(8, 3, 0.025, 0.0075, aruco_dict,
                                             50)
    #    tag_ids=["vacuum_gripper_marker_1","leeward_mid_panel_marker_1", "aligner_ref_1", "aligner_ref_2", "aligner_ref_3", "aligner_ref_4"]
    #    boards=[gripper_board, panel_board, ref1, ref2, ref3, ref4]

    #Initialize camera intrinsic parameters #18285636_10_05_2018_5_params
    CamParam = CameraParams(2342.561249990927, 1209.151959040735,
                            2338.448312671424, 1055.254852652610, 1.0,
                            -0.014840837133389, -0.021008029929566,
                            3.320024219653553e-04, -0.002187550225028,
                            -0.025059986937316)

    #Subscribe to controller state. Again?
    rospy.Subscriber("controller_state", controllerstate, callback)

    UV = np.zeros([74, 8])
    P = np.zeros([74, 3])

    #Load object points ground tag in panel tag coordinate system from mat file
    loaded_object_points_ground_in_panel_system_stage_1 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system_stage_2 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_1

    #focal length in pixel units, this number is averaged values from f_hat for x and y
    f_hat_u = 2342.561249990927  #2282.523358266698#2281.339593273153 #2446.88
    f_hat_v = 2338.448312671424  #2280.155828279608
    #functions like a gain, used with velocity to track position
    dt = 0.1
    du = 100.0
    dv = 100.0
    dutmp = 100.0
    dvtmp = 100.0
    TimeGain = [0.1, 0.1, 0.1]

    du_array = []
    dv_array = []
    dx_array = []
    iteration = 0
    stage = 1
    step_ts = 0.004

    Kc = 0.0002
    time_save = []
    FTdata_save = []
    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()

    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(),
        np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(),
        np.zeros([3, 1]),
        np.eye(3),
        np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]),
         rg.reshape([3, 1]),
         np.zeros([3, 3]),
         np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)
    FTread_array = []
    FTdata_array = []
    t_now_array = []

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])
    #while (pose_target2.p[2] > 0.185):
    #    while ((du>10) | (dv>10) | (pose_target2.p[2] > 0.172)):    #try changing du and dv to lower values(number of iterations increases)
    while (
        (du > 1) | (dv > 1) and (iteration < 125)
    ):  #try changing du and dv to lower values(number of iterations increases)
        #    while ((np.max(np.abs(dutmp))>0.5) | (np.max(np.abs(dvtmp))>0.5)):    #try changing du and dv to lower values(number of iterations increases)
        iteration += 1
        t_now_array.append(time.time())

        #Go to stage2 of movement(mostly downward z motion)
        if ((du < 2) and (dv < 2) and (stage == 1)):
            #Save pose of marker boards after the iterations end(while in the final hovering position above nest)
            #Get pose of both ground and panel markers from detected corners
            retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
                objPoints_ground, imgPoints_ground, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
            retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
                objPoints_panel, imgPoints_panel, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

            print "============== Observed Pose difference in hovering position"
            observed_tvec_difference = tvec_ground - tvec_panel
            observed_rvec_difference = rvec_ground - rvec_panel
            print "tvec difference: ", observed_tvec_difference
            print "rvec differnece: ", observed_rvec_difference

            #Load ideal pose difference information from file
            loaded_rvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['rvec_difference']
            loaded_tvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['tvec_difference']

            print "============== Ideal Pose difference in hovering position"
            print "tvec difference: ", loaded_tvec_difference
            print "rvec differnece: ", loaded_rvec_difference

            tvec_difference_Above_Nest = loaded_tvec_difference - observed_tvec_difference
            rvec_difference_Above_Nest = loaded_rvec_difference - observed_rvec_difference

            print "============== Difference in pose difference in hovering position"
            print "tvec difference: ", tvec_difference_Above_Nest
            print "rvec differnece: ", rvec_difference_Above_Nest

            #Saving pose information to file
            filename_pose1 = "/home/rpi-cats/Desktop/DJ/Code/Data/Above_Nest_Pose_" + str(
                t1) + ".mat"
            scipy.io.savemat(filename_pose1,
                             mdict={
                                 'tvec_ground_Above_Nest':
                                 tvec_ground,
                                 'tvec_panel_Above_Nest':
                                 tvec_panel,
                                 'Rca_ground_Above_Nest':
                                 Rca_ground,
                                 'Rca_panel_Above_Nest':
                                 Rca_panel,
                                 'tvec_difference_Above_Nest':
                                 tvec_ground - tvec_panel,
                                 'rvec_difference_Above_Nest':
                                 rvec_ground - rvec_panel,
                                 'loaded_tvec_difference':
                                 loaded_tvec_difference,
                                 'loaded_rvec_difference':
                                 loaded_rvec_difference,
                                 'observed_tvec_difference':
                                 observed_tvec_difference,
                                 'observed_rvec_difference':
                                 observed_rvec_difference
                             })

            raw_input("Confirm Stage 2")
            stage = 2
            #			dt=0.02
            loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2
        #print pose_target2.p[2]

        #Print current robot pose at the beginning of this iteration
        Cur_Pose = controller_commander.get_current_pose_msg()
        print "============ Current Robot Pose"
        print Cur_Pose

        #Read new image
        last_ros_image_stamp = object_commander.ros_image_stamp
        try:
            ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            ros_gripper_2_trigger(False)
        except:
            pass
        wait_count = 0
        while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        result = object_commander.ros_image
        #Save
        #        filename = "Acquisition3_%d.jpg" % (time.time())
        #        scipy.misc.imsave(filename, result)
        #Display
        #        cv2.namedWindow('Aqcuired Image',cv2.WINDOW_NORMAL)
        #        cv2.imshow('Acquired Image',result)
        #        cv2.waitKey(1)

        #Detect tag corners in acquired image using aruco
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            result, aruco_dict, parameters=parameters)
        frame_with_markers_and_axis = result

        #Sort corners and ids according to ascending order of ids
        corners_original = copy.deepcopy(corners)
        ids_original = np.copy(ids)
        sorting_indices = np.argsort(ids_original, 0)
        ids_sorted = ids_original[sorting_indices]
        ids_sorted = ids_sorted.reshape([len(ids_original), 1])
        combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
        #        print "combined_list:",combined_list
        combined_list.sort()
        corners_sorted = [x for y, x in combined_list]
        ids = np.copy(ids_sorted)
        corners = copy.deepcopy(corners_sorted)

        #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
        false_ids_ind = np.where(ids > 73)
        mask = np.ones(ids.shape, dtype=bool)
        mask[false_ids_ind] = False
        ids = ids[mask]
        corners = np.array(corners)
        corners = corners[mask.flatten(), :]
        corners = list(corners)

        #Define object and image points of both tags
        objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
            board_ground, corners, ids)
        objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
            board_panel, corners, ids)
        objPoints_ground = objPoints_ground.reshape(
            [objPoints_ground.shape[0], 3])
        imgPoints_ground = imgPoints_ground.reshape(
            [imgPoints_ground.shape[0], 2])
        objPoints_panel = objPoints_panel.reshape(
            [objPoints_panel.shape[0], 3])
        imgPoints_panel = imgPoints_panel.reshape(
            [imgPoints_panel.shape[0], 2])

        #Get pose of both ground and panel markers from detected corners
        retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
            objPoints_ground, imgPoints_ground, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
        retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
            objPoints_panel, imgPoints_panel, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)

        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(
            corners[0:20], 0.04, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(
            corners[20:45], 0.025, CamParam.camMatrix, CamParam.distCoeff)
        tvec_all = np.concatenate(
            (tvec_all_markers_ground, tvec_all_markers_panel), axis=0)
        for i_ids, i_corners, i_tvec in zip(ids, corners, tvec_all):
            #print 'i_corners',i_corners,i_corners.reshape([1,8])
            UV[i_ids, :] = i_corners.reshape(
                [1, 8])  #np.average(i_corners, axis=1)
            P[i_ids, :] = i_tvec

        #used to find the height of the tags and the delta change of height, z height at desired position
        Z = 1 * P[20:40,
                  2]  #- [0.68184539, 0.68560932, 0.68966803, 0.69619578])

        #check if all tags detected
        if retVal_ground != 0 and retVal_panel != 0:
            dutmp = []
            dvtmp = []

            #Pixel estimates of the ideal ground tag location
            reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
                loaded_object_points_ground_in_panel_system.transpose(),
                rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
            reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape(
                [reprojected_imagePoints_ground_2.shape[0], 2])
            #            print "Image Points Ground:", imgPoints_ground
            #            print "Reprojected Image Points Ground2:", reprojected_imagePoints_ground_2
            #            print "Reprojectoin error:",imgPoints_ground-reprojected_imagePoints_ground_2
            #            print "Average Reprojectoin error: ",np.mean(imgPoints_ground-reprojected_imagePoints_ground_2, axis=0)

            #print "Reprojected Image Points Ground2 type:", type(reprojected_imagePoints_ground_2)

            #plot image points for ground tag from corner detection and from re-projections
            for point1, point2 in zip(
                    imgPoints_ground,
                    np.float32(reprojected_imagePoints_ground_2)):
                cv2.circle(frame_with_markers_and_axis, tuple(point1), 5,
                           (0, 0, 255), 3)
                cv2.circle(frame_with_markers_and_axis, tuple(point2), 5,
                           (255, 0, 0), 3)

#            cv2.namedWindow('Aqcuired Image',cv2.WINDOW_NORMAL)
#            cv2.imshow('Acquired Image',frame_with_markers_and_axis)
#            cv2.waitKey(1)
#Save
            filename_image = "/home/rpi-cats/Desktop/DJ/Code/Images/Acquisition_" + str(
                t1) + "_" + str(iteration) + ".jpg"
            scipy.misc.imsave(filename_image, frame_with_markers_and_axis)

            #Go through a particular point in all tags to build the complete Jacobian
            for ic in range(4):
                #uses first set of tags, numbers used to offset camera frame, come from camera parameters
                #UV_target = np.vstack([UV[9:13,2*ic]-1209.151959040735,UV[9:13,2*ic+1]-1055.254852652610]).T - UV_shift[:,(2*ic):(2*ic+2)]		#shift corner estimates to the image centers. Necessary for the image jacobian to work.
                reprojected_imagePoints_ground_2 = np.reshape(
                    reprojected_imagePoints_ground_2, (20, 8))
                UV_target = np.vstack([
                    reprojected_imagePoints_ground_2[:, 2 * ic] -
                    1209.151959040735,
                    reprojected_imagePoints_ground_2[:, 2 * ic + 1] -
                    1055.254852652610
                ]).T
                uc = UV_target[:, 0]
                vc = UV_target[:, 1]

                #                print 'UV_target', UV_target
                UV_current = np.vstack([
                    UV[20:40, 2 * ic] - 1209.151959040735,
                    UV[20:40, 2 * ic + 1] - 1055.254852652610
                ]).T
                #find difference between current and desired tag difference
                delta_UV = UV_target - UV_current
                #                print 'delta_UV: ',ic, delta_UV
                dutmp.append(np.mean(delta_UV[:, 0]))
                dvtmp.append(np.mean(delta_UV[:, 1]))
                for tag_i in range(20):
                    if tag_i == 0:
                        J_cam_tmp = 1.0 * np.array(
                            [[
                                -f_hat_u / Z[tag_i], 0.0, uc[tag_i] / Z[tag_i],
                                uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                (uc[tag_i] * uc[tag_i] / f_hat_u + f_hat_u),
                                vc[tag_i]
                            ],
                             [
                                 0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                 Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v +
                                 f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                 f_hat_v, -uc[tag_i]
                             ]])
                    else:
                        J_cam_tmp = np.concatenate(
                            (J_cam_tmp,
                             1.0 * np.array(
                                 [[
                                     -f_hat_u / Z[tag_i], 0.0,
                                     uc[tag_i] / Z[tag_i],
                                     uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                     (uc[tag_i] * uc[tag_i] / f_hat_u +
                                      f_hat_u), vc[tag_i]
                                 ],
                                  [
                                      0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                      Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v
                                      + f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                      f_hat_v, -uc[tag_i]
                                  ]])),
                            axis=0)
                #camera jacobian
                if ic == 0:
                    J_cam = J_cam_tmp
                    delta_UV_all = delta_UV.reshape(40, 1)
                    UV_target_all = UV_target.reshape(40, 1)
                else:
                    J_cam = np.vstack([J_cam, J_cam_tmp])
                    delta_UV_all = np.vstack(
                        [delta_UV_all, delta_UV.reshape(40, 1)])
                    UV_target_all = np.vstack(
                        [UV_target_all,
                         UV_target.reshape(40, 1)])

            print 'dutmp: ', dutmp
            print 'dvtmp: ', dvtmp
            du = np.mean(np.absolute(dutmp))
            dv = np.mean(np.absolute(dvtmp))
            print 'Average du of all points:', du, 'Average dv of all points:', dv
            du_array.append(du)
            dv_array.append(dv)
            #            print 'delta_UV_all',delta_UV_all
            dx = np.matmul(np.linalg.pinv(J_cam), np.array(delta_UV_all))
            dx_array.append(dx)
            #            print 'dx:',dx             #translational and angular velocity
            #print '1:',dx[0:3,0]
            #print '2:',np.hstack([0,-dx[0:3,0]])
            #print '3:',np.hstack([dx[0:3,0].reshape(3,1),rox.hat(np.array(dx[0:3,0]))])
            #coordinate system change, replace with tf transform
            #dx_w = np.array([dx[3,0],-dx[4,0],-dx[5,0]])        #angular velocity
            #map omega to current quaternion
            #Omega = 0.5*np.vstack([np.hstack([0,-dx_w]),np.hstack([dx_w.reshape(3,1),rox.hat(np.array(dx_w))])])
            #print Omega
            #rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
            #rot = np.matmul(expm(Omega*dt),np.array(rot_current))
            #trans_current = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]
            #trans = trans_current + np.array([dx[0,0],-dx[1,0],-dx[2,0]])*TimeGain

            #pose_target2 = rox.Transform(rox.q2R([rot[0], rot[1], rot[2], rot[3]]), trans)
            #Vz=0
            dx = np.array([
                dx[3, 0], -dx[4, 0], -dx[5, 0], dx[0, 0], -dx[1, 0], -dx[2, 0]
            ])

            if stage == 2:
                T = listener.lookupTransform("base", "link_6", rospy.Time(0))
                rg = 9.8 * np.matmul(
                    np.matmul(T.R, Tran_z).transpose(),
                    np.array([0, 0, 1]).transpose())
                A1 = np.hstack([
                    rox.hat(rg).transpose(),
                    np.zeros([3, 1]),
                    np.eye(3),
                    np.zeros([3, 3])
                ])
                A2 = np.hstack([
                    np.zeros([3, 3]),
                    rg.reshape([3, 1]),
                    np.zeros([3, 3]),
                    np.eye(3)
                ])
                A = np.vstack([A1, A2])
                FTdata_est = np.matmul(A, Vec_wrench)
                FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
                print 'FTread:', FTread

                print 'Z', FTread[-1]
                if FTread[-1] > -100:
                    F_d = -150
                    Kc = 0.0001
                else:
                    Kc = 0.0001
                    F_d = -200

                Vz = Kc * (F_d - FTread[-1])
                print 'Vz', Vz
                dx[-1] = Vz + dx[-1]
                FTread_array.append(FTread)
                FTdata_array.append(FTdata)

            current_joint_angles = controller_commander.get_current_joint_values(
            )
            J = rox.robotjacobian(robot, current_joint_angles)
            joints_vel = np.linalg.pinv(J).dot(np.array(dx))

            print 'vel_norm:', np.linalg.norm(joints_vel)

            t_duration = np.log(np.linalg.norm(joints_vel) * 10 + 1) + 0.1
            if t_duration < 0.1:
                t_duration = 0.1

            goal = trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(dt),
                np.array(current_joint_angles), t_duration)
            '''
            plan=RobotTrajectory()    
            plan.joint_trajectory.joint_names=['joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6']
        
            plan.joint_trajectory.header.frame_id='/world'
            p1=JointTrajectoryPoint()
            p1.positions = current_joint_angles
            p1.velocities = np.zeros((6,))
            p1.accelerations = np.zeros((6,))
            p1.time_from_start = rospy.Duration(0)     
                 
            p2=JointTrajectoryPoint()
            p2.positions = np.array(p1.positions) + joints_vel.dot(dt)
            p2.velocities = np.zeros((6,))
            p2.accelerations = np.zeros((6,))
            p2.time_from_start = rospy.Duration(dt)

            controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])

        
            plan.joint_trajectory.points.append(p1)
            plan.joint_trajectory.points.append(p2)
            '''

            client = actionlib.SimpleActionClient("joint_trajectory_action",
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()

            client.send_goal(goal)
            client.wait_for_result()
            res = client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

            print res

            #break
            #raw_input("confirm move...")
            '''
            print "============ Executing Current Iteration movement"
            try:
                controller_commander.execute(plan)
                #controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
            except:
                pass
            '''

            print 'Current Iteration Finished.'


#            filename = "delta_UV_all.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, delta_UV_all)
#            f_handle.close()
#
#            filename = "UV_target_all.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, UV_target_all)
#            f_handle.close()
#
#            filename = "P.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, P)
#            f_handle.close()
#
#
#            filename = "Robot.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, np.hstack([np.array(trans_current),np.array(rot_current)]))
#            f_handle.close()

#Saving iteration data to file
    filename_data = "/home/rpi-cats/Desktop/DJ/Code/Data/Data_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_data,
                     mdict={
                         'du_array': du_array,
                         'dv_array': dv_array,
                         'dx_array': dx_array
                     })

    #Saving force data to file
    filename_force_data = "/home/rpi-cats/Desktop/DJ/Code/Data/Data_force_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_force_data,
                     mdict={
                         'FTread': FTread_array,
                         'FTdata': FTdata_array,
                         't_now_array': t_now_array
                     })

    #    ####Hovering error calculation
    #    #Read new image
    #    last_ros_image_stamp = object_commander.ros_image_stamp
    #    try:
    #        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
    #        ros_gripper_2_trigger(False)
    #    except:
    #        pass
    #    wait_count=0
    #    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
    #        if wait_count > 50:
    #            raise Exception("Image receive timeout")
    #        time.sleep(0.25)
    #        wait_count += 1
    #    result = object_commander.ros_image
    #
    #    #Detect tag corners in aqcuired image using aruco
    #    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(result, aruco_dict, parameters=parameters)
    #
    #    #Sort corners and ids according to ascending order of ids
    #    corners_original=copy.deepcopy(corners)
    #    ids_original=np.copy(ids)
    #    sorting_indices=np.argsort(ids_original,0)
    #    ids_sorted=ids_original[sorting_indices]
    #    ids_sorted=ids_sorted.reshape([len(ids_original),1])
    #    combined_list=zip(np.ndarray.tolist(ids.flatten()),corners_original)
    #    combined_list.sort()
    #    corners_sorted=[x for y,x in combined_list]
    #    ids=np.copy(ids_sorted)
    #    corners=copy.deepcopy(corners_sorted)
    #
    #    #Remove ids and corresponsing corners not in range (Parasitic detections in random locations in the image)
    #    false_ids_ind = np.where(ids>73)
    #    mask = np.ones(ids.shape, dtype=bool)
    #    mask[false_ids_ind] = False
    #    ids = ids[mask]
    #    corners = np.array(corners)
    #    corners = corners[mask.flatten(),:]
    #    corners = list(corners)
    #
    #    #Define object and image points of both tags
    #    objPoints_ground, imgPoints_ground	=	aruco.getBoardObjectAndImagePoints(board_ground, corners, ids)
    #    objPoints_panel, imgPoints_panel	=	aruco.getBoardObjectAndImagePoints(board_panel, corners, ids)
    #    objPoints_ground=objPoints_ground.reshape([objPoints_ground.shape[0],3])
    #    imgPoints_ground=imgPoints_ground.reshape([imgPoints_ground.shape[0],2])
    #    objPoints_panel=objPoints_panel.reshape([objPoints_panel.shape[0],3])
    #    imgPoints_panel=imgPoints_panel.reshape([imgPoints_panel.shape[0],2])
    #
    #    #Save pose of marker boards after the iterations end(while in the final hovering position above nest)
    #    #Get pose of both ground and panel markers from detected corners
    #    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(objPoints_ground, imgPoints_ground, CamParam.camMatrix, CamParam.distCoeff)
    #    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    #    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(objPoints_panel, imgPoints_panel, CamParam.camMatrix, CamParam.distCoeff)
    #    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)
    #
    #    print"============== Observed Pose difference in hovering position"
    #    observed_tvec_difference=tvec_ground-tvec_panel
    #    observed_rvec_difference=rvec_ground-rvec_panel
    #    print "tvec difference: ",observed_tvec_difference
    #    print "rvec differnece: ",observed_rvec_difference
    #
    #    #Load ideal pose differnece information from file
    #    loaded_rvec_difference = loadmat('/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat')['rvec_difference']
    #    loaded_tvec_difference = loadmat('/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat')['tvec_difference']
    #
    #    print"============== Ideal Pose difference in hovering position"
    #    print "tvec difference: ",loaded_tvec_difference
    #    print "rvec differnece: ",loaded_rvec_difference
    #
    #    print"============== Difference in pose difference in hovering position"
    #    print "tvec difference: ",loaded_tvec_difference-observed_tvec_difference
    #    print "rvec differnece: ",loaded_rvec_difference-observed_rvec_difference
    #
    #    #Saving pose information to file
    #    filename_pose1 = "/home/armabb6640/Desktop/DJ/Code/Data/Above_Nest_Pose_"+str(t1)+".mat"
    #    scipy.io.savemat(filename_pose1, mdict={'tvec_ground_Above_Nest':tvec_ground, 'tvec_panel_Above_Nest':tvec_panel,
    #    'Rca_ground_Above_Nest':Rca_ground, 'Rca_panel_Above_Nest': Rca_panel, 'tvec_difference_Above_Nest': tvec_ground-tvec_panel, 'rvec_difference_Above_Nest': rvec_ground-rvec_panel})

    print "============ Final Push Down to Nest"
    # while (1):
    #DJ Final push from hovering above nest into resting in the nest
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.2, [], [])
    #    pose_target2.p[2] = 0.115
    #    pose_target2.p[2] = 0.15

    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [
        Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
        Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z
    ]
    trans_current = [
        Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
        Cur_Pose.pose.position.z
    ]
    pose_target2.R = rox.q2R(
        [rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] -= 0.11
    #    pose_target2.p[0] += 0.005
    #    pose_target2.p[1] -= 0.002

    #    controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger(False)
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
    false_ids_ind = np.where(ids > 73)
    mask = np.ones(ids.shape, dtype=bool)
    mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the iterations end(while in the final hovering position above nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference - observed_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference - observed_rvec_difference

    #Saving pose information to file
    filename_pose2 = "/home/rpi-cats/Desktop/DJ/Code/Data/In_Nest_Pose_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_pose2,
                     mdict={
                         'tvec_ground_In_Nest': tvec_ground,
                         'tvec_panel_In_Nest': tvec_panel,
                         'Rca_ground_In_Nest': Rca_ground,
                         'Rca_panel_In_Nest': Rca_panel,
                         'tvec_difference_In_Nest': tvec_ground - tvec_panel,
                         'rvec_difference_In_Nest': rvec_ground - rvec_panel,
                         'loaded_tvec_difference': loaded_tvec_difference,
                         'loaded_rvec_difference': loaded_rvec_difference,
                         'observed_tvec_difference': observed_tvec_difference,
                         'observed_rvec_difference': observed_rvec_difference
                     })

    #    print "============ Lift gripper!"
    #    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
    #    raw_input("confirm release vacuum")
    #    #rapid_node.set_digital_io("Vacuum_enable", 0)
    #    print "VACUUM OFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    #    time.sleep(0.5)
    #    pose_target3.p[2] += 0.2
    #
    #
    #    #print 'Target:',pose_target3
    #
    #    #print "============ Executing plan4"
    #    #controller_commander.compute_cartesian_path_and_move(pose_target3, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
Code example #12
File: print_joint.py  Project: johnwason/RR_Project
res = RRN.FindServiceByType("com.robotraconteur.robotics.robot.Robot",
                            ["rr+local", "rr+tcp", "rrs+tcp"])
url = None
for serviceinfo2 in res:
    if robot_name in serviceinfo2.NodeName:
        url = serviceinfo2.ConnectionURL
        break
if url == None:
    print('service not found')
    sys.exit()

####################Start Service and robot setup

robot_sub = RRN.SubscribeService(url)
robot = robot_sub.GetDefaultClientWait(1)

state_w = robot_sub.SubscribeWire("robot_state")

print(robot.robot_info.device_info.device.name)

time.sleep(0.5)
robot_state_wire = state_w.TryGetInValue()
print("wire value set: ", robot_state_wire[0])
robot_state = robot_state_wire[1]
print("kin_chain_tcp: ", robot_state.kin_chain_tcp)
print("robot_joints: ", robot_state.joint_position)
position = robot_state.kin_chain_tcp[0]['position']
orientation = robot_state.kin_chain_tcp[0]['orientation']
print(position)
print(q2R(list(orientation)))
Code example #13

Q=[0.02196692, -0.10959773,  0.99369529, -0.00868731]
P=[1.8475985 , -0.04983688,  0.82486047]

if __name__ == '__main__':
    
    controller_commander=ControllerCommander()
    
    P=np.reshape(P,(3,))    
        
    rospy.init_node('Reset_Start_pos_wason2', anonymous=True)
    
    controller_commander.set_controller_mode(controller_commander.MODE_HALT, 1, [],[])
    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.5, [],[])  
        
    print "============ Printing robot Pose"
    print controller_commander.get_current_pose_msg()  
    #print robot.get_current_state().joint_state.position
    print "============ Generating plan 1"
    
    pose_target=rox.Transform(rox.q2R(Q), np.copy(P))
        
    print 'Target:',pose_target
    
    print "============ Executing plan1"
    controller_commander.plan_and_move(pose_target)        
    print 'Execution Finished.'
  
       
Code example #14

def main():

    #Start timer to measure execution time
    t1 = time.time()

    #Subscribe to controller_state
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)

    #Force-torque
    if not "disable-ft" in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    #Initialize ros node of this process
    rospy.init_node('Placement_DJ_1', anonymous=True)
    process_client = actionlib.SimpleActionClient('process_step',
                                                  ProcessStepAction)
    process_state_pub = rospy.Publisher("GUI_state",
                                        ProcessState,
                                        queue_size=100,
                                        latch=True)
    process_client.wait_for_server()
    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()

    object_commander = ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()

    #subscribe to Gripper camera node for image acquisition
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)

    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/camera_trigger', Trigger)

    ros_gripper_2_trigger = rospy.ServiceProxy(
        'gripper_camera_2/camera_trigger', Trigger)

    #Set controller command mode
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, [], [])
    time.sleep(0.5)

    #open loop set the initial pose
    #Set location above the panel where the end effector goes first (in the world/base frame); this is the ideal location of the panel.
    #Cur_Pose = controller_commander.get_current_pose_msg()
    #print Cur_Pose
    #raw_input("confirm release vacuum")

    tran = np.array(
        [2.15484, 1.21372,
         0.25766])  #np.array([2.17209718963,-1.13702651252,0.224273328841])
    rot = rox.q2R(
        [0.02110, -0.03317, 0.99922, -0.00468]
    )  #rox.q2R([0.0145063655084, -0.0282932350764, 0.999322921073, -0.0185137145776])
    #tran = np.array([2.26476414097,-1.22419669418,0.411353037002])
    #rot = np.array([[-0.9959393, -0.0305329,  0.0846911], [-0.0310315,  0.9995079, -0.0045767], [-0.0845097, -0.0071862, -0.9963967]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target3 = copy.deepcopy(pose_target2)

    ##Execute movement to set location
    print "Executing initial path"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    #Initialize aruco boards and parameters
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters_create()
    parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    parameters.adaptiveThreshWinSizeMax = 30
    parameters.adaptiveThreshWinSizeStep = 7

    # 1st Panel tag info
    board_ground = cv2.aruco.GridBoard_create(4, 4, .04, .0075, aruco_dict, 32)
    board_panel = cv2.aruco.GridBoard_create(8, 3, .025, .0075, aruco_dict, 80)
    #Load object points ground tag in panel tag coordinate system from mat file
    loaded_object_points_ground_in_panel_system_stage_1 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system_stage_2 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_1

    #    #Initialize camera intrinsic parameters #18285636_10_05_2018_5_params
    #18285636_10_05_2018_5_params_111122018
    CamParam = CameraParams(2283.391289766133, 1192.255485086403,
                            2279.484382094687, 1021.399092147012, 1.0,
                            -0.022101211408596, -0.095163053709332,
                            -0.003986991791212, -0.003479613658352,
                            0.179926705467534)

    #Subscribe to controller state. Again?
    rospy.Subscriber("controller_state", controllerstate, callback)

    UV = np.zeros([150, 8])
    P = np.zeros([150, 3])

    #focal length in pixel units, this number is averaged values from f_hat for x and y
    f_hat_u = 2283.391289766133  #2342.561249990927#2282.523358266698#2281.339593273153 #2446.88
    f_hat_v = 2279.484382094687  #2338.448312671424#2280.155828279608
    #functions like a gain, used with velocity to track position
    dt = 0.1
    du = 100.0
    dv = 100.0
    dutmp = 100.0
    dvtmp = 100.0
    #TimeGain = [0.1,0.1, 0.1]
    du_array = []
    dv_array = []
    dx_array = []
    iteration = 0
    stage = 1
    #step_ts = 0.004
    Kc = 0.0002
    #time_save = []
    #FTdata_save = []
    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()

    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(),
        np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(),
        np.zeros([3, 1]),
        np.eye(3),
        np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]),
         rg.reshape([3, 1]),
         np.zeros([3, 3]),
         np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)
    FTread_array = []
    FTdata_array = []
    t_now_array = []
    step_size = []

    cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Image', 490, 410)

    ### PBVS set the initial pose
    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:

        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections at random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Estimate the pose of both ground and panel marker boards from the detected corners
    #(initial position above the nest, before the PBVS adjustment)
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    print "tvec difference: ", tvec_err
    print "rvec differnece: ", rvec_err

    # Adjustment
    print "Adjustment ===================="
    current_joint_angles = controller_commander.get_current_joint_values()
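    #Pure-translation correction built from the tvec error (orientation left unchanged);
    #the +0.03 offset on y and the 0.75 scaling appear to be hand-tuned values from the original code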
    dx = np.array([0, 0, 0, -tvec_err[0], tvec_err[1] + 0.03, tvec_err[2]
                   ]) * 0.75
    joints_vel = QP_abbirb6640(
        np.array(current_joint_angles).reshape(6, 1), np.array(dx))
    goal = trapezoid_gen(
        np.array(current_joint_angles) + joints_vel.dot(1),
        np.array(current_joint_angles), 0.25, np.array(dx))
    client = actionlib.SimpleActionClient("joint_trajectory_action",
                                          FollowJointTrajectoryAction)
    client.wait_for_server()
    client.send_goal(goal)
    client.wait_for_result()
    res = client.get_result()
    if (res.error_code != 0):
        raise Exception("Trajectory execution returned error")

    print res
    #raw_input("confirm move...")
    print "End of Initial Pose ===================="

    ### End of initial pose

    step_size_min = 100000
    stage = 2
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2
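    #Note: stage is set to 2 before entering the servo loop, so the stage-1 branch inside
    #the loop is effectively skipped in this configuration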

    while ((du > 4 or dv > 4) and (iteration < 55)):
        #try changing the du and dv thresholds to lower values (number of iterations increases)
        iteration += 1
        t_now_array.append(time.time())

        #Go to stage 2 of movement (mostly downward z motion)
        if ((du < 4) and (dv < 4) and (stage == 1)):
            #Save pose of marker boards after the stage-1 iterations end (while in the final hovering position above the nest)
            #Get pose of both ground and panel markers from detected corners
            retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
                objPoints_ground, imgPoints_ground, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
            retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
                objPoints_panel, imgPoints_panel, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

            print "============== Observed Pose difference in hovering position"
            observed_tvec_difference = tvec_ground - tvec_panel
            observed_rvec_difference = rvec_ground - rvec_panel
            print "tvec difference: ", observed_tvec_difference
            print "rvec differnece: ", observed_rvec_difference

            #Load ideal pose difference information from file
            loaded_rvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['rvec_difference']
            loaded_tvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['tvec_difference']

            print "============== Ideal Pose difference in hovering position"
            print "tvec difference: ", loaded_tvec_difference
            print "rvec differnece: ", loaded_rvec_difference

            tvec_difference_Above_Nest = loaded_tvec_difference - observed_tvec_difference
            rvec_difference_Above_Nest = loaded_rvec_difference - observed_rvec_difference

            print "============== Difference in pose difference in hovering position"
            print "tvec difference: ", tvec_difference_Above_Nest
            print "rvec differnece: ", rvec_difference_Above_Nest

            #Saving pose information to file
            filename_pose1 = "/home/rpi-cats/Desktop/DJ/Code/Data/Panel1_Above_Nest_Pose_" + str(
                t1) + ".mat"
            scipy.io.savemat(filename_pose1,
                             mdict={
                                 'tvec_ground_Above_Nest':
                                 tvec_ground,
                                 'tvec_panel_Above_Nest':
                                 tvec_panel,
                                 'Rca_ground_Above_Nest':
                                 Rca_ground,
                                 'Rca_panel_Above_Nest':
                                 Rca_panel,
                                 'tvec_difference_Above_Nest':
                                 tvec_ground - tvec_panel,
                                 'rvec_difference_Above_Nest':
                                 rvec_ground - rvec_panel,
                                 'loaded_tvec_difference':
                                 loaded_tvec_difference,
                                 'loaded_rvec_difference':
                                 loaded_rvec_difference,
                                 'observed_tvec_difference':
                                 observed_tvec_difference,
                                 'observed_rvec_difference':
                                 observed_rvec_difference
                             })

            #raw_input("Confirm Stage 2")
            stage = 2
            #			dt=0.02
            loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2
        #print pose_target2.p[2]

        #Print current robot pose at the beginning of this iteration
        Cur_Pose = controller_commander.get_current_pose_msg()
        print "============ Current Robot Pose"
        print Cur_Pose

        #Read new image
        last_ros_image_stamp = object_commander.ros_image_stamp
        try:
            ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            ros_gripper_2_trigger()
        except:
            pass
        wait_count = 0
        while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        result = object_commander.ros_image
        #Save
        #        filename = "Acquisition3_%d.jpg" % (time.time())
        #        scipy.misc.imsave(filename, result)
        #Display
        #        cv2.namedWindow('Aqcuired Image',cv2.WINDOW_NORMAL)
        #        cv2.imshow('Acquired Image',result)
        #        cv2.waitKey(1)

        #Detect tag corners in the acquired image using aruco
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            result, aruco_dict, parameters=parameters)
        frame_with_markers_and_axis = result

        #Sort corners and ids according to ascending order of ids
        corners_original = copy.deepcopy(corners)
        ids_original = np.copy(ids)
        sorting_indices = np.argsort(ids_original, 0)
        ids_sorted = ids_original[sorting_indices]
        ids_sorted = ids_sorted.reshape([len(ids_original), 1])
        combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
        combined_list.sort()
        corners_sorted = [x for y, x in combined_list]
        ids = np.copy(ids_sorted)
        corners = copy.deepcopy(corners_sorted)

        #Remove ids and corresponding corners not in range (parasitic detections at random locations in the image)
        false_ids_ind = np.where(ids > 150)
        mask = np.ones(ids.shape, dtype=bool)
        mask[false_ids_ind] = False
        ids = ids[mask]
        corners = np.array(corners)
        corners = corners[mask.flatten(), :]
        corners = list(corners)

        #Define object and image points of both tags
        objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
            board_ground, corners, ids)
        objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
            board_panel, corners, ids)
        objPoints_ground = objPoints_ground.reshape(
            [objPoints_ground.shape[0], 3])
        imgPoints_ground = imgPoints_ground.reshape(
            [imgPoints_ground.shape[0], 2])
        objPoints_panel = objPoints_panel.reshape(
            [objPoints_panel.shape[0], 3])
        imgPoints_panel = imgPoints_panel.reshape(
            [imgPoints_panel.shape[0], 2])

        #Get pose of both ground and panel markers from detected corners
        retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
            objPoints_ground, imgPoints_ground, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
        retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
            objPoints_panel, imgPoints_panel, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)

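        #Per-marker pose estimates: the first 20 sorted markers are assumed to belong to the
        #ground board (marker side 0.04) and the next 25 to the panel board (marker side 0.025),
        #presumably in meters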
        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(
            corners[0:20], 0.04, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(
            corners[20:45], 0.025, CamParam.camMatrix, CamParam.distCoeff)
        tvec_all = np.concatenate(
            (tvec_all_markers_ground, tvec_all_markers_panel), axis=0)
        for i_ids, i_corners, i_tvec in zip(ids, corners, tvec_all):
            UV[i_ids, :] = i_corners.reshape(
                [1, 8])  #np.average(i_corners, axis=1)
            P[i_ids, :] = i_tvec

        #print 'P',P
        #Depth (z) of the tag markers with IDs 32-47 in the camera frame, used below in the image Jacobian
        Z = 1 * P[32:48, 2]  #- [0.68184539, 0.68560932, 0.68966803, 0.69619578]

        #Proceed only if both board poses were estimated successfully
        if retVal_ground != 0 and retVal_panel != 0:
            dutmp = []
            dvtmp = []

            #Pixel estimates of the ideal ground tag location
            reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
                loaded_object_points_ground_in_panel_system.transpose(),
                rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
            reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape(
                [reprojected_imagePoints_ground_2.shape[0], 2])

            #Plot image points for the ground tag from corner detection (red) and from re-projection (blue)
            for point1, point2 in zip(
                    imgPoints_ground,
                    np.float32(reprojected_imagePoints_ground_2)):
                cv2.circle(frame_with_markers_and_axis, tuple(point1), 5,
                           (0, 0, 255), 3)
                cv2.circle(frame_with_markers_and_axis, tuple(point2), 5,
                           (255, 0, 0), 3)

            cv2.imshow('Image', frame_with_markers_and_axis)
            cv2.waitKey(1)
            #Save
            filename_image = "/home/rpi-cats/Desktop/DJ/Code/Images/Panel1_Acquisition_" + str(
                t1) + "_" + str(iteration) + ".jpg"
            scipy.misc.imsave(filename_image, frame_with_markers_and_axis)

            #Loop over each of the 4 corner indices across all tags to build the complete Jacobian
            for ic in range(4):
                #Shift corner coordinates to be relative to the image principal point (offsets come from the camera intrinsics); required for the image Jacobian
                #UV_target = np.vstack([UV[9:13,2*ic]-1209.151959040735,UV[9:13,2*ic+1]-1055.254852652610]).T - UV_shift[:,(2*ic):(2*ic+2)]		#shift corner estimates to the image centers. Necessary for the image jacobian to work.
                reprojected_imagePoints_ground_2 = np.reshape(
                    reprojected_imagePoints_ground_2, (16, 8))
                UV_target = np.vstack([
                    reprojected_imagePoints_ground_2[:, 2 * ic] -
                    1192.255485086403,
                    reprojected_imagePoints_ground_2[:, 2 * ic + 1] -
                    1021.399092147012
                ]).T
                uc = UV_target[:, 0]
                vc = UV_target[:, 1]

                #                print 'UV_target', UV_target
                UV_current = np.vstack([
                    UV[32:48, 2 * ic] - 1192.255485086403,
                    UV[32:48, 2 * ic + 1] - 1021.399092147012
                ]).T
                #Pixel error between the target (reprojected ideal) and current corner locations
                delta_UV = UV_target - UV_current
                dutmp.append(np.mean(delta_UV[:, 0]))
                dvtmp.append(np.mean(delta_UV[:, 1]))
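                #Build the image-Jacobian (interaction-matrix) rows for this corner of every tag.
                #For a point at pixel offset (u, v) from the principal point with depth Z, the
                #standard point-feature interaction matrix is
                #  [ -f/Z    0     u/Z    u*v/f       -(u^2/f + f)   v  ]
                #  [   0   -f/Z    v/Z    v^2/f + f   -u*v/f        -u  ]
                #which is exactly what the expressions below evaluate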
                for tag_i in range(16):
                    if tag_i == 0:
                        J_cam_tmp = 1.0 * np.array(
                            [[
                                -f_hat_u / Z[tag_i], 0.0, uc[tag_i] / Z[tag_i],
                                uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                (uc[tag_i] * uc[tag_i] / f_hat_u + f_hat_u),
                                vc[tag_i]
                            ],
                             [
                                 0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                 Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v +
                                 f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                 f_hat_v, -uc[tag_i]
                             ]])
                    else:
                        J_cam_tmp = np.concatenate(
                            (J_cam_tmp,
                             1.0 * np.array(
                                 [[
                                     -f_hat_u / Z[tag_i], 0.0,
                                     uc[tag_i] / Z[tag_i],
                                     uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                     (uc[tag_i] * uc[tag_i] / f_hat_u +
                                      f_hat_u), vc[tag_i]
                                 ],
                                  [
                                      0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                      Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v
                                      + f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                      f_hat_v, -uc[tag_i]
                                  ]])),
                            axis=0)
                #Stack the per-corner blocks into the full camera (image) Jacobian and error vector
                if ic == 0:
                    J_cam = J_cam_tmp
                    delta_UV_all = delta_UV.reshape(32, 1)
                    UV_target_all = UV_target.reshape(32, 1)
                else:
                    J_cam = np.vstack([J_cam, J_cam_tmp])
                    delta_UV_all = np.vstack(
                        [delta_UV_all, delta_UV.reshape(32, 1)])
                    UV_target_all = np.vstack(
                        [UV_target_all,
                         UV_target.reshape(32, 1)])

            print 'dutmp: ', dutmp
            print 'dvtmp: ', dvtmp
            du = np.mean(np.absolute(dutmp))
            dv = np.mean(np.absolute(dvtmp))
            print 'Average du of all points:', du, 'Average dv of all points:', dv
            du_array.append(du)
            dv_array.append(dv)

            #print "Jcam",J_cam
            #print "UV",delta_UV_all
            #dx = np.matmul(np.linalg.pinv(J_cam),np.array(delta_UV_all))

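            #Solve for the camera-frame twist that drives the corner pixel errors toward zero.
            #QP_Cam is assumed to solve a constrained least-squares problem of the form
            #min ||J_cam * dx - delta_UV_all||; the commented-out pseudo-inverse line above is
            #the unconstrained equivalent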
            dx = QP_Cam(
                J_cam, delta_UV_all
            )  #np.matmul(np.linalg.pinv(J_cam),np.array(delta_UV_all))
            dx = dx.reshape([6, 1])

            dx_array.append(dx)

            #print '**************Jac:',dx, QP_Cam(J_cam,delta_UV_all)

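            #Reorder and flip the twist components from the camera-frame convention into the
            #base-frame convention expected by QP_abbirb6640 (signs/axes follow the camera
            #mounting; kept exactly as in the original code)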
            dx = np.array([
                dx[3, 0], -dx[4, 0], -dx[5, 0], dx[0, 0], -dx[1, 0], -dx[2, 0]
            ])

            if stage == 2:
                T = listener.lookupTransform("base", "link_6", rospy.Time(0))
                rg = 9.8 * np.matmul(
                    np.matmul(T.R, Tran_z).transpose(),
                    np.array([0, 0, 1]).transpose())
                A1 = np.hstack([
                    rox.hat(rg).transpose(),
                    np.zeros([3, 1]),
                    np.eye(3),
                    np.zeros([3, 3])
                ])
                A2 = np.hstack([
                    np.zeros([3, 3]),
                    rg.reshape([3, 1]),
                    np.zeros([3, 3]),
                    np.eye(3)
                ])
                A = np.vstack([A1, A2])
                FTdata_est = np.matmul(A, Vec_wrench)
                FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
                print 'FTread:', FTread

                print 'Z', FTread[-1]
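                #Select the desired contact force: push harder once contact is detected
                #(a z reading below -100 is assumed to indicate contact), then apply a simple
                #admittance law Vz = Kc * (F_d - Fz) on the tool z axis and superimpose it on
                #the visual-servo command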
                if FTread[-1] > -100:
                    F_d = -150
                    Kc = 0.0001
                else:
                    Kc = 0.0001
                    F_d = -200

                Vz = Kc * (F_d - FTread[-1])
                print 'Vz', Vz
                dx[-1] = Vz + dx[-1]
                FTread_array.append(FTread)
                FTdata_array.append(FTdata)

            current_joint_angles = controller_commander.get_current_joint_values(
            )

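            #Never let the commanded Cartesian step grow: keep the smallest step size seen so
            #far and rescale larger steps down to it, which damps oscillation near convergence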
            step_size_tmp = np.linalg.norm(dx)
            if step_size_tmp <= step_size_min:
                step_size_min = step_size_tmp
            else:
                dx = dx / step_size_tmp * step_size_min

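            #QP_abbirb6640 is assumed to map the Cartesian step dx to joint displacements for
            #the ABB IRB 6640 via a QP (e.g. respecting joint limits); trapezoid_gen builds a
            #trapezoidal-velocity FollowJointTrajectory goal toward the resulting joint target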
            joints_vel = QP_abbirb6640(
                np.array(current_joint_angles).reshape(6, 1), np.array(dx))

            goal = trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(dt),
                np.array(current_joint_angles), 0.25, np.array(dx))
            step_size.append(np.linalg.norm(dx))

            client = actionlib.SimpleActionClient("joint_trajectory_action",
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()

            client.send_goal(goal)
            client.wait_for_result()
            res = client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

            print res

            print 'Current Iteration Finished.'

    #Saving iteration data to file
    filename_data = "/home/rpi-cats/Desktop/YC/Data/Panel1_Data_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_data,
                     mdict={
                         'du_array': du_array,
                         'dv_array': dv_array,
                         'dx_array': dx_array,
                         'step_size': step_size,
                         'iteration': iteration
                     })

    #Saving force data to file
    filename_force_data = "/home/rpi-cats/Desktop/YC/Data/Panel1_Data_force_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_force_data,
                     mdict={
                         'FTread': FTread_array,
                         'FTdata': FTdata_array,
                         't_now_array': t_now_array
                     })

    print '###############################'
    print 'step_size', step_size
    print 'iteration', iteration
    print '###############################'
    '''
    print "============ Final Push Down to Nest"
    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.2, [],[])

    
    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
    trans_current = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]
    pose_target2.R = rox.q2R([rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] -= 0.11
    '''

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in the acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections at random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the servo iterations end (panel now lowered into the nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    print "tvec difference: ", tvec_err
    print "rvec differnece: ", rvec_err

    # Adjustment
    print "Adjustment ===================="
    current_joint_angles = controller_commander.get_current_joint_values()
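    #Final in-nest correction: translate in x/y only; z is left untouched since the panel is
    #assumed to already be seated in the nest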
    dx = np.array([0, 0, 0, -tvec_err[0], tvec_err[1], 0])
    joints_vel = QP_abbirb6640(
        np.array(current_joint_angles).reshape(6, 1), np.array(dx))
    goal = trapezoid_gen(
        np.array(current_joint_angles) + joints_vel.dot(1),
        np.array(current_joint_angles), 0.25, np.array(dx))
    client = actionlib.SimpleActionClient("joint_trajectory_action",
                                          FollowJointTrajectoryAction)
    client.wait_for_server()
    client.send_goal(goal)
    client.wait_for_result()
    res = client.get_result()
    if (res.error_code != 0):
        raise Exception("Trajectory execution returned error")

    print res
    print "End of Adjustment ===================="

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in the acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections at random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the final in-nest adjustment
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    rospy.loginfo("tvec difference: %f, %f, %f", tvec_err[0], tvec_err[1],
                  tvec_err[2])
    rospy.loginfo("rvec difference: %f, %f, %f", rvec_err[0], rvec_err[1],
                  rvec_err[2])
    print "rvec differnece: ", tvec_err
    print "rvec differnece: ", rvec_err

    #Saving pose information to file
    filename_pose2 = "/home/rpi-cats/Desktop/DJ/Code/Data/In_Nest_Pose_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_pose2,
                     mdict={
                         'tvec_ground_In_Nest': tvec_ground,
                         'tvec_panel_In_Nest': tvec_panel,
                         'Rca_ground_In_Nest': Rca_ground,
                         'Rca_panel_In_Nest': Rca_panel,
                         'tvec_difference_In_Nest': tvec_ground - tvec_panel,
                         'rvec_difference_In_Nest': rvec_ground - rvec_panel,
                         'loaded_tvec_difference': loaded_tvec_difference,
                         'loaded_rvec_difference': loaded_rvec_difference,
                         'observed_tvec_difference': observed_tvec_difference,
                         'observed_rvec_difference': observed_rvec_difference
                     })
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
    #    raw_input("confirm release vacuum")
    rapid_node.set_digital_io("Vacuum_enable", 0)
    g = ProcessStepGoal('plan_place_set_second_step', "")

    process_client.send_goal(g)

    #self.in_process=True

    print process_client.get_result()
    print "VACUUM OFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    time.sleep(0.5)

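    #Retract: the hard-coded pose below is immediately overwritten with the current pose
    #raised 0.5 along z, so the gripper simply lifts straight up after releasing the panel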
    tran = np.array([2.17209718963, -1.13702651252, 0.224273328841])
    rot = rox.q2R(
        [0.0145063655084, -0.0282932350764, 0.999322921073, -0.0185137145776])
    pose_target2 = rox.Transform(rot, tran)

    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [
        Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
        Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z
    ]
    trans_current = [
        Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
        Cur_Pose.pose.position.z
    ]
    pose_target2.R = rox.q2R(
        [rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] += 0.5
    #
    #
    #    #print 'Target:',pose_target3
    #
    #    #print "============ Executing plan4"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
    s = ProcessState()
    s.state = "place_set"
    s.payload = "leeward_mid_panel"
    s.target = ""
    process_state_pub.publish(s)
def main():
            
    #Start timer to measure execution time        
    t1 = time.time()
    
    #Subscribe to controller_state 
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)
    
    #Force-torque    
    if not "disable-ft" in sys.argv:
        ft_threshold1=ft_threshold
    else:
        ft_threshold1=[]
    
    #Initialize ros node of this process
    rospy.init_node('user_defined_motion', anonymous=True)
    
    #print "============ Starting setup"   
    
    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()

    object_commander=ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()   
  
    #subscribe to Gripper camera node for image acquisition     
    ros_gripper_2_img_sub=rospy.Subscriber('/gripper_camera_2/image', Image, object_commander.ros_raw_gripper_2_image_cb)
    ros_gripper_2_trigger=rospy.ServiceProxy('/gripper_camera_2/continuous_trigger', CameraTrigger)

    #Set controller command mode
    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.4, ft_threshold_place,[])
    time.sleep(0.5)
    
    
    #Set the location above the panel where the end effector goes first (in the world/base frame); this is the ideal location of the panel.
    current_joint_angles = controller_commander.get_current_joint_values()
    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_o = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
    trans_o = np.array([Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z])

    pos_command = []
    quat_command = []
    pos_read = []
    quat_read = []
    time_all = []


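    #Move the end effector to four heights (0, 0.25, 0.5, 0.75 above the start, presumably meters),
    #logging the commanded and measured pose at each step for later motion-capture comparison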
    for i in range(4):
        rot_set = rox.q2R(rot_o)
        tran_set = trans_o+np.array([0,0,i*0.25])


        pose_target2 = rox.Transform(rot_set, tran_set)
    
         
        #Execute movement to set location
        print "Executing initial path"
        controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
        time.sleep(1)
    
        Cur_Pose = controller_commander.get_current_pose_msg()
        rot_cur = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
        trans_cur = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]

        pos_command.append(tran_set)
        quat_command.append(rot_set)
        pos_read.append(trans_cur)
        quat_read.append(rot_cur)
        time_all.append(time.time())
    

    #Saving iteration data to file
    filename_data = "/home/rpi-cats/Desktop/YC/Data/Motion Capture/Data_"+str(t1)+".mat"
    scipy.io.savemat(filename_data, mdict={'pos_command':pos_command, 'quat_command':quat_command, 'pos_read':pos_read, 'quat_read':quat_read, 'time_all':time_all})
	

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2-t1) + " seconds"
Code example #16
    def runTest(self):

        #msg2q, q2msg
        q = np.random.rand(4)
        q = q / np.linalg.norm(q)

        q_msg = rox_msg.q2msg(q)
        q_msg._check_types()
        np.testing.assert_allclose(q, [q_msg.w, q_msg.x, q_msg.y, q_msg.z],
                                   atol=1e-4)
        q2 = rox_msg.msg2q(q_msg)
        np.testing.assert_allclose(q, q2, atol=1e-4)

        #msg2R, R2msg
        R = rox.q2R(q)
        q_msg_R = rox_msg.R2msg(R)
        q_msg_R._check_types()
        np.testing.assert_allclose(
            q, [q_msg_R.w, q_msg_R.x, q_msg_R.y, q_msg_R.z], atol=1e-4)
        R2 = rox_msg.msg2R(q_msg_R)
        np.testing.assert_allclose(R, R2, atol=1e-4)

        #msg2p, p2msg
        p = np.random.rand(3)
        p_msg = rox_msg.p2msg(p)
        p_msg._check_types()
        np.testing.assert_allclose(p, [p_msg.x, p_msg.y, p_msg.z], atol=1e-4)
        p2 = rox_msg.msg2p(p_msg)
        np.testing.assert_allclose(p, p2, atol=1e-4)

        #transform messages of varying types
        tf = rox.Transform(R, p)
        pose_msg = rox_msg.transform2pose_msg(tf)
        pose_msg._check_types()
        np.testing.assert_allclose(R,
                                   rox_msg.msg2R(pose_msg.orientation),
                                   atol=1e-4)
        np.testing.assert_allclose(p,
                                   rox_msg.msg2p(pose_msg.position),
                                   atol=1e-4)
        pose2 = rox_msg.msg2transform(pose_msg)
        np.testing.assert_allclose(R, pose2.R, atol=1e-4)
        np.testing.assert_allclose(p, pose2.p, atol=1e-4)
        tf_msg = rox_msg.transform2msg(tf)
        tf_msg._check_types()
        np.testing.assert_allclose(R,
                                   rox_msg.msg2R(tf_msg.rotation),
                                   atol=1e-4)
        np.testing.assert_allclose(p,
                                   rox_msg.msg2p(tf_msg.translation),
                                   atol=1e-4)
        tf2 = rox_msg.msg2transform(tf_msg)
        np.testing.assert_allclose(R, tf2.R, atol=1e-4)
        np.testing.assert_allclose(p, tf2.p, atol=1e-4)

        #transform stamped messages of varying types
        tf3 = rox.Transform(R, p, 'parent_link', 'child_link')
        print tf3
        print str(tf3)
        pose_stamped_msg = rox_msg.transform2pose_stamped_msg(tf3)
        pose_stamped_msg._check_types()
        np.testing.assert_allclose(R,
                                   rox_msg.msg2R(
                                       pose_stamped_msg.pose.orientation),
                                   atol=1e-4)
        np.testing.assert_allclose(p,
                                   rox_msg.msg2p(
                                       pose_stamped_msg.pose.position),
                                   atol=1e-4)
        assert pose_stamped_msg.header.frame_id == 'parent_link'
        pose3 = rox_msg.msg2transform(pose_stamped_msg)
        np.testing.assert_allclose(R, pose3.R, atol=1e-4)
        np.testing.assert_allclose(p, pose3.p, atol=1e-4)
        assert pose3.parent_frame_id == 'parent_link'
        tf_stamped_msg = rox_msg.transform2transform_stamped_msg(tf3)
        tf_stamped_msg._check_types()
        np.testing.assert_allclose(R,
                                   rox_msg.msg2R(
                                       tf_stamped_msg.transform.rotation),
                                   atol=1e-4)
        np.testing.assert_allclose(p,
                                   rox_msg.msg2p(
                                       tf_stamped_msg.transform.translation),
                                   atol=1e-4)
        assert tf_stamped_msg.header.frame_id == 'parent_link'
        assert tf_stamped_msg.child_frame_id == 'child_link'
        tf4 = rox_msg.msg2transform(tf_stamped_msg)
        np.testing.assert_allclose(R, tf4.R, atol=1e-4)
        np.testing.assert_allclose(p, tf4.p, atol=1e-4)
        assert tf4.parent_frame_id == 'parent_link'
        assert tf4.child_frame_id == 'child_link'

        #msg2twist, twist2msg
        twist = np.random.rand(6)
        twist_msg = rox_msg.twist2msg(twist)
        twist_msg._check_types()
        np.testing.assert_allclose(twist, [twist_msg.angular.x, twist_msg.angular.y, twist_msg.angular.z, \
                                           twist_msg.linear.x, twist_msg.linear.y, twist_msg.linear.z], \
                                            atol=1e-4)
        twist2 = rox_msg.msg2twist(twist_msg)
        np.testing.assert_allclose(twist, twist2, atol=1e-4)

        #msg2wrench, wrench2msg
        wrench = np.random.rand(6)
        wrench_msg = rox_msg.wrench2msg(wrench)
        wrench_msg._check_types()
        np.testing.assert_allclose(wrench, [wrench_msg.torque.x, wrench_msg.torque.y, wrench_msg.torque.z, \
                                           wrench_msg.force.x, wrench_msg.force.y, wrench_msg.force.z], \
                                            atol=1e-4)
        wrench2 = rox_msg.msg2wrench(wrench_msg)
        np.testing.assert_allclose(wrench, wrench2, atol=1e-4)
Code example #17
File: client_robot.py Project: johnwason/RR_Project
url = None
for serviceinfo2 in res:
    if robot_name in serviceinfo2.Name:
        url = serviceinfo2.ConnectionURL
        break
if url is None:
    print('service not found')
    sys.exit()

####################Start Service and robot setup

robot_sub = RRN.SubscribeService(url)
robot = robot_sub.GetDefaultClientWait(1)

state_w = robot_sub.SubscribeWire("robot_state")

print('device name: ', robot.robot_info.device_info.device.name)

time.sleep(0.5)
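# TryGetInValue returns a tuple whose first element indicates whether the wire currently
# holds a valid value; the value itself is the second element (non-blocking read)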
robot_state_wire = state_w.TryGetInValue()
if robot_state_wire[0]:
    robot_state = robot_state_wire[1]
    print("robot_joints: ", robot_state.joint_position)

    position = robot_state.kin_chain_tcp[0]['position']
    orientation = robot_state.kin_chain_tcp[0]['orientation']
    print('eef position: ', position)
    print('eef orientation: ', q2R(list(orientation)))
else:
    print("wire value not set")
Code example #18
 def quaternion_to_rpy(self, rr_quaternion):
     return rox.R2rpy(rox.q2R(self.quaternion_to_q(rr_quaternion)))
Code example #19
 def quaternion_to_R(self, rr_quaternion):
     return rox.q2R(self.quaternion_to_q(rr_quaternion))