Example #1
0
 def detect_marker(self, img, marker):
         
     camMatrix=np.reshape(self.camera_info.K,(3,3))
     distCoeffs=np.array(self.camera_info.D)
         
     parameters =  cv2.aruco.DetectorParameters_create()
     parameters.cornerRefinementWinSize=32
     parameters.cornerRefinementMethod=cv2.aruco.CORNER_REFINE_CONTOUR        
     corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(img, self.aruco_dict, parameters=parameters)
     
     img2=cv2.aruco.drawDetectedMarkers(img, corners,ids)
     img3=cv2.resize(img2,(0,0), fx=0.25,fy=0.25)
     cv2.imshow("",img3)
     cv2.waitKey(1)
     
     board = self.get_aruco_gridboard(marker)
     
     retval, rvec, tvec = cv2.aruco.estimatePoseBoard(corners, ids, board, camMatrix, distCoeffs)
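     # estimatePoseBoard returns the number of board markers used in the estimate;
     # zero or less means the board was not found in this image.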
     
     if (retval <= 0):
         #cv2.waitKey()
         raise Exception("Invalid image")
     
     Ra, b = cv2.Rodrigues(rvec)
     a_pose=rox.Transform(Ra,tvec)
     
     frame_with_markers_and_axis = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
     frame_with_markers_and_axis = cv2.aruco.drawAxis(frame_with_markers_and_axis, camMatrix, distCoeffs, rvec, tvec, 0.2)
     frame_with_markers_and_axis=cv2.resize(frame_with_markers_and_axis,(0,0), fx=0.25,fy=0.25)
     cv2.imshow("transform", frame_with_markers_and_axis)
     cv2.waitKey(1)
     
     return a_pose
Example #2
0
    def test_attached_collision_object(self):

        expected_scene_1 = rospy.wait_for_message("/expected_planning_scene_1",
                                                  PlanningScene,
                                                  timeout=10)
        scene_1 = rospy.wait_for_message('/planning_scene',
                                         PlanningScene,
                                         timeout=10)
        self._assert_planning_scene_equal(expected_scene_1, scene_1)

        rospy.sleep(rospy.Duration(1))

        update_pose_proxy = rospy.ServiceProxy("update_payload_pose",
                                               UpdatePayloadPose)

        payload2_to_gripper_target_tf = self.tf_listener.lookupTransform(
            "payload2", "payload2_gripper_target", rospy.Time(0))

        req = UpdatePayloadPoseRequest()
        req.header.frame_id = "vacuum_gripper_tool"
        req.name = "payload2"
        req.pose = rox_msg.transform2pose_msg(
            payload2_to_gripper_target_tf.inv())
        req.confidence = 0.5
        res = update_pose_proxy(req)
        assert res.success

        expected_scene_2 = rospy.wait_for_message("/expected_planning_scene_2",
                                                  PlanningScene,
                                                  timeout=10)
        scene_2 = rospy.wait_for_message('/planning_scene',
                                         PlanningScene,
                                         timeout=10)
        self._assert_planning_scene_equal(expected_scene_2, scene_2)

        world_to_fixture2_payload2_target_tf = self.tf_listener.lookupTransform(
            "world", "fixture2_payload2_target", rospy.Time(0))

        #Add in an offset to represent a final placement error
        fixture2_payload2_target_to_payload2_tf = rox.Transform(
            rox.rot([0, 0, 1], np.deg2rad(5)), [0.1, 0, 0],
            "fixture2_payload2_target", "payload2")

        req2 = UpdatePayloadPoseRequest()
        req2.header.frame_id = "world"
        req2.name = "payload2"
        req2.pose = rox_msg.transform2pose_msg(
            world_to_fixture2_payload2_target_tf *
            fixture2_payload2_target_to_payload2_tf)
        res2 = update_pose_proxy(req2)
        assert res2.success

        expected_scene_3 = rospy.wait_for_message("/expected_planning_scene_3",
                                                  PlanningScene,
                                                  timeout=10)
        scene_3 = rospy.wait_for_message('/planning_scene',
                                         PlanningScene,
                                         timeout=10)
        self._assert_planning_scene_equal(expected_scene_3, scene_3)
 def _xyz_rpy_to_rox_transform(self,
                               xyz,
                               rpy,
                               parent_frame_id=None,
                               child_frame_id=None):
     p = xyz
     R = rox.rpy2R(rpy)
     return rox.Transform(R, p, parent_frame_id, child_frame_id)
Example #4
0
def _calibrate_camera_extrinsic(intrinsic_calib, image, board,
                                camera_local_device_name):

    # TODO: verify calibration data

    mtx = intrinsic_calib.K
    dist_rr = intrinsic_calib.distortion_info.data
    dist = np.array(
        [dist_rr.k1, dist_rr.k2, dist_rr.p1, dist_rr.p2, dist_rr.k3],
        dtype=np.float64)

    image_util = ImageUtil()
    frame = image_util.compressed_image_to_array(image)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if board == "chessboard":
        width = 7
        height = 6
        square_size = 0.03
    else:
        raise RR.InvalidOperationException(
            f"Invalid calibration board {board}")

    ret, corners = cv2.findChessboardCorners(gray, (width, height), None)
    assert ret, "Could not find calibration target"

    objp = np.zeros((height * width, 3), np.float32)
    objp[:, :2] = np.mgrid[0:width, 0:height].T.reshape(-1, 2)

    objp = objp * square_size

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

    ret, rvecs, tvecs = cv2.solvePnP(objp, corners2, mtx, dist)

    R = cv2.Rodrigues(rvecs.flatten())[0]

    R_landmark = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]], dtype=np.float64)
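    # R_landmark is a fixed re-orientation of the board frame (swap x and y, flip z)
    # applied below before expressing the camera pose in the "world" frame.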

    R_cam1 = R.transpose()
    p_cam1 = -R.transpose() @ tvecs

    R_cam = R_landmark.transpose() @ R_cam1
    p_cam = R_landmark.transpose() @ p_cam1

    cv_image2 = cv2.aruco.drawAxis(frame, mtx, dist,
                                   cv2.Rodrigues(R_cam.transpose())[0],
                                   -R_cam.transpose() @ p_cam, 0.1)

    T = rox.Transform(R_cam, p_cam, "world", camera_local_device_name)

    geom_util = GeometryUtil()
    cov = np.eye(6) * 1e-5
    return geom_util.rox_transform_to_named_pose(
        T), cov, image_util.array_to_compressed_image_jpg(cv_image2), 0.0
Example #5
0
    def get_object_gripper_target_pose(self, key):

        object_pose = self.get_object_pose(key)

        (trans1, rot1) = self.listener.lookupTransform(key,
                                                       key + "_gripper_target",
                                                       rospy.Time(0))
        tag_rel_pose = rox.Transform(
            rox.q2R([rot1[3], rot1[0], rot1[1], rot1[2]]), trans1)
        return object_pose * tag_rel_pose
def _marker_corner_poses(T_marker, marker_size):
    corners = np.array([[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]
                        ]) * marker_size / 2.0
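    # Four marker corners in the marker's local x-y plane (z = 0), scaled to half the
    # marker edge length; each is returned below as a pose relative to T_marker.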

    ret = []

    for i in range(4):
        ret.append(T_marker *
                   rox.Transform(np.eye(3), corners[i, :].flatten()))

    return ret
def _do_transform_test(to_rr, from_rr, rr_type, node):
    rox_transform = rox.Transform(rox.rpy2R(np.random.rand(3)),
                                  np.random.rand(3))
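    # Pack the converted value into a Robot Raconteur message element and unpack it
    # again to verify the conversion round-trips without loss.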
    rr_val = to_rr(rox_transform)
    rr_msg = PackMessageElement(rr_val,
                                f"com.robotraconteur.geometry.{rr_type}",
                                node=node)
    rr_msg.UpdateData()
    rr_val2 = UnpackMessageElement(rr_msg, node=node)
    rox_transform2 = from_rr(rr_val2)

    np.testing.assert_almost_equal(rox_transform.R, rox_transform2.R)
    np.testing.assert_almost_equal(rox_transform.p, rox_transform2.p)
    def compute_step_gripper_target_pose(self,
                                         img,
                                         Kp,
                                         no_z=False,
                                         z_offset=0):

        fixed_marker_transform, payload_marker_transform, error_transform = self.compute_error_transform(
            img)

        if no_z:
            error_transform.p[2] = 0
        else:
            error_transform.p[2] -= z_offset

        gripper_to_camera_tf = self.tf_listener.lookupTransform(
            "vacuum_gripper_tool", "gripper_camera_2", rospy.Time(0))

        world_to_vacuum_gripper_tool_tf = self.tf_listener.lookupTransform(
            "world", "vacuum_gripper_tool", rospy.Time(0))

        #Scale by Kp
        k, theta = rox.R2rot(error_transform.R)
        r = np.multiply(k * theta, Kp[0:3])
        r_norm = np.linalg.norm(r)
        if (r_norm < 1e-6):
            error_transform2_R = np.eye(3)
        else:
            error_transform2_R = rox.rot(r / r_norm, r_norm)
        error_transform2_p = np.multiply(error_transform.p, (Kp[3:6]))

        error_transform2 = rox.Transform(error_transform2_R,
                                         error_transform2_p)
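        # Apply the scaled error in the fixed-marker frame, then map the resulting
        # correction back into a world-frame gripper target pose.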

        gripper_to_fixed_marker_tf = gripper_to_camera_tf * fixed_marker_transform
        gripper_to_desired_fixed_marker_tf = gripper_to_fixed_marker_tf * error_transform2

        #print gripper_to_fixed_marker_tf

        ret = world_to_vacuum_gripper_tool_tf * (
            gripper_to_desired_fixed_marker_tf *
            gripper_to_fixed_marker_tf.inv()).inv()

        #print world_to_vacuum_gripper_tool_tf
        #print ret

        #print error_transform

        return ret, error_transform
Example #9
0
 def release_suction_cups(self):
     ################## RELEASE SUCTION CUPS AND LIFT THE GRIPPER ################## 	
     rospy.loginfo("============ Release Suction Cups...")
     self.controller_commander.set_controller_mode(self.controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
     self.rapid_node.set_digital_io("Vacuum_enable", 0)
     #g = ProcessStepGoal('plan_place_set_second_step',"")
     #process_client.send_goal(g)
     time.sleep(0.5)
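     # Read the current tool pose (quaternion ordered w, x, y, z), raise the target
     # 0.25 m in z, and move there to lift the gripper.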
 
     Cur_Pose = self.controller_commander.get_current_pose_msg()
     rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
     trans_current = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]
     pose_target2 = rox.Transform(rox.q2R([rot_current[0], rot_current[1], rot_current[2], rot_current[3]]), trans_current)
     pose_target2.p[2] += 0.25
 
     rospy.loginfo("============ Lift gripper...")
     #TODO: Change to trajopt planning
     self.controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
Example #10
0
def compute_step_gripper_target_pose(img, fixed_marker, payload_marker, desired_transform, \
                 camera_info, aruco_dict, Kp, tf_listener):

    fixed_marker_transform, error_transform = compute_error_transform(img, fixed_marker, payload_marker, \
                                              desired_transform, camera_info, aruco_dict)

    gripper_to_camera_tf = tf_listener.lookupTransform("vacuum_gripper_tool",
                                                       "gripper_camera_2",
                                                       rospy.Time(0))

    world_to_vacuum_gripper_tool_tf = tf_listener.lookupTransform(
        "world", "vacuum_gripper_tool", rospy.Time(0))

    #Scale by Kp
    k, theta = rox.R2rot(error_transform.R)
    r = np.multiply(k * theta, Kp[0:3])
    r_norm = np.linalg.norm(r)
    if (r_norm < 1e-6):
        error_transform2_R = np.eye(3)
    else:
        error_transform2_R = rox.rot(r / r_norm, r_norm)
    error_transform2_p = np.multiply(error_transform.p, (Kp[3:6]))

    error_transform2 = rox.Transform(error_transform2_R, error_transform2_p)

    gripper_to_fixed_marker_tf = gripper_to_camera_tf * fixed_marker_transform
    gripper_to_desired_fixed_marker_tf = gripper_to_fixed_marker_tf * error_transform2

    #print gripper_to_fixed_marker_tf

    ret = world_to_vacuum_gripper_tool_tf * (
        gripper_to_desired_fixed_marker_tf *
        gripper_to_fixed_marker_tf.inv()).inv()

    #print world_to_vacuum_gripper_tool_tf
    #print ret

    print error_transform

    return ret
    def plan_reset_position(self, goal=None):

        self._begin_step(goal)
        try:
            Q = [0.02196692, -0.10959773, 0.99369529, -0.00868731]
            P = [1.8475985, -0.04983688, 0.82486047]

            rospy.loginfo("Planning to reset position")
            self.state = "reset_position"
            self.process_index = 0

            pose_target = rox.Transform(rox.q2R(Q), np.copy(P))

            path = self._plan(pose_target, config="reposition_robot")

            self.plan_dictionary['reset_position'] = path

            self._step_complete(goal)

        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)
Example #12
0
def _origin_to_pose(origin):
    R = rox.rpy2R(origin.rpy) if origin.rpy is not None else np.eye(3)
    p = origin.xyz if origin.xyz is not None else np.zeros((3, ))
    t = rox.Transform(R, p)
    return rox_msg.transform2pose_msg(t)
def update_ik_info(R_d, p_d):
    # R_d, p_d: Desired orientation and position
    global robot
    global d_q  # Get Current Joint angles in radian ndarray
    global num_joints

    q_cur = d_q  # initial guess on the current joint angles
    q_cur = q_cur.reshape((num_joints, 1))

    epsilon = 0.001  # Damping Constant
    Kq = epsilon * np.eye(
        len(q_cur)
    )  # small value to make sure positive definite used in Damped Least Square
    # print_div( "<br> Kq " + str(Kq) ) # DEBUG

    max_steps = 200  # number of steps to for convergence

    # print_div( "<br> q_cur " + str(q_cur) ) # DEBUG

    itr = 0  # Iterations
    converged = False
    while itr < max_steps and not converged:

        pose = rox.fwdkin(robot, q_cur)
        R_cur = pose.R
        p_cur = pose.p

        #calculate current Jacobian
        J0T = rox.robotjacobian(robot, q_cur)

        # Transform Jacobian to End effector frame from the base frame
        Tr = np.zeros((6, 6))
        Tr[:3, :3] = R_cur.T
        Tr[3:, 3:] = R_cur.T
        J0T = Tr @ J0T
        # print_div( "<br> J0T " + str(J0T) ) # DEBUG

        # Error in position and orientation
        # ER = np.matmul(R_cur, np.transpose(R_d))
        ER = np.matmul(np.transpose(R_d), R_cur)
        #print_div( "<br> ER " + str(ER) ) # DEBUG
        EP = R_cur.T @ (p_cur - p_d)
        #print_div( "<br> EP " + str(EP) ) # DEBUG

        #decompose ER to (k,theta) pair
        k, theta = rox.R2rot(ER)
        # print_div( "<br> k " + str(k) ) # DEBUG
        # print_div( "<br> theta " + str(theta) ) # DEBUG

        ## set up s for different norm for ER
        # s=2*np.dot(k,np.sin(theta)) #eR1
        # s = np.dot(k,np.sin(theta/2))         #eR2
        s = np.sin(theta / 2) * np.array(k)  #eR2
        # s=2*theta*k              #eR3
        # s=np.dot(J_phi,phi)              #eR4
        # print_div( "<br> s " + str(s) ) # DEBUG

        alpha = 1  # Step size
        # Damped least squares step for iterative inverse kinematics
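        # delta = alpha * (Kq + J0T^T J0T)^-1 J0T^T [s; EP]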
        delta = alpha * (np.linalg.inv(Kq + J0T.T @ J0T) @ J0T.T @ np.hstack(
            (s, EP)).T)
        # print_div( "<br> delta " + str(delta) ) # DEBUG

        q_cur = q_cur - delta.reshape((num_joints, 1))

        # Convergence check on the absolute orientation/position error
        converged = (np.abs(np.hstack((s, EP))) < 0.0001).all()
        # print_div( "<br> converged? " + str(converged) ) # DEBUG

        itr += 1  # Increase the iteration

    joints_text = ""
    for i in q_cur:
        joints_text += "(%.3f, %.3f) " % (np.rad2deg(i), i)
    print_div_ik_info(
        str(rox.Transform(R_d, p_d)) + "<br>" + joints_text + "<br>" +
        str(converged) + ", itr = " + str(itr))
    return q_cur, converged
Example #14
0
def main():

    t1 = time.time()

    if not "disable-ft" in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    rospy.init_node('Vision_MoveIt_new_Cam_wason2', anonymous=True)

    print "============ Starting setup"

    listener = PayloadTransformListener()

    rapid_node = rapid_node_pkg.AbbIrc5RAPIDNodeCommander()
    controller_commander = controller_commander_pkg.arm_composites_manufacturing_controller_commander(
    )

    object_commander = ObjectRecognitionCommander()
    '''
    object_target=object_commander.get_object_gripper_target_pose("leeward_mid_panel")
    
    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 1.2, ft_threshold1)  
    
    print "============ Printing robot Pose"
    print controller_commander.get_current_pose_msg()  
    #print robot.get_current_state().joint_state.position
    print "============ Generating plan 1"
    
    pose_target=copy.deepcopy(object_target)
    pose_target.p[2] += 0.8
  
    print 'Target:',pose_target
    
    print "============ Executing plan1"
    controller_commander.plan_and_move(pose_target)        
    print 'Execution Finished.'
    
    ########## Vertical Path 1 ############

    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.8, [])
    
    print "============ Printing robot Pose"
    print controller_commander.get_current_pose_msg()  
    print "============ Generating plan 2"

    pose_target2=copy.deepcopy(object_target)
    pose_target2.p[2] += 0.15
    
    print 'Target:',pose_target2
    
    print "============ Executing plan2"
    controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
    print 'Execution Finished.'
        
    ########## Vertical Path 2 ############

    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.4, ft_threshold1)
    
    print "============ Printing robot Pose"
    print controller_commander.get_current_pose_msg()  
    print "============ Generating plan 3"

    pose_target2=copy.deepcopy(object_target)
    pose_target2.p[2] -= 0.15
    
    print 'Target:',pose_target2
    
    print "============ Executing plan3"
    controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
    print 'Execution Finished.'
    
    ########## Lift Path ############

    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])

    print "============ Lift panel!"
            
    rapid_node.set_digital_io("Vacuum_enable", 1)
    time.sleep(0.5)
    pose_target3=copy.deepcopy(object_target)
    pose_target3.p[2] += 0.8
            
    
    print 'Target:',pose_target3
    
    print "============ Executing plan4"
    controller_commander.compute_cartesian_path_and_move(pose_target3, avoid_collisions=False)
    '''

    print "=========== Do place!"
    print ""

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.8, [])

    print "============ Generating plan 5"
    '''
    panel_target_pose = listener.lookupTransform("world", "panel_nest_leeward_mid_panel_target", rospy.Time(0))
    #panel_target_pose=rox.Pose(rox.q2R([rot1[3], rot1[0], rot1[1], rot1[2]]), trans1)
    panel_gripper_target_pose = listener.lookupTransform("leeward_mid_panel", "leeward_mid_panel_gripper_target", rospy.Time(0))
    #panel_gripper_target_pose=rox.Pose(rox.q2R([rot2[3], rot2[0], rot2[1], rot2[2]]), trans2)
    pose_target=panel_target_pose * panel_gripper_target_pose
    print pose_target.R
    print pose_target.p


    pose_target2=copy.deepcopy(pose_target)
    pose_target2.p[2] += 0.5
    '''
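    # Hard-coded placement target pose in the world frame, used in place of the
    # tf lookups in the commented-out block above.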
    rot = np.array([[-0.99804142, 0.00642963, 0.06222524],
                    [0.00583933, 0.99993626, -0.00966372],
                    [-0.06228341, -0.00928144, -0.99801535]])
    tran = [2.197026484647054, 1.2179574262842452, 0.12376598588449844]
    pose_target = rox.Transform(rot, tran)
    pose_target.p = np.array(
        [2.197026484647054, 1.2179574262842452, 0.12376598588449844])
    pose_target.R = np.array([[-0.99804142, 0.00642963, 0.06222524],
                              [0.00583933, 0.99993626, -0.00966372],
                              [-0.06228341, -0.00928144, -0.99801535]])

    pose_target2 = copy.deepcopy(pose_target)
    pose_target2.p[2] += 0.35

    print "============ Executing plan 5"
    controller_commander.plan_and_move(pose_target2)
    print "============ Executing plan 6"
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.8, [])

    pose_target2 = copy.deepcopy(pose_target)
    pose_target2.p[2] += 0.10

    Cur_Pose = controller_commander.get_current_pose_msg()
    print "============ Printing robot Pose"
    print Cur_Pose
    print "============ Generating plan 7"
    Tca1, Tca2 = CameraService()
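    # Note: CameraService(), Ttc2, dR_desired and dP_desired are defined elsewhere in
    # this script; CameraService() appears to return two 4x4 camera-to-tag transforms.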

    #(trans3,rot3) = listener.lookupTransform("world", "link_6", rospy.Time(0))
    Tot_pose = listener.lookupTransform("world", "link_6", rospy.Time(0))
    #Tot_pose=rox.Pose(rox.q2R([rot3[3], rot3[0], rot3[1], rot3[2]]), trans3)

    Tca1_pose = rox.Transform(Tca1[0:3, 0:3], Tca1[0:3, 3])
    Tca2_pose = rox.Transform(Tca2[0:3, 0:3], Tca2[0:3, 3])
    Ttc_pose = rox.Transform(Ttc2[0:3, 0:3], Ttc2[0:3, 3])

    Toa1 = Tot_pose * Ttc_pose * Tca1_pose
    Toa2 = Tot_pose * Ttc_pose * Tca2_pose

    dP = Toa1.p - Toa2.p
    dR = np.matmul(Toa1.R, Toa2.R.T)
    print 'dP', dP
    print 'dR', dR

    pose_target2.R = np.matmul(pose_target2.R, np.matmul(dR_desired, dR.T).T)
    pose_target2.p = pose_target2.p - (dP - dP_desired)
    print 'Z:', pose_target2.p[2]
    pose_target2.p[2] = 0.1

    pose_target3 = copy.deepcopy(pose_target)
    pose_target3.p[2] -= 0.15

    print 'Target:', pose_target2.R, pose_target2.p
    print 'Compare:', pose_target3.R, pose_target3.p

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, ft_threshold_place)

    print "============ Executing plan3"
    try:
        controller_commander.compute_cartesian_path_and_move(
            pose_target2, avoid_collisions=False)
    except:
        pass
    print 'Execution Finished.'
    '''
    Tca1,Tca2 = CameraService()
    Tot_pose = listener.lookupTransform("world", "link_6", rospy.Time(0))
    #Tot_pose=rox.Pose(rox.q2R([rot3[3], rot3[0], rot3[1], rot3[2]]), trans3)
    Tca1_pose = rox.Transform(Tca1[0:3,0:3], Tca1[0:3,3])
    Tca2_pose = rox.Transform(Tca2[0:3,0:3], Tca2[0:3,3])
    Ttc_pose = rox.Transform(Ttc2[0:3,0:3], Ttc2[0:3,3])

    Toa1 = Tot_pose * Ttc_pose * Tca1_pose
    Toa2 = Tot_pose * Ttc_pose * Tca2_pose

    dP = Toa1.p-Toa2.p
    dR = np.matmul(Toa1.R,Toa2.R.T)
    print 'dP',dP.shape
    print 'dR',dR.shape
    '''
    tmp = np.hstack([dR, np.reshape(dP, [3, 1])])

    filename = "dR_dP.txt"
    f_handle = open(filename, 'a')
    np.savetxt(f_handle, tmp)
    f_handle.close()
    print controller_commander.get_current_pose_msg()
    print "============ Lift gripper!"
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
    #raw_input("confirm release vacuum")
    rapid_node.set_digital_io("Vacuum_enable", 0)
    print "VACUUM OFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    time.sleep(0.5)
    pose_target3 = copy.deepcopy(pose_target)
    pose_target3.p[2] += 0.35

    print 'Target:', pose_target3

    print "============ Executing plan4"
    controller_commander.compute_cartesian_path_and_move(
        pose_target3, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
def main():

    #Start timer to measure execution time
    t1 = time.time()

    #Subscribe to controller_state
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)

    #Force-torque
    if not "disable-ft" in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    #Initialize ros node of this process
    rospy.init_node('Placement_DJ_1', anonymous=True)
    process_client = actionlib.SimpleActionClient('process_step',
                                                  ProcessStepAction)
    process_state_pub = rospy.Publisher("GUI_state",
                                        ProcessState,
                                        queue_size=100,
                                        latch=True)
    process_client.wait_for_server()
    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()

    object_commander = ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()

    #subscribe to Gripper camera node for image acquisition
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)

    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/camera_trigger', Trigger)

    ros_gripper_2_trigger = rospy.ServiceProxy(
        'gripper_camera_2/camera_trigger', Trigger)

    #Set controller command mode
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, [], [])
    time.sleep(0.5)

    #Open loop: set the initial pose
    #Set the location above the panel where the end effector goes first (in the world/base frame); ideal location of the panel.
    #Cur_Pose = controller_commander.get_current_pose_msg()
    #print Cur_Pose
    #raw_input("confirm release vacuum")

    tran = np.array(
        [2.15484, 1.21372,
         0.25766])  #np.array([2.17209718963,-1.13702651252,0.224273328841])
    rot = rox.q2R(
        [0.02110, -0.03317, 0.99922, -0.00468]
    )  #rox.q2R([0.0145063655084, -0.0282932350764, 0.999322921073, -0.0185137145776])
    #tran = np.array([2.26476414097,-1.22419669418,0.411353037002])
    #rot = np.array([[-0.9959393, -0.0305329,  0.0846911], [-0.0310315,  0.9995079, -0.0045767], [-0.0845097, -0.0071862, -0.9963967]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target3 = copy.deepcopy(pose_target2)

    ##Execute movement to set location
    print "Executing initial path"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    #Initialize aruco boards and parameters
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters_create()
    parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    parameters.adaptiveThreshWinSizeMax = 30
    parameters.adaptiveThreshWinSizeStep = 7

    # 1st Panel tag info
    board_ground = cv2.aruco.GridBoard_create(4, 4, .04, .0075, aruco_dict, 32)
    board_panel = cv2.aruco.GridBoard_create(8, 3, .025, .0075, aruco_dict, 80)
    #Load object points of ground tag in panel tag coordinate system from .mat file
    loaded_object_points_ground_in_panel_system_stage_1 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system_stage_2 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_1

    #    #Initialize camera intrinsic parameters #18285636_10_05_2018_5_params
    #18285636_10_05_2018_5_params_111122018
    CamParam = CameraParams(2283.391289766133, 1192.255485086403,
                            2279.484382094687, 1021.399092147012, 1.0,
                            -0.022101211408596, -0.095163053709332,
                            -0.003986991791212, -0.003479613658352,
                            0.179926705467534)

    #Subscribe to controller state. Again?
    rospy.Subscriber("controller_state", controllerstate, callback)

    UV = np.zeros([150, 8])
    P = np.zeros([150, 3])

    #Focal lengths in pixel units; these are averaged f_hat values for x and y
    f_hat_u = 2283.391289766133  #2342.561249990927#2282.523358266698#2281.339593273153 #2446.88
    f_hat_v = 2279.484382094687  #2338.448312671424#2280.155828279608
    #functions like a gain, used with velocity to track position
    dt = 0.1
    du = 100.0
    dv = 100.0
    dutmp = 100.0
    dvtmp = 100.0
    #TimeGain = [0.1,0.1, 0.1]
    du_array = []
    dv_array = []
    dx_array = []
    iteration = 0
    stage = 1
    #step_ts = 0.004
    Kc = 0.0002
    #time_save = []
    #FTdata_save = []
    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()
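    # Gravity compensation model for the force/torque sensor: rg is the gravity vector
    # rotated into the sensor frame via Tran_z, and A maps the identified payload
    # parameters in Vec_wrench to the expected static wrench, which is subtracted from
    # the raw FTdata readings later in the loop.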

    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(),
        np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(),
        np.zeros([3, 1]),
        np.eye(3),
        np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]),
         rg.reshape([3, 1]),
         np.zeros([3, 3]),
         np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)
    FTread_array = []
    FTdata_array = []
    t_now_array = []
    step_size = []

    cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Image', 490, 410)

    ### PBVS set the initial pose
    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:

        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections at random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the iterations end (while in the final hovering position above nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec difference: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec difference: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    print "tvec difference: ", tvec_err
    print "rvec difference: ", rvec_err

    # Adjustment
    print "Adjustment ===================="
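    # Convert the observed tag pose error into a Cartesian correction dx, map it to a
    # joint-space step with QP_abbirb6640, and execute it as a trapezoidal joint trajectory.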
    current_joint_angles = controller_commander.get_current_joint_values()
    dx = np.array([0, 0, 0, -tvec_err[0], tvec_err[1] + 0.03, tvec_err[2]
                   ]) * 0.75
    joints_vel = QP_abbirb6640(
        np.array(current_joint_angles).reshape(6, 1), np.array(dx))
    goal = trapezoid_gen(
        np.array(current_joint_angles) + joints_vel.dot(1),
        np.array(current_joint_angles), 0.25, np.array(dx))
    client = actionlib.SimpleActionClient("joint_trajectory_action",
                                          FollowJointTrajectoryAction)
    client.wait_for_server()
    client.send_goal(goal)
    client.wait_for_result()
    res = client.get_result()
    if (res.error_code != 0):
        raise Exception("Trajectory execution returned error")

    print res
    #raw_input("confirm move...")
    print "End of Initial Pose ===================="

    ### End of initial pose

    step_size_min = 100000
    stage = 2
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2

    while (
        (du > 4) | (dv > 4) and (iteration < 55)
    ):  #try changing du and dv to lower values(number of iterations increases)
        iteration += 1
        t_now_array.append(time.time())

        #Go to stage 2 of movement (mostly downward z motion)
        if ((du < 4) and (dv < 4) and (stage == 1)):
            #Save pose of marker boards after the iterations end (while in the final hovering position above nest)
            #Get pose of both ground and panel markers from detected corners
            retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
                objPoints_ground, imgPoints_ground, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
            retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
                objPoints_panel, imgPoints_panel, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

            print "============== Observed Pose difference in hovering position"
            observed_tvec_difference = tvec_ground - tvec_panel
            observed_rvec_difference = rvec_ground - rvec_panel
            print "tvec difference: ", observed_tvec_difference
            print "rvec difference: ", observed_rvec_difference

            #Load ideal pose difference information from file
            loaded_rvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['rvec_difference']
            loaded_tvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['tvec_difference']

            print "============== Ideal Pose difference in hovering position"
            print "tvec difference: ", loaded_tvec_difference
            print "rvec difference: ", loaded_rvec_difference

            tvec_difference_Above_Nest = loaded_tvec_difference - observed_tvec_difference
            rvec_difference_Above_Nest = loaded_rvec_difference - observed_rvec_difference

            print "============== Difference in pose difference in hovering position"
            print "tvec difference: ", tvec_difference_Above_Nest
            print "rvec difference: ", rvec_difference_Above_Nest

            #Saving pose information to file
            filename_pose1 = "/home/rpi-cats/Desktop/DJ/Code/Data/Panel1_Above_Nest_Pose_" + str(
                t1) + ".mat"
            scipy.io.savemat(filename_pose1,
                             mdict={
                                 'tvec_ground_Above_Nest':
                                 tvec_ground,
                                 'tvec_panel_Above_Nest':
                                 tvec_panel,
                                 'Rca_ground_Above_Nest':
                                 Rca_ground,
                                 'Rca_panel_Above_Nest':
                                 Rca_panel,
                                 'tvec_difference_Above_Nest':
                                 tvec_ground - tvec_panel,
                                 'rvec_difference_Above_Nest':
                                 rvec_ground - rvec_panel,
                                 'loaded_tvec_difference':
                                 loaded_tvec_difference,
                                 'loaded_rvec_difference':
                                 loaded_rvec_difference,
                                 'observed_tvec_difference':
                                 observed_tvec_difference,
                                 'observed_rvec_difference':
                                 observed_rvec_difference
                             })

            #raw_input("Confirm Stage 2")
            stage = 2
            #			dt=0.02
            loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2
        #print pose_target2.p[2]

        #Print current robot pose at the beginning of this iteration
        Cur_Pose = controller_commander.get_current_pose_msg()
        print "============ Current Robot Pose"
        print Cur_Pose

        #Read new image
        last_ros_image_stamp = object_commander.ros_image_stamp
        try:
            ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            ros_gripper_2_trigger()
        except:
            pass
        wait_count = 0
        while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        result = object_commander.ros_image
        #Save
        #        filename = "Acquisition3_%d.jpg" % (time.time())
        #        scipy.misc.imsave(filename, result)
        #Display
        #        cv2.namedWindow('Aqcuired Image',cv2.WINDOW_NORMAL)
        #        cv2.imshow('Acquired Image',result)
        #        cv2.waitKey(1)

        #Detect tag corners in acquired image using aruco
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            result, aruco_dict, parameters=parameters)
        frame_with_markers_and_axis = result

        #Sort corners and ids according to ascending order of ids
        corners_original = copy.deepcopy(corners)
        ids_original = np.copy(ids)
        sorting_indices = np.argsort(ids_original, 0)
        ids_sorted = ids_original[sorting_indices]
        ids_sorted = ids_sorted.reshape([len(ids_original), 1])
        combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
        combined_list.sort()
        corners_sorted = [x for y, x in combined_list]
        ids = np.copy(ids_sorted)
        corners = copy.deepcopy(corners_sorted)

        #Remove ids and corresponding corners not in range (parasitic detections at random locations in the image)
        false_ids_ind = np.where(ids > 150)
        mask = np.ones(ids.shape, dtype=bool)
        mask[false_ids_ind] = False
        ids = ids[mask]
        corners = np.array(corners)
        corners = corners[mask.flatten(), :]
        corners = list(corners)

        #Define object and image points of both tags
        objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
            board_ground, corners, ids)
        objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
            board_panel, corners, ids)
        objPoints_ground = objPoints_ground.reshape(
            [objPoints_ground.shape[0], 3])
        imgPoints_ground = imgPoints_ground.reshape(
            [imgPoints_ground.shape[0], 2])
        objPoints_panel = objPoints_panel.reshape(
            [objPoints_panel.shape[0], 3])
        imgPoints_panel = imgPoints_panel.reshape(
            [imgPoints_panel.shape[0], 2])

        #Get pose of both ground and panel markers from detected corners
        retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
            objPoints_ground, imgPoints_ground, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
        retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
            objPoints_panel, imgPoints_panel, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)

        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(
            corners[0:20], 0.04, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(
            corners[20:45], 0.025, CamParam.camMatrix, CamParam.distCoeff)
        tvec_all = np.concatenate(
            (tvec_all_markers_ground, tvec_all_markers_panel), axis=0)
        for i_ids, i_corners, i_tvec in zip(ids, corners, tvec_all):
            UV[i_ids, :] = i_corners.reshape(
                [1, 8])  #np.average(i_corners, axis=1)
            P[i_ids, :] = i_tvec

        #print 'P',P
        #used to find the height of the tags and the delta change of height, z height at desired position
        Z = 1 * P[32:48,
                  2]  #- [0.68184539, 0.68560932, 0.68966803, 0.69619578])

        #check if all tags detected
        if retVal_ground != 0 and retVal_panel != 0:
            dutmp = []
            dvtmp = []

            #Pixel estimates of the ideal ground tag location
            reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
                loaded_object_points_ground_in_panel_system.transpose(),
                rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
            reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape(
                [reprojected_imagePoints_ground_2.shape[0], 2])

            #plot image points for ground tag from corner detection and from re-projections
            for point1, point2 in zip(
                    imgPoints_ground,
                    np.float32(reprojected_imagePoints_ground_2)):
                cv2.circle(frame_with_markers_and_axis, tuple(point1), 5,
                           (0, 0, 255), 3)
                cv2.circle(frame_with_markers_and_axis, tuple(point2), 5,
                           (255, 0, 0), 3)

            cv2.imshow('Image', frame_with_markers_and_axis)
            cv2.waitKey(1)
            #Save
            filename_image = "/home/rpi-cats/Desktop/DJ/Code/Images/Panel1_Acquisition_" + str(
                t1) + "_" + str(iteration) + ".jpg"
            scipy.misc.imsave(filename_image, frame_with_markers_and_axis)

            #Go through a particular point in all tags to build the complete Jacobian
            for ic in range(4):
                #uses first set of tags, numbers used to offset camera frame, come from camera parameters
                #UV_target = np.vstack([UV[9:13,2*ic]-1209.151959040735,UV[9:13,2*ic+1]-1055.254852652610]).T - UV_shift[:,(2*ic):(2*ic+2)]		#shift corner estimates to the image centers. Necessary for the image jacobian to work.
                reprojected_imagePoints_ground_2 = np.reshape(
                    reprojected_imagePoints_ground_2, (16, 8))
                UV_target = np.vstack([
                    reprojected_imagePoints_ground_2[:, 2 * ic] -
                    1192.255485086403,
                    reprojected_imagePoints_ground_2[:, 2 * ic + 1] -
                    1021.399092147012
                ]).T
                uc = UV_target[:, 0]
                vc = UV_target[:, 1]

                #                print 'UV_target', UV_target
                UV_current = np.vstack([
                    UV[32:48, 2 * ic] - 1192.255485086403,
                    UV[32:48, 2 * ic + 1] - 1021.399092147012
                ]).T
                #find difference between current and desired tag corner locations
                delta_UV = UV_target - UV_current
                dutmp.append(np.mean(delta_UV[:, 0]))
                dvtmp.append(np.mean(delta_UV[:, 1]))
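                # Stack the classical point-feature image Jacobian (interaction matrix)
                # rows for this corner of every ground tag, using focal lengths
                # f_hat_u/f_hat_v and the tag depth Z[tag_i].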
                for tag_i in range(16):
                    if tag_i == 0:
                        J_cam_tmp = 1.0 * np.array(
                            [[
                                -f_hat_u / Z[tag_i], 0.0, uc[tag_i] / Z[tag_i],
                                uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                (uc[tag_i] * uc[tag_i] / f_hat_u + f_hat_u),
                                vc[tag_i]
                            ],
                             [
                                 0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                 Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v +
                                 f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                 f_hat_v, -uc[tag_i]
                             ]])
                    else:
                        J_cam_tmp = np.concatenate(
                            (J_cam_tmp,
                             1.0 * np.array(
                                 [[
                                     -f_hat_u / Z[tag_i], 0.0,
                                     uc[tag_i] / Z[tag_i],
                                     uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                     (uc[tag_i] * uc[tag_i] / f_hat_u +
                                      f_hat_u), vc[tag_i]
                                 ],
                                  [
                                      0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                      Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v
                                      + f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                      f_hat_v, -uc[tag_i]
                                  ]])),
                            axis=0)
                #camera jacobian
                if ic == 0:
                    J_cam = J_cam_tmp
                    delta_UV_all = delta_UV.reshape(32, 1)
                    UV_target_all = UV_target.reshape(32, 1)
                else:
                    J_cam = np.vstack([J_cam, J_cam_tmp])
                    delta_UV_all = np.vstack(
                        [delta_UV_all, delta_UV.reshape(32, 1)])
                    UV_target_all = np.vstack(
                        [UV_target_all,
                         UV_target.reshape(32, 1)])

            print 'dutmp: ', dutmp
            print 'dvtmp: ', dvtmp
            du = np.mean(np.absolute(dutmp))
            dv = np.mean(np.absolute(dvtmp))
            print 'Average du of all points:', du, 'Average dv of all points:', dv
            du_array.append(du)
            dv_array.append(dv)

            #print "Jcam",J_cam
            #print "UV",delta_UV_all
            #dx = np.matmul(np.linalg.pinv(J_cam),np.array(delta_UV_all))
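            # QP_Cam (defined elsewhere) appears to solve a constrained least-squares
            # problem for the camera-frame twist dx that reduces the pixel error
            # delta_UV_all, in place of the plain pseudoinverse solution above.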

            dx = QP_Cam(
                J_cam, delta_UV_all
            )  #np.matmul(np.linalg.pinv(J_cam),np.array(delta_UV_all))
            dx = dx.reshape([6, 1])

            dx_array.append(dx)

            #print '**************Jac:',dx, QP_Cam(J_cam,delta_UV_all)

            dx = np.array([
                dx[3, 0], -dx[4, 0], -dx[5, 0], dx[0, 0], -dx[1, 0], -dx[2, 0]
            ])

            if stage == 2:
                T = listener.lookupTransform("base", "link_6", rospy.Time(0))
                rg = 9.8 * np.matmul(
                    np.matmul(T.R, Tran_z).transpose(),
                    np.array([0, 0, 1]).transpose())
                A1 = np.hstack([
                    rox.hat(rg).transpose(),
                    np.zeros([3, 1]),
                    np.eye(3),
                    np.zeros([3, 3])
                ])
                A2 = np.hstack([
                    np.zeros([3, 3]),
                    rg.reshape([3, 1]),
                    np.zeros([3, 3]),
                    np.eye(3)
                ])
                A = np.vstack([A1, A2])
                FTdata_est = np.matmul(A, Vec_wrench)
                FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
                print 'FTread:', FTread

                print 'Z', FTread[-1]
                if FTread[-1] > -100:
                    F_d = -150
                    Kc = 0.0001
                else:
                    Kc = 0.0001
                    F_d = -200

                Vz = Kc * (F_d - FTread[-1])
                print 'Vz', Vz
                dx[-1] = Vz + dx[-1]
                FTread_array.append(FTread)
                FTdata_array.append(FTdata)

            current_joint_angles = controller_commander.get_current_joint_values(
            )
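            # Limit the commanded step so its norm never exceeds the smallest step taken so far.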

            step_size_tmp = np.linalg.norm(dx)
            if step_size_tmp <= step_size_min:
                step_size_min = step_size_tmp
            else:
                dx = dx / step_size_tmp * step_size_min

            joints_vel = QP_abbirb6640(
                np.array(current_joint_angles).reshape(6, 1), np.array(dx))

            goal = trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(dt),
                np.array(current_joint_angles), 0.25, np.array(dx))
            step_size.append(np.linalg.norm(dx))

            client = actionlib.SimpleActionClient("joint_trajectory_action",
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()

            client.send_goal(goal)
            client.wait_for_result()
            res = client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

            print res

            print 'Current Iteration Finished.'

    #Saving iteration data to file
    filename_data = "/home/rpi-cats/Desktop/YC/Data/Panel1_Data_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_data,
                     mdict={
                         'du_array': du_array,
                         'dv_array': dv_array,
                         'dx_array': dx_array,
                         'step_size': step_size,
                         'iteration': iteration
                     })

    #Saving force data to file
    filename_force_data = "/home/rpi-cats/Desktop/YC/Data/Panel1_Data_force_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_force_data,
                     mdict={
                         'FTread': FTread_array,
                         'FTdata': FTdata_array,
                         't_now_array': t_now_array
                     })

    print '###############################'
    print 'step_size', step_size
    print 'iteration', iteration
    print '###############################'
    '''
    print "============ Final Push Down to Nest"
    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.2, [],[])

    
    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
    trans_current = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]
    pose_target2.R = rox.q2R([rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] -= 0.11
    '''

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections at random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the iterations end (while in the final hovering position above nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec difference: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    print "tvec difference: ", tvec_err
    print "rvec differnece: ", rvec_err

    # Adjustment
    print "Adjustment ===================="
    current_joint_angles = controller_commander.get_current_joint_values()
    dx = np.array([0, 0, 0, -tvec_err[0], tvec_err[1], 0])
    joints_vel = QP_abbirb6640(
        np.array(current_joint_angles).reshape(6, 1), np.array(dx))
    goal = trapezoid_gen(
        np.array(current_joint_angles) + joints_vel.dot(1),
        np.array(current_joint_angles), 0.25, np.array(dx))
    client = actionlib.SimpleActionClient("joint_trajectory_action",
                                          FollowJointTrajectoryAction)
    client.wait_for_server()
    client.send_goal(goal)
    client.wait_for_result()
    res = client.get_result()
    if (res.error_code != 0):
        raise Exception("Trajectory execution returned error")

    print res
    print "End of Adjustment ===================="

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the iterations end (while in the final hovering position above nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    rospy.loginfo("tvec difference: %f, %f, %f", tvec_err[0], tvec_err[1],
                  tvec_err[2])
    rospy.loginfo("rvec difference: %f, %f, %f", rvec_err[0], rvec_err[1],
                  rvec_err[2])
    print "rvec differnece: ", tvec_err
    print "rvec differnece: ", rvec_err

    #Saving pose information to file
    filename_pose2 = "/home/rpi-cats/Desktop/DJ/Code/Data/In_Nest_Pose_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_pose2,
                     mdict={
                         'tvec_ground_In_Nest': tvec_ground,
                         'tvec_panel_In_Nest': tvec_panel,
                         'Rca_ground_In_Nest': Rca_ground,
                         'Rca_panel_In_Nest': Rca_panel,
                         'tvec_difference_In_Nest': tvec_ground - tvec_panel,
                         'rvec_difference_In_Nest': rvec_ground - rvec_panel,
                         'loaded_tvec_difference': loaded_tvec_difference,
                         'loaded_rvec_difference': loaded_rvec_difference,
                         'observed_tvec_difference': observed_tvec_difference,
                         'observed_rvec_difference': observed_rvec_difference
                     })
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
    #    raw_input("confirm release vacuum")
    rapid_node.set_digital_io("Vacuum_enable", 0)
    g = ProcessStepGoal('plan_place_set_second_step', "")

    process_client.send_goal(g)

    #self.in_process=True

    print process_client.get_result()
    print "VACUUM OFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    time.sleep(0.5)

    tran = np.array([2.17209718963, -1.13702651252, 0.224273328841])
    rot = rox.q2R(
        [0.0145063655084, -0.0282932350764, 0.999322921073, -0.0185137145776])
    pose_target2 = rox.Transform(rot, tran)

    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [
        Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
        Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z
    ]
    trans_current = [
        Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
        Cur_Pose.pose.position.z
    ]
    pose_target2.R = rox.q2R(
        [rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] += 0.5
    #
    #
    #    #print 'Target:',pose_target3
    #
    #    #print "============ Executing plan4"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
    s = ProcessState()
    s.state = "place_set"
    s.payload = "leeward_mid_panel"
    s.target = ""
    process_state_pub.publish(s)
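
The two error-calculation blocks above repeat the same pattern: estimate the pose of the ground and panel marker boards with cv2.solvePnP, take the difference of their rvec/tvec results, and compare that against an ideal difference loaded from a .mat file. A minimal sketch of that pattern as a reusable helper follows; the loaded keys 'tvec_difference'/'rvec_difference' match the script above, while the function names and arguments are illustrative only.

import cv2
import numpy as np
from scipy.io import loadmat

def board_pose_difference(objPoints_a, imgPoints_a, objPoints_b, imgPoints_b,
                          camMatrix, distCoeff):
    #Estimate each board pose with solvePnP and return the tvec/rvec differences
    _, rvec_a, tvec_a = cv2.solvePnP(objPoints_a, imgPoints_a, camMatrix, distCoeff)
    _, rvec_b, tvec_b = cv2.solvePnP(objPoints_b, imgPoints_b, camMatrix, distCoeff)
    return tvec_a - tvec_b, rvec_a - rvec_b

def placement_error(observed_tvec_diff, observed_rvec_diff, ideal_mat_path):
    #Compare the observed board-to-board offset with the ideal offset stored in a .mat file
    ideal = loadmat(ideal_mat_path)
    tvec_err = ideal['tvec_difference'] - observed_tvec_diff
    rvec_err = ideal['rvec_difference'] - observed_rvec_diff
    return tvec_err, rvec_err
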
Example #16
    def runTest(self):

        #msg2q, q2msg
        q = np.random.rand(4)
        q = q / np.linalg.norm(q)

        q_msg = rox_msg.q2msg(q)
        q_msg._check_types()
        np.testing.assert_allclose(q, [q_msg.w, q_msg.x, q_msg.y, q_msg.z],
                                   atol=1e-4)
        q2 = rox_msg.msg2q(q_msg)
        np.testing.assert_allclose(q, q2, atol=1e-4)

        #msg2R, R2msg
        R = rox.q2R(q)
        q_msg_R = rox_msg.R2msg(R)
        q_msg_R._check_types()
        np.testing.assert_allclose(
            q, [q_msg_R.w, q_msg_R.x, q_msg_R.y, q_msg_R.z], atol=1e-4)
        R2 = rox_msg.msg2R(q_msg_R)
        np.testing.assert_allclose(R, R2, atol=1e-4)

        #msg2p, p2msg
        p = np.random.rand(3)
        p_msg = rox_msg.p2msg(p)
        p_msg._check_types()
        np.testing.assert_allclose(p, [p_msg.x, p_msg.y, p_msg.z], atol=1e-4)
        p2 = rox_msg.msg2p(p_msg)
        np.testing.assert_allclose(p, p2, atol=1e-4)

        #transform messages of varying types
        tf = rox.Transform(R, p)
        pose_msg = rox_msg.transform2pose_msg(tf)
        pose_msg._check_types()
        np.testing.assert_allclose(R,
                                   rox_msg.msg2R(pose_msg.orientation),
                                   atol=1e-4)
        np.testing.assert_allclose(p,
                                   rox_msg.msg2p(pose_msg.position),
                                   atol=1e-4)
        pose2 = rox_msg.msg2transform(pose_msg)
        np.testing.assert_allclose(R, pose2.R, atol=1e-4)
        np.testing.assert_allclose(p, pose2.p, atol=1e-4)
        tf_msg = rox_msg.transform2msg(tf)
        tf_msg._check_types()
        np.testing.assert_allclose(R,
                                   rox_msg.msg2R(tf_msg.rotation),
                                   atol=1e-4)
        np.testing.assert_allclose(p,
                                   rox_msg.msg2p(tf_msg.translation),
                                   atol=1e-4)
        tf2 = rox_msg.msg2transform(tf_msg)
        np.testing.assert_allclose(R, tf2.R, atol=1e-4)
        np.testing.assert_allclose(p, tf2.p, atol=1e-4)

        #transform stamped messages of varying types
        tf3 = rox.Transform(R, p, 'parent_link', 'child_link')
        print tf3
        print str(tf3)
        pose_stamped_msg = rox_msg.transform2pose_stamped_msg(tf3)
        pose_stamped_msg._check_types()
        np.testing.assert_allclose(R,
                                   rox_msg.msg2R(
                                       pose_stamped_msg.pose.orientation),
                                   atol=1e-4)
        np.testing.assert_allclose(p,
                                   rox_msg.msg2p(
                                       pose_stamped_msg.pose.position),
                                   atol=1e-4)
        assert pose_stamped_msg.header.frame_id == 'parent_link'
        pose3 = rox_msg.msg2transform(pose_stamped_msg)
        np.testing.assert_allclose(R, pose3.R, atol=1e-4)
        np.testing.assert_allclose(p, pose3.p, atol=1e-4)
        assert pose3.parent_frame_id == 'parent_link'
        tf_stamped_msg = rox_msg.transform2transform_stamped_msg(tf3)
        tf_stamped_msg._check_types()
        np.testing.assert_allclose(R,
                                   rox_msg.msg2R(
                                       tf_stamped_msg.transform.rotation),
                                   atol=1e-4)
        np.testing.assert_allclose(p,
                                   rox_msg.msg2p(
                                       tf_stamped_msg.transform.translation),
                                   atol=1e-4)
        assert tf_stamped_msg.header.frame_id == 'parent_link'
        assert tf_stamped_msg.child_frame_id == 'child_link'
        tf4 = rox_msg.msg2transform(tf_stamped_msg)
        np.testing.assert_allclose(R, tf4.R, atol=1e-4)
        np.testing.assert_allclose(p, tf4.p, atol=1e-4)
        assert tf4.parent_frame_id == 'parent_link'
        assert tf4.child_frame_id == 'child_link'

        #msg2twist, twist2msg
        twist = np.random.rand(6)
        twist_msg = rox_msg.twist2msg(twist)
        twist_msg._check_types()
        np.testing.assert_allclose(twist, [twist_msg.angular.x, twist_msg.angular.y, twist_msg.angular.z, \
                                           twist_msg.linear.x, twist_msg.linear.y, twist_msg.linear.z], \
                                            atol=1e-4)
        twist2 = rox_msg.msg2twist(twist_msg)
        np.testing.assert_allclose(twist, twist2, atol=1e-4)

        #msg2wrench, wrench2msg
        wrench = np.random.rand(6)
        wrench_msg = rox_msg.wrench2msg(wrench)
        wrench_msg._check_types()
        np.testing.assert_allclose(wrench, [wrench_msg.torque.x, wrench_msg.torque.y, wrench_msg.torque.z, \
                                           wrench_msg.force.x, wrench_msg.force.y, wrench_msg.force.z], \
                                            atol=1e-4)
        wrench2 = rox_msg.msg2wrench(wrench_msg)
        np.testing.assert_allclose(wrench, wrench2, atol=1e-4)
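
The test above round-trips quaternions, rotations, points, transforms, twists, and wrenches through their ROS message forms. A short usage sketch of the stamped-transform conversion, assuming rox and rox_msg are imported as in the examples above:

import numpy as np
#rox and rox_msg are assumed to be imported as in the test above

T = rox.Transform(rox.rot([0., 0., 1.], np.deg2rad(30.)),
                  np.array([0.1, 0.2, 0.3]), 'parent_link', 'child_link')
pose_stamped_msg = rox_msg.transform2pose_stamped_msg(T)   #geometry_msgs/PoseStamped
T2 = rox_msg.msg2transform(pose_stamped_msg)               #back to a rox.Transform
assert np.allclose(T.R, T2.R) and np.allclose(T.p, T2.p)
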
def main():
            
    #Start timer to measure execution time        
    t1 = time.time()
    
    #Subscribe to controller_state 
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)
    
    #Force-torque    
    if not "disable-ft" in sys.argv:
        ft_threshold1=ft_threshold
    else:
        ft_threshold1=[]
    
    #Initialize ros node of this process
    rospy.init_node('user_defined_motion', anonymous=True)
    
    #print "============ Starting setup"   
    
    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()

    object_commander=ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()   
  
    #subscribe to Gripper camera node for image acquisition     
    ros_gripper_2_img_sub=rospy.Subscriber('/gripper_camera_2/image', Image, object_commander.ros_raw_gripper_2_image_cb)
    ros_gripper_2_trigger=rospy.ServiceProxy('/gripper_camera_2/continuous_trigger', CameraTrigger)

    #Set controller command mode
    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.4, ft_threshold_place,[])
    time.sleep(0.5)
    
    
    #Set location above the panel where the end effector goes first (in the world/base frame). This is the ideal location of the panel.
    current_joint_angles = controller_commander.get_current_joint_values()
    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_o = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
    trans_o = np.array([Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z])

    pos_command = []
    quat_command = []
    pos_read = []
    quat_read = []
    time_all = []


    for i in range(4):
        rot_set = rox.q2R(rot_o)
        tran_set = trans_o+np.array([0,0,i*0.25])


        pose_target2 = rox.Transform(rot_set, tran_set)
    
         
        #Execute movement to set location
        print "Executing initial path"
        controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
        time.sleep(1)
    
        Cur_Pose = controller_commander.get_current_pose_msg()
        rot_cur = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
        trans_cur = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]

        pos_command.append(tran_set)
        quat_command.append(rot_set)
        pos_read.append(trans_cur)
        quat_read.append(rot_cur)
        time_all.append(time.time())
    

    #Saving iteration data to file
    filename_data = "/home/rpi-cats/Desktop/YC/Data/Motion Capture/Data_"+str(t1)+".mat"
    scipy.io.savemat(filename_data, mdict={'pos_command':pos_command, 'quat_command':quat_command, 'pos_read':pos_read, 'quat_read':quat_read, 'time_all':time_all})
	

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2-t1) + " seconds"
Example #18
    def _do_aruco_detection(self, img, intrinsic_global_name,
                            extrinsic_global_name, aruco_dict_str, aruco_id,
                            aruco_markersize, roi):

        intrinsic_calib = None
        extrinsic_calib = None

        if intrinsic_global_name is not None and extrinsic_global_name is not None:
            var_storage = self.device_manager.get_device_client(
                "variable_storage", 0.1)
            intrinsic_calib = var_storage.getf_variable_value(
                "globals", intrinsic_global_name).data
            extrinsic_calib = var_storage.getf_variable_value(
                "globals", extrinsic_global_name).data

        display_img = img.copy()

        assert aruco_dict_str.startswith(
            "DICT_"), "Invalid aruco dictionary name"

        aruco_dict_i = getattr(aruco, aruco_dict_str)  # convert dictionary name string to the OpenCV constant
        aruco_dict = cv2.aruco.Dictionary_get(aruco_dict_i)
        aruco_params = cv2.aruco.DetectorParameters_create()
        aruco_params.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX

        corners1, ids1, rejected = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=aruco_params)

        if corners1 is None:
            corners1 = []
        if ids1 is None:
            ids1 = []

        if aruco_id < 0:
            corners2 = corners1
            ids2 = ids1
        else:
            corners2 = []
            ids2 = []
            for id2, corner2 in zip(ids1, corners1):
                if id2 == aruco_id:
                    corners2.append(corner2)
                    ids2.append(id2)
            ids2 = np.array(ids2)

        if roi is None:
            corners = corners2
            ids = ids2
        else:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            geom_roi1 = shapely.geometry.box(-roi_w / 2.,
                                             -roi_h / 2.,
                                             roi_w / 2.,
                                             roi_h / 2.,
                                             ccw=True)
            geom_roi2 = shapely.affinity.translate(geom_roi1,
                                                   xoff=roi_x,
                                                   yoff=roi_y)
            geom_roi = shapely.affinity.rotate(geom_roi2,
                                               roi_theta,
                                               origin='centroid',
                                               use_radians=True)

            corners = []
            ids = []

            for id3, corner3 in zip(ids2, corners2):
                # Polygon of the four detected marker corners
                marker_poly = shapely.geometry.Polygon([
                    shapely.geometry.Point(corner3[0, i, 0], corner3[0, i, 1])
                    for i in range(4)
                ])
                if geom_roi.contains(marker_poly):
                    corners.append(corner3)
                    ids.append(id3)
            ids = np.array(ids)

            roi_outline = np.array([geom_roi.exterior.coords], dtype=np.int32)
            display_img = cv2.polylines(display_img,
                                        roi_outline,
                                        True,
                                        color=(255, 255, 0))

        if len(ids) > 0:
            display_img = aruco.drawDetectedMarkers(display_img, corners, ids)

        poses = None
        if intrinsic_calib is not None and extrinsic_calib is not None:
            poses = []
            mtx = intrinsic_calib.K
            d = intrinsic_calib.distortion_info.data
            dist = np.array([d.k1, d.k2, d.p1, d.p2, d.k3])

            T_cam = self._geom_util.named_pose_to_rox_transform(
                extrinsic_calib.pose)

            for id4, corner4 in zip(ids, corners):
                rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                    corner4, aruco_markersize, mtx, dist)

                display_img = cv2.aruco.drawAxis(display_img, mtx, dist, rvec,
                                                 tvec, 0.05)

                # R_marker1 = cv2.Rodrigues(rvec.flatten())[0]
                # TODO: 3D pose estimation from rvec is very inaccurate. Use simple
                # trigonometry to estimate the Z rotation of the tag instead

                # compute vectors from opposite corners
                v1 = corner4[0, 2, :] - corner4[0, 0, :]
                v2 = corner4[0, 3, :] - corner4[0, 1, :]

                # Use atan2 on each vector and average
                theta1 = (np.arctan2(v1[1], v1[0]) - np.deg2rad(45)) % (2. *
                                                                        np.pi)
                theta2 = (np.arctan2(v2[1], v2[0]) - np.deg2rad(135)) % (2. *
                                                                         np.pi)

                theta = (theta1 + theta2) / 2.

                R_marker1 = rox.rot([1., 0., 0.], np.pi) @ rox.rot(
                    [0., 0., -1.], theta)

                p_marker1 = tvec.flatten()

                T_marker1 = rox.Transform(R_marker1, p_marker1, "camera",
                                          f"aruco{int(id4)}")
                T_marker = T_cam * T_marker1
                rr_marker_pose = self._geom_util.rox_transform_to_named_pose(
                    T_marker)
                poses.append(rr_marker_pose)

        ret_markers = []
        if poses is None:
            poses = [None] * len(ids)
        for id_, corner, pose in zip(ids, corners, poses):
            m = self._detected_marker()
            m.marker_id = int(id_)
            m.marker_corners = np.zeros((4, ), dtype=self._point2d_dtype)
            for i in range(4):
                m.marker_corners[i] = self._geom_util.xy_to_point2d(
                    corner[0, i, :])

            m.marker_pose = pose

            ret_markers.append(m)

        ret = self._aruco_detection_result()
        ret.detected_markers = ret_markers
        ret.display_image = self._image_util.array_to_compressed_image_jpg(
            display_img, 70)
        return ret
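
As noted in the TODO comment, the Z rotation of each tag is estimated from the two image-space diagonals of its corners rather than from the solvePnP rvec. A standalone sketch of that computation, assuming the usual OpenCV corner ordering (top-left, top-right, bottom-right, bottom-left):

import numpy as np

def marker_yaw_from_corners(corner):
    #corner is a 1x4x2 array as returned by cv2.aruco.detectMarkers for one marker
    v1 = corner[0, 2, :] - corner[0, 0, :]   #top-left -> bottom-right diagonal
    v2 = corner[0, 3, :] - corner[0, 1, :]   #top-right -> bottom-left diagonal
    theta1 = (np.arctan2(v1[1], v1[0]) - np.deg2rad(45.)) % (2. * np.pi)
    theta2 = (np.arctan2(v2[1], v2[0]) - np.deg2rad(135.)) % (2. * np.pi)
    return (theta1 + theta2) / 2.

#An axis-aligned square in image coordinates gives a yaw of approximately 0
square = np.array([[[0., 0.], [1., 0.], [1., 1.], [0., 1.]]])
print(marker_yaw_from_corners(square))
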
Example #19
d.connect_device("robot")

c = d.get_device_client("robotics_motion", 1)

#p_target = np.array([-np.random.uniform(0.4,0.8),np.random.uniform(-0.1,0.1),np.random.uniform(0.0,0.4)])
p_target = np.array([
    -np.random.uniform(-0.1, 0.1),
    np.random.uniform(-0.1, 0.1),
    np.random.uniform(0.0, 0.4)
])
rpy_target = np.random.randn(3) * 0.5
rpy_target[0] += np.pi
R_target = rox.rpy2R(rpy_target)
# p_target = np.array([-0.6, 0.0, 0.1])
# R_target = np.array([[0,1,0],[1,0,0],[0,0,-1]])
T_target = rox.Transform(R_target, p_target)

r = d.get_device_client("robot", 1)

geom_util = GeometryUtil(client_obj=r)
p_target = geom_util.rox_transform_to_pose(T_target)

print("Begin movel")
gen = c.movel("robot", p_target, "world", "robot_origin_calibration", 50)

while True:
    try:
        gen.Next()
    except RR.StopIterationException:
        break
print("End movel")
def main():
    step_ts = 0.004
    rospy.init_node("test_moveit_commander_custom_trajectory", anonymous=True)
    rospy.Subscriber("controller_state", ControllerState, callback_function)
    robot = urdf.robot_from_parameter_server()
    controller_commander = controller_commander_pkg.ControllerCommander()

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, [], [])
    time.sleep(0.5)
    tran = np.array(
        [2.197026484647054, 1.2179574262842452, 0.12376598588449844])
    rot = np.array([[-0.99804142, 0.00642963, 0.06222524],
                    [0.00583933, 0.99993626, -0.00966372],
                    [-0.06228341, -0.00928144, -0.99801535]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target2.p[2] += 0.20

    print 'Target:', pose_target2
    print "============ Move Close to Panel"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])
    time.sleep(1.0)
    Kc = 0.004
    time_save = []
    FTdata_save = []
    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()

    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    #controller_commander=controller_commander_pkg.arm_composites_manufacturing_controller_commander()
    time.sleep(1.0)

    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(),
        np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(),
        np.zeros([3, 1]),
        np.eye(3),
        np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]),
         rg.reshape([3, 1]),
         np.zeros([3, 3]),
         np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)
    #print 'Test4:',controller_commander.ControllerState

    for i in range(400):
        tic = time.time()
        plan = RobotTrajectory()
        plan.joint_trajectory.joint_names = [
            'joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6'
        ]
        current_joint_angles = controller_commander.get_current_joint_values()

        plan.joint_trajectory.header.frame_id = '/world'
        p1 = JointTrajectoryPoint()
        p1.positions = current_joint_angles
        p1.velocities = np.zeros((6, ))
        p1.accelerations = np.zeros((6, ))
        p1.time_from_start = rospy.Duration(0)

        T = listener.lookupTransform("base", "link_6", rospy.Time(0))
        rg = 9.8 * np.matmul(
            np.matmul(T.R, Tran_z).transpose(),
            np.array([0, 0, 1]).transpose())
        A1 = np.hstack([
            rox.hat(rg).transpose(),
            np.zeros([3, 1]),
            np.eye(3),
            np.zeros([3, 3])
        ])
        A2 = np.hstack([
            np.zeros([3, 3]),
            rg.reshape([3, 1]),
            np.zeros([3, 3]),
            np.eye(3)
        ])
        A = np.vstack([A1, A2])
        FTdata_est = np.matmul(A, Vec_wrench)

        #print 'Test0:',FTdata,FTdata_0,FTdata_est,FTdata_0est
        FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
        print 'FTread:', FTread

        print 'FT:', FTdata
        print 'Z', FTread[-1]
        if FTread[-1] > -200:
            F_d = -350
        else:
            F_d = -400

        J = rox.robotjacobian(robot, current_joint_angles)
        Vz = Kc * (F_d - FTread[-1])
        joints_vel = np.linalg.pinv(J).dot(np.array([0, 0, 0, 0, 0, Vz]))
        print 'Joint_command:', joints_vel.dot(step_ts)

        p2 = JointTrajectoryPoint()
        p2.positions = np.array(p1.positions) + joints_vel.dot(
            step_ts)  #np.array([0,np.deg2rad(-2),0,0,0,0])
        p2.velocities = np.zeros((6, ))
        p2.accelerations = np.zeros((6, ))
        p2.time_from_start = rospy.Duration(step_ts)

        #        p3=JointTrajectoryPoint()
        #        p3.positions = np.array(p1.positions) + np.array([0,np.deg2rad(2),0,0,0,0])
        #        p3.velocities = np.zeros((6,))
        #        p3.accelerations = np.zeros((6,))
        #        p3.time_from_start = rospy.Duration(4)

        plan.joint_trajectory.points.append(p1)
        plan.joint_trajectory.points.append(p2)
        #        plan.joint_trajectory.points.append(p3)

        controller_commander.execute(plan)
        print 'Time:', time.time() - tic
        #        print 'Joint:',controller_commander.get_current_joint_values()
        time_save.append(time.time())
        FTdata_save.append(FTread)

    filename = "FTdata.txt"
    f_handle = open(filename, 'a')
    np.savetxt(f_handle, FTdata_save)
    f_handle.close()

    filename = "Time.txt"
    f_handle = open(filename, 'a')
    np.savetxt(f_handle, time_save)
    f_handle.close()
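
The loop above is a simple proportional force regulator: the commanded vertical velocity is Vz = Kc*(F_d - F_z), and it is mapped to joint velocities through the pseudo-inverse of the robot Jacobian. A minimal sketch of one control step, assuming rox is imported as in the script; the twist ordering follows the usage above, where Vz is the last element.

import numpy as np

def force_regulated_joint_step(robot, joint_angles, f_z_measured, f_z_desired,
                               Kc=0.004, step_ts=0.004):
    #One step of the proportional force controller used above
    J = rox.robotjacobian(robot, joint_angles)
    Vz = Kc * (f_z_desired - f_z_measured)              #force error -> linear z velocity
    joints_vel = np.linalg.pinv(J).dot(np.array([0., 0., 0., 0., 0., Vz]))
    return np.asarray(joint_angles) + joints_vel * step_ts
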
def calibrate(images, joint_poses, aruco_dict, aruco_id, aruco_markersize,
              flange_to_marker, mtx, dist, cam_pose, rox_robot,
              robot_local_device_name):
    """ Apply extrinsic camera calibration operation for images in the given directory path 
    using opencv aruco marker detection, the extrinsic marker poses given in a json file, 
    and the given intrinsic camera parameters."""

    assert aruco_dict.startswith("DICT_"), "Invalid aruco dictionary name"

    aruco_dict = getattr(aruco, aruco_dict)  # convert dictionary name string to the OpenCV constant
    aruco_dict = cv2.aruco.Dictionary_get(aruco_dict)
    aruco_params = cv2.aruco.DetectorParameters_create()

    i = 0

    imgs_out = []

    geom_util = GeometryUtil()
    image_util = ImageUtil()

    object_points = []
    image_points = []

    for img, joints in zip(images, joint_poses):

        # Find the aruco tag corners
        # corners, ids, rejected = cv2.aruco.detectMarkers(img, aruco_dict, parameters=aruco_params,cameraMatrix=mtx, distCoeff=dist)
        corners, ids, rejected = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=aruco_params)

        # #debug
        # print(str(type(corners))) # <class 'list'>
        # print(str(corners))  # list of numpy arrays of corners
        # print(str(type(ids))) # <class 'numpy.ndarray'>
        # print(str(ids))

        if len(corners) > 0:
            # Only find the id that we desire
            indexes = np.flatnonzero(ids.flatten() == aruco_id).tolist()
            corners = [corners[index] for index in indexes]
            ids = np.asarray([ids[index] for index in indexes])

            # #debug
            # print(str(type(corners))) # <class 'list'>
            # print(str(corners))  # list of numpy arrays of corners
            # print(str(type(ids))) # <class 'numpy.ndarray'>
            # print(str(ids))

            if len(ids) > 0:  # if there exist at least one id that we desire
                # Select the first detected one, discard the others
                corners = corners[0]  # now corners is 1 by 4

                # # extract the marker corners (which are always returned
                # # in top-left, top-right, bottom-right, and bottom-left
                # # order)
                # corners = corners.reshape((4, 2))
                # (topLeft, topRight, bottomRight, bottomLeft) = corners

                # Estimate the pose of the detected marker in camera frame
                rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                    corners, aruco_markersize, mtx, dist)

                # # Debug: Show the detected tag and axis in the image
                # # # cv2.aruco.drawDetectedMarkers(img, corners)  # Draw A square around the markers (Does not work)
                img1 = img.copy()
                img_out = cv2.aruco.drawAxis(img1, mtx, dist, rvec, tvec,
                                             aruco_markersize *
                                             0.75)  # Draw Axis
                imgs_out.append(img_out)

                transform_base_2_flange = rox.fwdkin(rox_robot, joints)
                transform_flange_2_marker = geom_util.pose_to_rox_transform(
                    flange_to_marker)
                transform_base_2_marker = transform_base_2_flange * transform_flange_2_marker
                transform_base_2_marker_corners = _marker_corner_poses(
                    transform_base_2_marker, aruco_markersize)
                # Structure of this dictionary is "filename":[[R_base2marker],[T_base2marker],[R_cam2marker],[T_cam2marker]]
                for j in range(4):
                    object_points.append(transform_base_2_marker_corners[j].p)
                    image_points.append(corners[0, j])
                #pose_pairs_dict[i] = (transform_base_2_marker_corners, corners)
                i += 1

    object_points_np = np.array(object_points, dtype=np.float64)
    image_points_np = np.array(image_points, dtype=np.float32)

    # Finally execute the calibration
    retval, rvec, tvec = cv2.solvePnP(object_points_np, image_points_np, mtx,
                                      dist)
    R_cam2base = cv2.Rodrigues(rvec)[0]
    T_cam2base = tvec

    # Add another display image of marker at robot base
    img_out = cv2.aruco.drawAxis(img, mtx, dist,
                                 cv2.Rodrigues(R_cam2base)[0], T_cam2base,
                                 aruco_markersize * 0.75)  # Draw Axis
    imgs_out.append(img_out)

    rox_transform_cam2base = rox.Transform(R_cam2base, T_cam2base,
                                           cam_pose.parent_frame_id,
                                           robot_local_device_name)
    rox_transform_world2base = cam_pose * rox_transform_cam2base

    #R_base2cam = R_cam2base.T
    #T_base2cam = - R_base2cam @ T_cam2base

    R_base2cam = rox_transform_world2base.inv().R
    T_base2cam = rox_transform_world2base.inv().p

    #debug
    print("FINAL RESULTS: ")
    print("str(R_cam2base)")
    # print(str(type(R_cam2base)))
    print(str(R_cam2base))
    print("str(T_cam2base)")
    # print(str(type(T_cam2base.flatten())))
    print(str(T_cam2base))

    print("str(R_base2cam)")
    # print(str(type(R_base2cam)))
    print(str(R_base2cam))
    print("str(T_base2cam)")
    # print(str(type(T_base2cam.flatten())))
    print(str(T_base2cam))

    pose_res = geom_util.rox_transform_to_named_pose(rox_transform_world2base)
    cov = np.eye(6) * 1e-5

    imgs_out2 = [
        image_util.array_to_compressed_image_jpg(i, 70) for i in imgs_out
    ]

    return pose_res, cov, imgs_out2, 0.0
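
calibrate() reduces the extrinsic calibration to a single solvePnP problem: the marker corner positions expressed in the robot base frame (from forward kinematics and the flange-to-marker transform) serve as the object points, and the detected pixel corners as the image points. A stripped-down sketch of that final step; the function and variable names are illustrative. Note that solvePnP returns the pose of the object frame (here the robot base) in the camera frame, which is then inverted.

import cv2
import numpy as np

def camera_extrinsics_from_correspondences(object_points, image_points, mtx, dist):
    obj = np.asarray(object_points, dtype=np.float64)   #3D points in the robot base frame
    img = np.asarray(image_points, dtype=np.float32)    #matching pixel coordinates
    ok, rvec, tvec = cv2.solvePnP(obj, img, mtx, dist)
    R_base_in_cam = cv2.Rodrigues(rvec)[0]              #maps base-frame points into the camera frame
    t_base_in_cam = tvec.reshape(3)
    #Invert to get the camera pose expressed in the robot base frame
    R_cam_in_base = R_base_in_cam.T
    t_cam_in_base = -R_cam_in_base.dot(t_base_in_cam)
    return R_cam_in_base, t_cam_in_base
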
def main():

    #Start timer to measure execution time
    t1 = time.time()

    #Subscribe to controller_state
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)

    #Force-torque
    if not "disable-ft" in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    #Initialize ros node of this process
    rospy.init_node('Placement_DJ_1', anonymous=True)

    #print "============ Starting setup"

    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()

    object_commander = ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()

    #subscribe to Gripper camera node for image acquisition
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)
    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/continuous_trigger', CameraTrigger)

    #Set controller command mode
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, ft_threshold_place, [])
    time.sleep(0.5)

    #Set location above the panel where the end effector goes first (in the world/base frame). This is the ideal location of the panel.
    tran = np.array([2.20120663258, 1.2145882986, 0.0798989466944])
    rot = np.array([[-0.9971185, 0.0071821, 0.0755188],
                    [0.0056502, 0.9997743, -0.0204797],
                    [-0.0756488, -0.0199939, -0.9969341]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target3 = copy.deepcopy(pose_target2)
    pose_target2.p[2] += 0.35  #35 cm above ideal location of panel

    #Execute movement to set location
    print "Executing initial path"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    #Initialize aruco boards and parameters
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters_create()
    parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    board_ground = cv2.aruco.GridBoard_create(5, 4, 0.04, 0.01, aruco_dict, 20)
    board_panel = cv2.aruco.GridBoard_create(8, 3, 0.025, 0.0075, aruco_dict,
                                             50)
    #    tag_ids=["vacuum_gripper_marker_1","leeward_mid_panel_marker_1", "aligner_ref_1", "aligner_ref_2", "aligner_ref_3", "aligner_ref_4"]
    #    boards=[gripper_board, panel_board, ref1, ref2, ref3, ref4]

    #Initialize camera intrinsic parameters #18285636_10_05_2018_5_params
    CamParam = CameraParams(2342.561249990927, 1209.151959040735,
                            2338.448312671424, 1055.254852652610, 1.0,
                            -0.014840837133389, -0.021008029929566,
                            3.320024219653553e-04, -0.002187550225028,
                            -0.025059986937316)

    #Subscribe to controller_state. Again?
    rospy.Subscriber("controller_state", controllerstate, callback)

    UV = np.zeros([74, 8])
    P = np.zeros([74, 3])

    #Load object points ground tag in panel tag coordinate system from mat file
    loaded_object_points_ground_in_panel_system_stage_1 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system_stage_2 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_1

    #focal lengths in pixel units (f_hat estimates for x and y)
    f_hat_u = 2342.561249990927  #2282.523358266698#2281.339593273153 #2446.88
    f_hat_v = 2338.448312671424  #2280.155828279608
    #functions like a gain, used with velocity to track position
    dt = 0.1
    du = 100.0
    dv = 100.0
    dutmp = 100.0
    dvtmp = 100.0
    TimeGain = [0.1, 0.1, 0.1]

    du_array = []
    dv_array = []
    dx_array = []
    iteration = 0
    stage = 1
    step_ts = 0.004

    Kc = 0.0002
    time_save = []
    FTdata_save = []
    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()

    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(),
        np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(),
        np.zeros([3, 1]),
        np.eye(3),
        np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]),
         rg.reshape([3, 1]),
         np.zeros([3, 3]),
         np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)
    FTread_array = []
    FTdata_array = []
    t_now_array = []

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])
    #while (pose_target2.p[2] > 0.185):
    #    while ((du>10) | (dv>10) | (pose_target2.p[2] > 0.172)):    #try changing du and dv to lower values(number of iterations increases)
    while (
        (du > 1) | (dv > 1) and (iteration < 125)
    ):  #try changing du and dv to lower values(number of iterations increases)
        #    while ((np.max(np.abs(dutmp))>0.5) | (np.max(np.abs(dvtmp))>0.5)):    #try changing du and dv to lower values(number of iterations increases)
        iteration += 1
        t_now_array.append(time.time())

        #Go to stage 2 of movement (mostly downward z motion)
        if ((du < 2) and (dv < 2) and (stage == 1)):
            #Save pose of marker boards after the iterations end (while in the final hovering position above nest)
            #Get pose of both ground and panel markers from detected corners
            retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
                objPoints_ground, imgPoints_ground, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
            retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
                objPoints_panel, imgPoints_panel, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

            print "============== Observed Pose difference in hovering position"
            observed_tvec_difference = tvec_ground - tvec_panel
            observed_rvec_difference = rvec_ground - rvec_panel
            print "tvec difference: ", observed_tvec_difference
            print "rvec differnece: ", observed_rvec_difference

            #Load ideal pose difference information from file
            loaded_rvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['rvec_difference']
            loaded_tvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['tvec_difference']

            print "============== Ideal Pose difference in hovering position"
            print "tvec difference: ", loaded_tvec_difference
            print "rvec differnece: ", loaded_rvec_difference

            tvec_difference_Above_Nest = loaded_tvec_difference - observed_tvec_difference
            rvec_difference_Above_Nest = loaded_rvec_difference - observed_rvec_difference

            print "============== Difference in pose difference in hovering position"
            print "tvec difference: ", tvec_difference_Above_Nest
            print "rvec differnece: ", rvec_difference_Above_Nest

            #Saving pose information to file
            filename_pose1 = "/home/rpi-cats/Desktop/DJ/Code/Data/Above_Nest_Pose_" + str(
                t1) + ".mat"
            scipy.io.savemat(filename_pose1,
                             mdict={
                                 'tvec_ground_Above_Nest':
                                 tvec_ground,
                                 'tvec_panel_Above_Nest':
                                 tvec_panel,
                                 'Rca_ground_Above_Nest':
                                 Rca_ground,
                                 'Rca_panel_Above_Nest':
                                 Rca_panel,
                                 'tvec_difference_Above_Nest':
                                 tvec_ground - tvec_panel,
                                 'rvec_difference_Above_Nest':
                                 rvec_ground - rvec_panel,
                                 'loaded_tvec_difference':
                                 loaded_tvec_difference,
                                 'loaded_rvec_difference':
                                 loaded_rvec_difference,
                                 'observed_tvec_difference':
                                 observed_tvec_difference,
                                 'observed_rvec_difference':
                                 observed_rvec_difference
                             })

            raw_input("Confirm Stage 2")
            stage = 2
            #			dt=0.02
            loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2
        #print pose_target2.p[2]

        #Print current robot pose at the beginning of this iteration
        Cur_Pose = controller_commander.get_current_pose_msg()
        print "============ Current Robot Pose"
        print Cur_Pose

        #Read new image
        last_ros_image_stamp = object_commander.ros_image_stamp
        try:
            ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            ros_gripper_2_trigger(False)
        except:
            pass
        wait_count = 0
        while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        result = object_commander.ros_image
        #Save
        #        filename = "Acquisition3_%d.jpg" % (time.time())
        #        scipy.misc.imsave(filename, result)
        #Display
        #        cv2.namedWindow('Aqcuired Image',cv2.WINDOW_NORMAL)
        #        cv2.imshow('Acquired Image',result)
        #        cv2.waitKey(1)

        #Detect tag corners in acquired image using aruco
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            result, aruco_dict, parameters=parameters)
        frame_with_markers_and_axis = result

        #Sort corners and ids according to ascending order of ids
        corners_original = copy.deepcopy(corners)
        ids_original = np.copy(ids)
        sorting_indices = np.argsort(ids_original, 0)
        ids_sorted = ids_original[sorting_indices]
        ids_sorted = ids_sorted.reshape([len(ids_original), 1])
        combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
        #        print "combined_list:",combined_list
        combined_list.sort()
        corners_sorted = [x for y, x in combined_list]
        ids = np.copy(ids_sorted)
        corners = copy.deepcopy(corners_sorted)

        #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
        false_ids_ind = np.where(ids > 73)
        mask = np.ones(ids.shape, dtype=bool)
        mask[false_ids_ind] = False
        ids = ids[mask]
        corners = np.array(corners)
        corners = corners[mask.flatten(), :]
        corners = list(corners)

        #Define object and image points of both tags
        objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
            board_ground, corners, ids)
        objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
            board_panel, corners, ids)
        objPoints_ground = objPoints_ground.reshape(
            [objPoints_ground.shape[0], 3])
        imgPoints_ground = imgPoints_ground.reshape(
            [imgPoints_ground.shape[0], 2])
        objPoints_panel = objPoints_panel.reshape(
            [objPoints_panel.shape[0], 3])
        imgPoints_panel = imgPoints_panel.reshape(
            [imgPoints_panel.shape[0], 2])

        #Get pose of both ground and panel markers from detected corners
        retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
            objPoints_ground, imgPoints_ground, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
        retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
            objPoints_panel, imgPoints_panel, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)

        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(
            corners[0:20], 0.04, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(
            corners[20:45], 0.025, CamParam.camMatrix, CamParam.distCoeff)
        tvec_all = np.concatenate(
            (tvec_all_markers_ground, tvec_all_markers_panel), axis=0)
        for i_ids, i_corners, i_tvec in zip(ids, corners, tvec_all):
            #print 'i_corners',i_corners,i_corners.reshape([1,8])
            UV[i_ids, :] = i_corners.reshape(
                [1, 8])  #np.average(i_corners, axis=1)
            P[i_ids, :] = i_tvec

        #Depth (z) of the panel tags in the camera frame, used in the image Jacobian below
        Z = 1 * P[20:40,
                  2]  #- [0.68184539, 0.68560932, 0.68966803, 0.69619578])

        #Proceed only if poses of both boards were estimated successfully
        if retVal_ground != 0 and retVal_panel != 0:
            dutmp = []
            dvtmp = []

            #Pixel estimates of the ideal ground tag location
            reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
                loaded_object_points_ground_in_panel_system.transpose(),
                rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
            reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape(
                [reprojected_imagePoints_ground_2.shape[0], 2])
            #            print "Image Points Ground:", imgPoints_ground
            #            print "Reprojected Image Points Ground2:", reprojected_imagePoints_ground_2
            #            print "Reprojectoin error:",imgPoints_ground-reprojected_imagePoints_ground_2
            #            print "Average Reprojectoin error: ",np.mean(imgPoints_ground-reprojected_imagePoints_ground_2, axis=0)

            #print "Reprojected Image Points Ground2 type:", type(reprojected_imagePoints_ground_2)

            #plot image points for ground tag from corner detection and from re-projections
            for point1, point2 in zip(
                    imgPoints_ground,
                    np.float32(reprojected_imagePoints_ground_2)):
                cv2.circle(frame_with_markers_and_axis, tuple(point1), 5,
                           (0, 0, 255), 3)
                cv2.circle(frame_with_markers_and_axis, tuple(point2), 5,
                           (255, 0, 0), 3)

#            cv2.namedWindow('Aqcuired Image',cv2.WINDOW_NORMAL)
#            cv2.imshow('Acquired Image',frame_with_markers_and_axis)
#            cv2.waitKey(1)
#Save
            filename_image = "/home/rpi-cats/Desktop/DJ/Code/Images/Acquisition_" + str(
                t1) + "_" + str(iteration) + ".jpg"
            scipy.misc.imsave(filename_image, frame_with_markers_and_axis)

            #Go through a particular point in all tags to build the complete Jacobian
            for ic in range(4):
                #uses first set of tags, numbers used to offset camera frame, come from camera parameters
                #UV_target = np.vstack([UV[9:13,2*ic]-1209.151959040735,UV[9:13,2*ic+1]-1055.254852652610]).T - UV_shift[:,(2*ic):(2*ic+2)]		#shift corner estimates to the image centers. Necessary for the image jacobian to work.
                reprojected_imagePoints_ground_2 = np.reshape(
                    reprojected_imagePoints_ground_2, (20, 8))
                UV_target = np.vstack([
                    reprojected_imagePoints_ground_2[:, 2 * ic] -
                    1209.151959040735,
                    reprojected_imagePoints_ground_2[:, 2 * ic + 1] -
                    1055.254852652610
                ]).T
                uc = UV_target[:, 0]
                vc = UV_target[:, 1]

                #                print 'UV_target', UV_target
                UV_current = np.vstack([
                    UV[20:40, 2 * ic] - 1209.151959040735,
                    UV[20:40, 2 * ic + 1] - 1055.254852652610
                ]).T
                #Difference between desired (reprojected) and current tag corner locations
                delta_UV = UV_target - UV_current
                #                print 'delta_UV: ',ic, delta_UV
                dutmp.append(np.mean(delta_UV[:, 0]))
                dvtmp.append(np.mean(delta_UV[:, 1]))
                for tag_i in range(20):
                    if tag_i == 0:
                        J_cam_tmp = 1.0 * np.array(
                            [[
                                -f_hat_u / Z[tag_i], 0.0, uc[tag_i] / Z[tag_i],
                                uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                (uc[tag_i] * uc[tag_i] / f_hat_u + f_hat_u),
                                vc[tag_i]
                            ],
                             [
                                 0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                 Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v +
                                 f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                 f_hat_v, -uc[tag_i]
                             ]])
                    else:
                        J_cam_tmp = np.concatenate(
                            (J_cam_tmp,
                             1.0 * np.array(
                                 [[
                                     -f_hat_u / Z[tag_i], 0.0,
                                     uc[tag_i] / Z[tag_i],
                                     uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                     (uc[tag_i] * uc[tag_i] / f_hat_u +
                                      f_hat_u), vc[tag_i]
                                 ],
                                  [
                                      0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                      Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v
                                      + f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                      f_hat_v, -uc[tag_i]
                                  ]])),
                            axis=0)
                #camera jacobian
                if ic == 0:
                    J_cam = J_cam_tmp
                    delta_UV_all = delta_UV.reshape(40, 1)
                    UV_target_all = UV_target.reshape(40, 1)
                else:
                    J_cam = np.vstack([J_cam, J_cam_tmp])
                    delta_UV_all = np.vstack(
                        [delta_UV_all, delta_UV.reshape(40, 1)])
                    UV_target_all = np.vstack(
                        [UV_target_all,
                         UV_target.reshape(40, 1)])

            print 'dutmp: ', dutmp
            print 'dvtmp: ', dvtmp
            du = np.mean(np.absolute(dutmp))
            dv = np.mean(np.absolute(dvtmp))
            print 'Average du of all points:', du, 'Average dv of all points:', dv
            du_array.append(du)
            dv_array.append(dv)
            #            print 'delta_UV_all',delta_UV_all
            dx = np.matmul(np.linalg.pinv(J_cam), np.array(delta_UV_all))
            dx_array.append(dx)
            #            print 'dx:',dx             #translational and angular velocity
            #print '1:',dx[0:3,0]
            #print '2:',np.hstack([0,-dx[0:3,0]])
            #print '3:',np.hstack([dx[0:3,0].reshape(3,1),rox.hat(np.array(dx[0:3,0]))])
            #coordinate system change, replace with tf transform
            #dx_w = np.array([dx[3,0],-dx[4,0],-dx[5,0]])        #angular velocity
            #map omega to current quaternion
            #Omega = 0.5*np.vstack([np.hstack([0,-dx_w]),np.hstack([dx_w.reshape(3,1),rox.hat(np.array(dx_w))])])
            #print Omega
            #rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
            #rot = np.matmul(expm(Omega*dt),np.array(rot_current))
            #trans_current = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]
            #trans = trans_current + np.array([dx[0,0],-dx[1,0],-dx[2,0]])*TimeGain

            #pose_target2 = rox.Transform(rox.q2R([rot[0], rot[1], rot[2], rot[3]]), trans)
            #Vz=0
            dx = np.array([
                dx[3, 0], -dx[4, 0], -dx[5, 0], dx[0, 0], -dx[1, 0], -dx[2, 0]
            ])

            if stage == 2:
                T = listener.lookupTransform("base", "link_6", rospy.Time(0))
                rg = 9.8 * np.matmul(
                    np.matmul(T.R, Tran_z).transpose(),
                    np.array([0, 0, 1]).transpose())
                A1 = np.hstack([
                    rox.hat(rg).transpose(),
                    np.zeros([3, 1]),
                    np.eye(3),
                    np.zeros([3, 3])
                ])
                A2 = np.hstack([
                    np.zeros([3, 3]),
                    rg.reshape([3, 1]),
                    np.zeros([3, 3]),
                    np.eye(3)
                ])
                A = np.vstack([A1, A2])
                FTdata_est = np.matmul(A, Vec_wrench)
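                # FTdata_est is the payload gravity wrench predicted from the
                # regressor A and the identified parameter vector Vec_wrench;
                # subtracting the change in this prediction (relative to the
                # initial configuration) from the change in the raw reading
                # leaves approximately the external contact wrench.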
                FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
                print 'FTread:', FTread

                print 'Z', FTread[-1]
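                # Simple admittance law on the tool z axis: pick a desired
                # contact force F_d based on the current reading, then command
                # a descent velocity Vz = Kc * (F_d - F_z) so the end effector
                # keeps pushing down until the measured normal force reaches F_d.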
                if FTread[-1] > -100:
                    F_d = -150
                    Kc = 0.0001
                else:
                    Kc = 0.0001
                    F_d = -200

                Vz = Kc * (F_d - FTread[-1])
                print 'Vz', Vz
                dx[-1] = Vz + dx[-1]
                FTread_array.append(FTread)
                FTdata_array.append(FTdata)

            current_joint_angles = controller_commander.get_current_joint_values(
            )
            J = rox.robotjacobian(robot, current_joint_angles)
            joints_vel = np.linalg.pinv(J).dot(np.array(dx))

            print 'vel_norm:', np.linalg.norm(joints_vel)

            t_duration = np.log(np.linalg.norm(joints_vel) * 10 + 1) + 0.1
            if t_duration < 0.1:
                t_duration = 0.1

            goal = trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(dt),
                np.array(current_joint_angles), t_duration)
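            # Resolved-rate step: the Cartesian correction dx was mapped to a
            # joint velocity through the pseudoinverse of the manipulator
            # Jacobian (joints_vel = pinv(J) * dx above), and the increment
            # joints_vel*dt is executed as a short trapezoidal joint trajectory.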
            '''
            plan=RobotTrajectory()    
            plan.joint_trajectory.joint_names=['joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6']
        
            plan.joint_trajectory.header.frame_id='/world'
            p1=JointTrajectoryPoint()
            p1.positions = current_joint_angles
            p1.velocities = np.zeros((6,))
            p1.accelerations = np.zeros((6,))
            p1.time_from_start = rospy.Duration(0)     
                 
            p2=JointTrajectoryPoint()
            p2.positions = np.array(p1.positions) + joints_vel.dot(dt)
            p2.velocities = np.zeros((6,))
            p2.accelerations = np.zeros((6,))
            p2.time_from_start = rospy.Duration(dt)

            controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])

        
            plan.joint_trajectory.points.append(p1)
            plan.joint_trajectory.points.append(p2)
            '''

            client = actionlib.SimpleActionClient("joint_trajectory_action",
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()

            client.send_goal(goal)
            client.wait_for_result()
            res = client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

            print res

            #break
            #raw_input("confirm move...")
            '''
            print "============ Executing Current Iteration movement"
            try:
                controller_commander.execute(plan)
                #controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
            except:
                pass
            '''

            print 'Current Iteration Finished.'


#            filename = "delta_UV_all.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, delta_UV_all)
#            f_handle.close()
#
#            filename = "UV_target_all.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, UV_target_all)
#            f_handle.close()
#
#            filename = "P.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, P)
#            f_handle.close()
#
#
#            filename = "Robot.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, np.hstack([np.array(trans_current),np.array(rot_current)]))
#            f_handle.close()

    #Saving iteration data to file
    filename_data = "/home/rpi-cats/Desktop/DJ/Code/Data/Data_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_data,
                     mdict={
                         'du_array': du_array,
                         'dv_array': dv_array,
                         'dx_array': dx_array
                     })

    #Saving force data to file
    filename_force_data = "/home/rpi-cats/Desktop/DJ/Code/Data/Data_force_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_force_data,
                     mdict={
                         'FTread': FTread_array,
                         'FTdata': FTdata_array,
                         't_now_array': t_now_array
                     })

    #    ####Hovering error calculation
    #    #Read new image
    #    last_ros_image_stamp = object_commander.ros_image_stamp
    #    try:
    #        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
    #        ros_gripper_2_trigger(False)
    #    except:
    #        pass
    #    wait_count=0
    #    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
    #        if wait_count > 50:
    #            raise Exception("Image receive timeout")
    #        time.sleep(0.25)
    #        wait_count += 1
    #    result = object_commander.ros_image
    #
    #    #Detect tag corners in acquired image using aruco
    #    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(result, aruco_dict, parameters=parameters)
    #
    #    #Sort corners and ids according to ascending order of ids
    #    corners_original=copy.deepcopy(corners)
    #    ids_original=np.copy(ids)
    #    sorting_indices=np.argsort(ids_original,0)
    #    ids_sorted=ids_original[sorting_indices]
    #    ids_sorted=ids_sorted.reshape([len(ids_original),1])
    #    combined_list=zip(np.ndarray.tolist(ids.flatten()),corners_original)
    #    combined_list.sort()
    #    corners_sorted=[x for y,x in combined_list]
    #    ids=np.copy(ids_sorted)
    #    corners=copy.deepcopy(corners_sorted)
    #
    #    #Remove ids and corresponding corners not in range (Parasitic detections in random locations in the image)
    #    false_ids_ind = np.where(ids>73)
    #    mask = np.ones(ids.shape, dtype=bool)
    #    mask[false_ids_ind] = False
    #    ids = ids[mask]
    #    corners = np.array(corners)
    #    corners = corners[mask.flatten(),:]
    #    corners = list(corners)
    #
    #    #Define object and image points of both tags
    #    objPoints_ground, imgPoints_ground	=	aruco.getBoardObjectAndImagePoints(board_ground, corners, ids)
    #    objPoints_panel, imgPoints_panel	=	aruco.getBoardObjectAndImagePoints(board_panel, corners, ids)
    #    objPoints_ground=objPoints_ground.reshape([objPoints_ground.shape[0],3])
    #    imgPoints_ground=imgPoints_ground.reshape([imgPoints_ground.shape[0],2])
    #    objPoints_panel=objPoints_panel.reshape([objPoints_panel.shape[0],3])
    #    imgPoints_panel=imgPoints_panel.reshape([imgPoints_panel.shape[0],2])
    #
    #    #Save pose of marker boards after the iterations end(while in the final hovering position above nest)
    #    #Get pose of both ground and panel markers from detected corners
    #    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(objPoints_ground, imgPoints_ground, CamParam.camMatrix, CamParam.distCoeff)
    #    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    #    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(objPoints_panel, imgPoints_panel, CamParam.camMatrix, CamParam.distCoeff)
    #    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)
    #
    #    print"============== Observed Pose difference in hovering position"
    #    observed_tvec_difference=tvec_ground-tvec_panel
    #    observed_rvec_difference=rvec_ground-rvec_panel
    #    print "tvec difference: ",observed_tvec_difference
    #    print "rvec differnece: ",observed_rvec_difference
    #
    #    #Load ideal pose difference information from file
    #    loaded_rvec_difference = loadmat('/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat')['rvec_difference']
    #    loaded_tvec_difference = loadmat('/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat')['tvec_difference']
    #
    #    print"============== Ideal Pose difference in hovering position"
    #    print "tvec difference: ",loaded_tvec_difference
    #    print "rvec differnece: ",loaded_rvec_difference
    #
    #    print"============== Difference in pose difference in hovering position"
    #    print "tvec difference: ",loaded_tvec_difference-observed_tvec_difference
    #    print "rvec differnece: ",loaded_rvec_difference-observed_rvec_difference
    #
    #    #Saving pose information to file
    #    filename_pose1 = "/home/armabb6640/Desktop/DJ/Code/Data/Above_Nest_Pose_"+str(t1)+".mat"
    #    scipy.io.savemat(filename_pose1, mdict={'tvec_ground_Above_Nest':tvec_ground, 'tvec_panel_Above_Nest':tvec_panel,
    #    'Rca_ground_Above_Nest':Rca_ground, 'Rca_panel_Above_Nest': Rca_panel, 'tvec_difference_Above_Nest': tvec_ground-tvec_panel, 'rvec_difference_Above_Nest': rvec_ground-rvec_panel})

    print "============ Final Push Down to Nest"
    # while (1):
    #DJ Final push from hovering above nest into resting in the nest
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.2, [], [])
    #    pose_target2.p[2] = 0.115
    #    pose_target2.p[2] = 0.15

    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [
        Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
        Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z
    ]
    trans_current = [
        Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
        Cur_Pose.pose.position.z
    ]
    pose_target2.R = rox.q2R(
        [rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] -= 0.11
    #    pose_target2.p[0] += 0.005
    #    pose_target2.p[1] -= 0.002

    #    controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger(False)
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (Parasitic detections in random locations in the image)
    false_ids_ind = np.where(ids > 73)
    mask = np.ones(ids.shape, dtype=bool)
    mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the iterations end(while in the final hovering position above nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference - observed_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference - observed_rvec_difference

    #Saving pose information to file
    filename_pose2 = "/home/rpi-cats/Desktop/DJ/Code/Data/In_Nest_Pose_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_pose2,
                     mdict={
                         'tvec_ground_In_Nest': tvec_ground,
                         'tvec_panel_In_Nest': tvec_panel,
                         'Rca_ground_In_Nest': Rca_ground,
                         'Rca_panel_In_Nest': Rca_panel,
                         'tvec_difference_In_Nest': tvec_ground - tvec_panel,
                         'rvec_difference_In_Nest': rvec_ground - rvec_panel,
                         'loaded_tvec_difference': loaded_tvec_difference,
                         'loaded_rvec_difference': loaded_rvec_difference,
                         'observed_tvec_difference': observed_tvec_difference,
                         'observed_rvec_difference': observed_rvec_difference
                     })

    #    print "============ Lift gripper!"
    #    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
    #    raw_input("confirm release vacuum")
    #    #rapid_node.set_digital_io("Vacuum_enable", 0)
    #    print "VACUUM OFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    #    time.sleep(0.5)
    #    pose_target3.p[2] += 0.2
    #
    #
    #    #print 'Target:',pose_target3
    #
    #    #print "============ Executing plan4"
    #    #controller_commander.compute_cartesian_path_and_move(pose_target3, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
    def transform_to_rox_transform(self, rr_transform):
        R = self.quaternion_to_R(rr_transform["rotation"])
        p = self.vector3_to_xyz(rr_transform["translation"])
        return rox.Transform(R, p)
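
The quaternion_to_R and vector3_to_xyz helpers referenced by the conversion methods in these examples are not shown. A minimal sketch of what they might look like, assuming Robot Raconteur-style geometry structures with named fields and rox.q2R taking a quaternion in (w, x, y, z) order:

import numpy as np
import general_robotics_toolbox as rox

def quaternion_to_R(rr_quat):
    # rox.q2R expects the quaternion in (w, x, y, z) order
    return rox.q2R(np.array([rr_quat["w"], rr_quat["x"], rr_quat["y"], rr_quat["z"]]))

def vector3_to_xyz(rr_vec):
    return np.array([rr_vec["x"], rr_vec["y"], rr_vec["z"]])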
Example #24
0

Q=[0.02196692, -0.10959773,  0.99369529, -0.00868731]
P=[1.8475985 , -0.04983688,  0.82486047]

if __name__ == '__main__':
    
    controller_commander=ControllerCommander()
    
    P=np.reshape(P,(3,))    
        
    rospy.init_node('Reset_Start_pos_wason2', anonymous=True)
    
    controller_commander.set_controller_mode(controller_commander.MODE_HALT, 1, [],[])
    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.5, [],[])  
        
    print "============ Printing robot Pose"
    print controller_commander.get_current_pose_msg()  
    #print robot.get_current_state().joint_state.position
    print "============ Generating plan 1"
    
    pose_target=rox.Transform(rox.q2R(Q), np.copy(P))
        
    print 'Target:',pose_target
    
    print "============ Executing plan1"
    controller_commander.plan_and_move(pose_target)        
    print 'Execution Finished.'
  
       
    def pbvs_jacobian(self):
        self.controller_commander.set_controller_mode(
            self.controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [], [])
        tvec_err = [100, 100, 100]
        rvec_err = [100, 100, 100]

        self.FTdata_0 = self.FTdata

        error_transform = rox.Transform(rox.rpy2R([2, 2, 2]),
                                        np.array([100, 100, 100]))

        FT_data_ori = []
        FT_data_biased = []
        err_data_p = []
        err_data_rpy = []
        joint_data = []
        time_data = []
        #TODO: should listen to stage_3_tol_r not 1 degree
        print self.desired_camera2_transform
        #R_desired_cam = self.desired_camera2_transform.R.dot(self.desired_transform.R)
        #R_desired_cam = R_desired_cam.dot(self.desired_camera2_transform.R.transpose())
        #t_desired_cam = -self.desired_camera2_transform.R.dot(self.desired_transform.p)

        while (error_transform.p[2] > 0.01 or np.linalg.norm(
            [error_transform.p[0], error_transform.p[1]]) > self.stage3_tol_p
               or
               np.linalg.norm(rox.R2rpy(error_transform.R)) > np.deg2rad(1)):

            img = self.receive_image()

            fixed_marker_transform, payload_marker_transform, error_transform = self.compute_error_transform(
                img)
            #print self.desired_transform.R.T, -fixed_marker_transform.R.dot(self.desired_transform.p)

            R_desired_cam = fixed_marker_transform.R.dot(
                self.desired_transform.R)
            R_desired_cam = R_desired_cam.dot(
                fixed_marker_transform.R.transpose())
            t_desired_cam = -fixed_marker_transform.R.dot(
                self.desired_transform.p)

            # Compute error directly in the camera frame
            observed_R_difference = np.dot(
                payload_marker_transform.R,
                fixed_marker_transform.R.transpose())
            k, theta = rox.R2rot(
                np.dot(observed_R_difference, R_desired_cam.transpose())
            )  #np.array(rox.R2rpy(rvec_err1))
            rvec_err1 = k * theta
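            # k*theta is the axis-angle (rotation vector) form of the remaining
            # orientation error, expressed in the camera frame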

            observed_tvec_difference = fixed_marker_transform.p - payload_marker_transform.p
            tvec_err1 = -fixed_marker_transform.R.dot(
                self.desired_transform.p) - observed_tvec_difference
            #print 'SPOT: ',tvec_err1, rvec_err1
            # Map error to the robot spatial velocity
            world_to_camera_tf = self.tf_listener.lookupTransform(
                "world", "gripper_camera_2", rospy.Time(0))
            camera_to_link6_tf = self.tf_listener.lookupTransform(
                "gripper_camera_2", "link_6", rospy.Time(0))

            t21 = -np.dot(
                rox.hat(
                    np.dot(world_to_camera_tf.R,
                           (camera_to_link6_tf.p -
                            payload_marker_transform.p.reshape((1, 3))).T)),
                world_to_camera_tf.R)  #np.zeros((3,3))#

            # v = R_oc*v_c + (R_oc*omega_c) x (r_pe)_o = R_oc*v_c - (r_pe)_o x (R_oc*omega_c)
            tvec_err = t21.dot(rvec_err1).reshape(
                (3, )) + world_to_camera_tf.R.dot(tvec_err1).reshape((3, ))
            # omega = R_oc*omega_c
            rvec_err = world_to_camera_tf.R.dot(rvec_err1).reshape((3, ))

            tvec_err = np.clip(tvec_err, -0.2, 0.2)
            rvec_err = np.clip(rvec_err, -np.deg2rad(5), np.deg2rad(5))

            if tvec_err[2] < 0.03:
                rospy.loginfo("Only Compliance Control")
                tvec_err[2] = 0

            rot_err = rox.R2rpy(error_transform.R)
            rospy.loginfo("tvec difference: %f, %f, %f", error_transform.p[0],
                          error_transform.p[1], error_transform.p[2])
            rospy.loginfo("rvec difference: %f, %f, %f", rot_err[0],
                          rot_err[1], rot_err[2])

            dx = -np.concatenate((rvec_err, tvec_err)) * self.K_pbvs

            print np.linalg.norm([error_transform.p[0], error_transform.p[1]])
            print np.linalg.norm(rox.R2rpy(error_transform.R))

            # Compliance Force Control
            if (not self.ft_flag):
                raise Exception("havent reached FT callback")
            # Compute the external force
            FTread = self.FTdata - self.FTdata_0  # (F)-(F0)
            print '================ FT1 =============:', FTread
            print '================ FT2 =============:', self.FTdata

            if FTread[-1] > (self.F_d_set1 + 50):
                F_d = self.F_d_set1
            else:
                F_d = self.F_d_set2

            if (self.FTdata == 0).all():
                rospy.loginfo("FT data overflow")
                dx[-1] += self.K_pbvs * 0.004
            else:
                tx_correct = 0
                if abs(self.FTdata[0]) > 80:
                    tx_correct = 0.0002 * (abs(self.FTdata[0]) - 80)

                Vz = self.Kc * (F_d - FTread[-1]) + tx_correct
                dx[-1] = dx[-1] + Vz

            print "dx:", dx

            current_joint_angles = self.controller_commander.get_current_joint_values(
            )
            joints_vel = QP_abbirb6640(
                np.array(current_joint_angles).reshape(6, 1), np.array(dx))
            goal = self.trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(1),
                np.array(current_joint_angles), 0.008, 0.008,
                0.015)  #acc,dcc,vmax)

            print "joints_vel:", joints_vel

            self.client.wait_for_server()
            self.client.send_goal(goal)
            self.client.wait_for_result()
            res = self.client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

            FT_data_ori.append(self.FTdata)
            FT_data_biased.append(FTread)
            err_data_p.append(error_transform.p)
            err_data_rpy.append(rot_err)
            joint_data.append(current_joint_angles)
            time_data.append(time.time())

        filename_pose = "/home/rpi-cats/Desktop/YC/Data/Panel2_Placement_In_Nest_Pose_" + str(
            time.time()) + ".mat"
        scipy.io.savemat(filename_pose,
                         mdict={
                             'FT_data_ori': FT_data_ori,
                             'FT_data_biased': FT_data_biased,
                             'err_data_p': err_data_p,
                             'err_data_rpy': err_data_rpy,
                             'joint_data': joint_data,
                             'time_data': time_data
                         })

        rospy.loginfo("End  ====================")
    def named_transform_to_rox_transform(self, rr_named_transform):
        R = self.quaternion_to_R(rr_named_transform.transform["rotation"])
        p = self.vector3_to_xyz(rr_named_transform.transform["translation"])
        return rox.Transform(R, p, _name_from_identifier(rr_named_transform.parent_frame), \
            _name_from_identifier(rr_named_transform.child_frame))
Example #27
0
    def _do_match_with_pose(self,image, template, intrinsic_calib, extrinsic_calib, object_z, roi):
        
        matcher_roi = None

        if roi is not None:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            matcher_roi = (roi_x, roi_y, roi_w, roi_h, -roi_theta)


        # execute the image detection using opencv
        matcher = TemplateMatchingMultiAngleWithROI(template,image,matcher_roi)
        # return_result_image = True
        
        match_center, template_wh, match_angle, detection_result_img = matcher.detect_object(True)
           
        # detection_result.width
        # detection_result.height
        x = match_center[0]
        y = match_center[1]
        theta = match_angle
        src = np.asarray([x,y], dtype=np.float32)
        src = np.reshape(src,(-1,1,2)) # Reshape as OpenCV requires (N,1,2)
        # print(src)
        # Now that the detection result is known in the image frame,
        # find the detected pose in the camera frame using the given z distance to the camera.
        # To do that, first get the camera parameters:
        # Load the camera matrix and distortion coefficients from the calibration result.
        

        mtx = intrinsic_calib.K
        d = intrinsic_calib.distortion_info.data
        dist = np.array([d.k1,d.k2,d.p1,d.p2,d.k3])

        T_cam = self._geom_util.named_pose_to_rox_transform(extrinsic_calib.pose)

        #TODO: Figure out a better value for this
        object_z_cam_dist = abs(T_cam.p[2]) - object_z

        # Find the corresponding world pose of the detected pose in camera frame
        dst = cv2.undistortPoints(src,mtx,dist) # dst is Xc/Zc and Yc/Zc in the same shape as src
        dst = dst * float(object_z_cam_dist) * 1000.0 # Multiply by the given Zc distance to recover the coordinates; the factor of 1000 converts Zc from meters to millimeters to match the other units
        dst = np.squeeze(dst) * 0.001 # Xc and Yc as vector

        # Finally, the translation from the camera frame to the detected object center, expressed in the camera frame, is T = [Xc,Yc,Zc]
        Xc = dst[0]
        Yc = dst[1]
        Zc = float(object_z_cam_dist)
        T = np.asarray([Xc,Yc,Zc])

        # Now let's find the orientation of the detected object with respect to the camera.
        # We assume the object's +z axis points towards the camera and that the xy planes of the object and the camera are parallel,
        # So the rotation matrix would be
        theta = np.deg2rad(theta) #convert theta from degrees to radian
        R_co = np.asarray([[math.cos(theta),-math.sin(theta),0],[-math.sin(theta),-math.cos(theta),0],[0,0,-1]])

        T_obj_cam_frame = rox.Transform(R_co, T, "camera", "object")

        T_obj = T_cam * T_obj_cam_frame
        
        # TODO: Better adjustment of Z height?
        T_obj.p[2] = object_z

        ret1 = self._matched_template_3d_type()
        ret1.pose = self._named_pose_with_covariance_type()
        ret1.pose.pose = self._geom_util.rox_transform_to_named_pose(T_obj)
        ret1.pose.covariance= np.zeros((6,6))
        ret1.confidence = 0

        ret = self._template_matching_result_3d_type()
        ret.template_matches = [ret1]
        ret.display_image = self._image_util.array_to_compressed_image_jpg(detection_result_img,70)

        return ret
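
The back-projection step in _do_match_with_pose can be checked in isolation: cv2.undistortPoints returns normalized image coordinates (Xc/Zc, Yc/Zc), so multiplying by a known distance Zc recovers the position in the camera frame. A small sketch with placeholder intrinsics (the numbers are illustrative only):

import numpy as np
import cv2

def pixel_to_camera_frame(u, v, Zc, camera_matrix, dist_coeffs):
    src = np.array([[[u, v]]], dtype=np.float32)                  # (N, 1, 2) as OpenCV expects
    norm = cv2.undistortPoints(src, camera_matrix, dist_coeffs)   # normalized (Xc/Zc, Yc/Zc)
    x_n, y_n = norm[0, 0]
    return np.array([x_n * Zc, y_n * Zc, Zc])

K = np.array([[2400.0, 0.0, 960.0],
              [0.0, 2400.0, 600.0],
              [0.0, 0.0, 1.0]])
p_cam = pixel_to_camera_frame(1000.0, 650.0, 0.5, K, np.zeros(5))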
    def pose_to_rox_transform(self, rr_pose):
        R = self.quaternion_to_R(rr_pose["orientation"])
        p = self.vector3_to_xyz(rr_pose["position"])
        return rox.Transform(R, p)
Example #29
0
def tf_lookup(target_frame, source_frame):
    (trans1, rot1) = listener.lookupTransform(target_frame, source_frame,
                                              rospy.Time(0))
    return rox.Transform(rox.q2R([rot1[3], rot1[0], rot1[1], rot1[2]]),
                         trans1)
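
A possible usage of tf_lookup, chaining two lookups with the rox.Transform multiplication operator (the frame names are taken from the examples above and may differ on a real system):

T_world_link6 = tf_lookup("world", "link_6")
T_link6_cam = tf_lookup("link_6", "gripper_camera_2")
T_world_cam = T_world_link6 * T_link6_cam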
    def named_pose_to_rox_transform(self, rr_named_pose):
        R = self.quaternion_to_R(rr_named_pose.pose["orientation"])
        p = self.vector3_to_xyz(rr_named_pose.pose["position"])
        return rox.Transform(R, p, _name_from_identifier(rr_named_pose.parent_frame), \
            _name_from_identifier(rr_named_pose.frame))