def __init__(self, arm_controller_ns="", move_group="move_group", goal_position_tolerance=0.04, planning_time=30, rox_robot=None):

    moveit_commander.roscpp_initialize(sys.argv)

    self.moveit_robot = moveit_commander.RobotCommander()
    self.moveit_scene = moveit_commander.PlanningSceneInterface()
    if isinstance(move_group, basestring):
        self.moveit_group = moveit_commander.MoveGroupCommander(move_group)
        self.moveit_group.set_goal_position_tolerance(goal_position_tolerance)
        self.moveit_group.allow_replanning(True)
        self.moveit_group.set_planning_time(planning_time)
    else:
        self.moveit_group = move_group

    if isinstance(rox_robot, rox.Robot):
        self.rox_robot = rox_robot
    elif isinstance(rox_robot, basestring):
        self.rox_robot = rox_urdf.robot_from_parameter_server(rox_robot)
    elif rox_robot is None:
        self.rox_robot = rox_urdf.robot_from_parameter_server()
    else:
        raise AssertionError("Invalid parameter type for rox_robot")

    set_controller_mode_name = rospy.names.ns_join(arm_controller_ns, "set_controller_mode")
    self._set_controller_mode = rospy.ServiceProxy(set_controller_mode_name, SetControllerMode)
    self._arm_controller_ns = arm_controller_ns
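
# Usage sketch (hypothetical object name; assumes this __init__ belongs to the
# MoveIt-based ControllerCommander class used by the scripts below):
#
#   cc = ControllerCommander(arm_controller_ns="", goal_position_tolerance=0.02)
#   cc.set_controller_mode(cc.MODE_AUTO_TRAJECTORY, 0.4, [], [])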
def main(robot_interface=None, ft_sensor_interface=None):

    if not rospy.core.is_initialized():
        rospy.init_node("safe_kinematic_controller")

    print rospy.get_param_names()

    robot_root_link = rospy.get_param("~robot_root_link", None)
    robot_tip_link = rospy.get_param("~robot_tip_link", None)

    rate_hz = rospy.get_param("~publish_frequency", 250.0)

    robot = rox_urdf.robot_from_parameter_server(root_link=robot_root_link,
                                                 tip_link=robot_tip_link)

    controller = ROSController(robot,
                               1.0 / rate_hz,
                               robot_interface=robot_interface,
                               ft_sensor_interface=ft_sensor_interface)

    rate = rospy.Rate(rate_hz)

    for _ in xrange(100):
        if rospy.is_shutdown():
            break
        if controller.read_robot_joint_position()[0] == ControllerMode.SUCCESS:
            break
        rate.sleep()

    while not rospy.is_shutdown():
        controller.step()
        rate.sleep()
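
# Typical entry point for this node; a minimal sketch assuming the module is
# run directly (a launch file may instead construct the interfaces and call
# main(robot_interface, ft_sensor_interface)):
if __name__ == "__main__":
    main()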
def __init__(self, arm_controller_ns="", rox_robot=None):

    if isinstance(rox_robot, rox.Robot):
        self.rox_robot = rox_robot
    elif isinstance(rox_robot, basestring):
        self.rox_robot = rox_urdf.robot_from_parameter_server(rox_robot)
    elif rox_robot is None:
        self.rox_robot = rox_urdf.robot_from_parameter_server()
    else:
        raise AssertionError("Invalid parameter type for rox_robot")

    set_controller_mode_name = rospy.names.ns_join(arm_controller_ns, "set_controller_mode")
    self._set_controller_mode = rospy.ServiceProxy(set_controller_mode_name, SetControllerMode)
    self._arm_controller_ns = arm_controller_ns
    joint_trajectory_action_name = rospy.names.ns_join(arm_controller_ns, "joint_trajectory_action")
    self.joint_trajectory_action = actionlib.SimpleActionClient(joint_trajectory_action_name, FollowJointTrajectoryAction)
    self.controller_state_name = rospy.names.ns_join(arm_controller_ns, "controller_state")
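
    # Note: callers should wait for the action server before sending goals,
    # e.g. (standard actionlib API):
    #     self.joint_trajectory_action.wait_for_server()
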
def main():
    step_ts = 0.004
    rospy.init_node("test_moveit_commander_custom_trajectory", anonymous=True)
    rospy.Subscriber("controller_state", ControllerState, callback_function)
    robot = urdf.robot_from_parameter_server()
    controller_commander = controller_commander_pkg.ControllerCommander()

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, [], [])
    time.sleep(0.5)
    tran = np.array(
        [2.197026484647054, 1.2179574262842452, 0.12376598588449844])
    rot = np.array([[-0.99804142, 0.00642963, 0.06222524],
                    [0.00583933, 0.99993626, -0.00966372],
                    [-0.06228341, -0.00928144, -0.99801535]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target2.p[2] += 0.20

    print 'Target:', pose_target2
    print "============ Move Close to Panel"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])
    time.sleep(1.0)
    Kc = 0.004
    time_save = []
    FTdata_save = []
    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()

    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    #controller_commander=controller_commander_pkg.arm_composites_manufacturing_controller_commander()
    time.sleep(1.0)

    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(),
        np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(),
        np.zeros([3, 1]),
        np.eye(3),
        np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]),
         rg.reshape([3, 1]),
         np.zeros([3, 3]),
         np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)
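
    #The block above evaluates the linear tool-wrench calibration model
    #FT_est = A(rg) * Vec_wrench: rg is gravity expressed in the sensor frame,
    #A1 holds the torque regressors (hat(rg)^T plus bias columns) and A2 the
    #force regressors. FTdata_0est is the model estimate at the starting pose,
    #subtracted below to de-bias the raw force/torque readings.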
    #print 'Test4:',controller_commander.ControllerState

    for i in range(400):
        tic = time.time()
        plan = RobotTrajectory()
        plan.joint_trajectory.joint_names = [
            'joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6'
        ]
        current_joint_angles = controller_commander.get_current_joint_values()

        plan.joint_trajectory.header.frame_id = '/world'
        p1 = JointTrajectoryPoint()
        p1.positions = current_joint_angles
        p1.velocities = np.zeros((6, ))
        p1.accelerations = np.zeros((6, ))
        p1.time_from_start = rospy.Duration(0)

        T = listener.lookupTransform("base", "link_6", rospy.Time(0))
        rg = 9.8 * np.matmul(
            np.matmul(T.R, Tran_z).transpose(),
            np.array([0, 0, 1]).transpose())
        A1 = np.hstack([
            rox.hat(rg).transpose(),
            np.zeros([3, 1]),
            np.eye(3),
            np.zeros([3, 3])
        ])
        A2 = np.hstack([
            np.zeros([3, 3]),
            rg.reshape([3, 1]),
            np.zeros([3, 3]),
            np.eye(3)
        ])
        A = np.vstack([A1, A2])
        FTdata_est = np.matmul(A, Vec_wrench)

        #print 'Test0:',FTdata,FTdata_0,FTdata_est,FTdata_0est
        FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
        print 'FTread:', FTread

        print 'FT:', FTdata
        print 'Z', FTread[-1]
        if FTread[-1] > -200:
            F_d = -350
        else:
            F_d = -400

        J = rox.robotjacobian(robot, current_joint_angles)
        Vz = Kc * (F_d - FTread[-1])
        joints_vel = np.linalg.pinv(J).dot(np.array([0, 0, 0, 0, 0, Vz]))
        print 'Joint_command:', joints_vel.dot(step_ts)

        p2 = JointTrajectoryPoint()
        p2.positions = np.array(p1.positions) + joints_vel.dot(
            step_ts)  #np.array([0,np.deg2rad(-2),0,0,0,0])
        p2.velocities = np.zeros((6, ))
        p2.accelerations = np.zeros((6, ))
        p2.time_from_start = rospy.Duration(step_ts)

        #        p3=JointTrajectoryPoint()
        #        p3.positions = np.array(p1.positions) + np.array([0,np.deg2rad(2),0,0,0,0])
        #        p3.velocities = np.zeros((6,))
        #        p3.accelerations = np.zeros((6,))
        #        p3.time_from_start = rospy.Duration(4)

        plan.joint_trajectory.points.append(p1)
        plan.joint_trajectory.points.append(p2)
        #        plan.joint_trajectory.points.append(p3)

        controller_commander.execute(plan)
        print 'Time:', time.time() - tic
        #        print 'Joint:',controller_commander.get_current_joint_values()
        time_save.append(time.time())
        FTdata_save.append(FTread)

    filename = "FTdata.txt"
    f_handle = file(filename, 'a')
    np.savetxt(f_handle, FTdata_save)
    f_handle.close()

    filename = "Time.txt"
    f_handle = file(filename, 'a')
    np.savetxt(f_handle, time_save)
    f_handle.close()
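
#The gravity-compensation regressor above is rebuilt verbatim in several of
#the scripts below. A minimal consolidated sketch (hypothetical helper name;
#assumes numpy as np and the rox toolbox import used in this module):
def gravity_comp_regressor(R, Tran_z):
    #Gravity direction expressed in the force/torque sensor frame
    rg = 9.8 * np.matmul(np.matmul(R, Tran_z).transpose(), np.array([0, 0, 1]))
    A1 = np.hstack([rox.hat(rg).transpose(), np.zeros([3, 1]), np.eye(3), np.zeros([3, 3])])
    A2 = np.hstack([np.zeros([3, 3]), rg.reshape([3, 1]), np.zeros([3, 3]), np.eye(3)])
    #np.matmul(A, Vec_wrench) then reproduces the in-line FT estimates above
    return np.vstack([A1, A2])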
def main():

    #Start timer to measure execution time
    t1 = time.time()

    #Initialize ros node of this process (must precede topic subscriptions)
    rospy.init_node('Placement_DJ_1', anonymous=True)

    #Subscribe to controller_state
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)

    #Force-torque
    if "disable-ft" not in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    process_client = actionlib.SimpleActionClient('process_step',
                                                  ProcessStepAction)
    process_state_pub = rospy.Publisher("GUI_state",
                                        ProcessState,
                                        queue_size=100,
                                        latch=True)
    process_client.wait_for_server()
    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()

    object_commander = ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()

    #subscribe to Gripper camera node for image acquisition
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)

    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/camera_trigger', Trigger)

    #Set controller command mode
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, [], [])
    time.sleep(0.5)

    #Open loop: set the initial pose
    #Set location above the panel where the end effector goes first (in the
    #world/base frame); this is the ideal location of the panel.
    #Cur_Pose = controller_commander.get_current_pose_msg()
    #print Cur_Pose
    #raw_input("confirm release vacuum")

    tran = np.array(
        [2.15484, 1.21372,
         0.25766])  #np.array([2.17209718963,-1.13702651252,0.224273328841])
    rot = rox.q2R(
        [0.02110, -0.03317, 0.99922, -0.00468]
    )  #rox.q2R([0.0145063655084, -0.0282932350764, 0.999322921073, -0.0185137145776])
    #tran = np.array([2.26476414097,-1.22419669418,0.411353037002])
    #rot = np.array([[-0.9959393, -0.0305329,  0.0846911], [-0.0310315,  0.9995079, -0.0045767], [-0.0845097, -0.0071862, -0.9963967]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target3 = copy.deepcopy(pose_target2)

    ##Execute movement to set location
    print "Executing initial path"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    #Initialize aruco boards and parameters
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters_create()
    parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    parameters.adaptiveThreshWinSizeMax = 30
    parameters.adaptiveThreshWinSizeStep = 7

    # 1st Panel tag info
    board_ground = cv2.aruco.GridBoard_create(4, 4, .04, .0075, aruco_dict, 32)
    board_panel = cv2.aruco.GridBoard_create(8, 3, .025, .0075, aruco_dict, 80)
    #Load object points ground tag in panel tag coordinate system from mat file
    loaded_object_points_ground_in_panel_system_stage_1 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system_stage_2 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_1
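
    #These .mat files hold the ground-tag corner positions expressed in the
    #panel-tag frame at the two reference poses; reprojecting them through the
    #current panel pose (cv2.projectPoints below) gives the target pixels for
    #the visual servo loop.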

    #Initialize camera intrinsic parameters #18285636_10_05_2018_5_params
    #18285636_10_05_2018_5_params_111122018
    CamParam = CameraParams(2283.391289766133, 1192.255485086403,
                            2279.484382094687, 1021.399092147012, 1.0,
                            -0.022101211408596, -0.095163053709332,
                            -0.003986991791212, -0.003479613658352,
                            0.179926705467534)
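    #Inferred from how the values are reused below: CameraParams appears to
    #take (f_u, c_u, f_v, c_v, skew, k1, k2, p1, p2, k3); f_hat_u/f_hat_v and
    #the image-center offsets later in this script repeat the same numbers.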

    #Subscribe to controller_state (again; this duplicates the subscription above)
    rospy.Subscriber("controller_state", controllerstate, callback)

    UV = np.zeros([150, 8])
    P = np.zeros([150, 3])

    #Focal lengths in pixel units, taken from the camera intrinsics above
    f_hat_u = 2283.391289766133  #2342.561249990927#2282.523358266698#2281.339593273153 #2446.88
    f_hat_v = 2279.484382094687  #2338.448312671424#2280.155828279608
    #dt functions like a gain: it scales the commanded velocity into a position step
    dt = 0.1
    du = 100.0
    dv = 100.0
    dutmp = 100.0
    dvtmp = 100.0
    #TimeGain = [0.1,0.1, 0.1]
    du_array = []
    dv_array = []
    dx_array = []
    iteration = 0
    stage = 1
    #step_ts = 0.004
    Kc = 0.0002
    #time_save = []
    #FTdata_save = []
    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()

    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(),
        np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(),
        np.zeros([3, 1]),
        np.eye(3),
        np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]),
         rg.reshape([3, 1]),
         np.zeros([3, 3]),
         np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)
    FTread_array = []
    FTdata_array = []
    t_now_array = []
    step_size = []

    cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Image', 490, 410)

    ### PBVS set the initial pose
    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except Exception:
        #Trigger service may be momentarily unavailable; wait for next image
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:

        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards (in the initial position, before the iterations)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    print "tvec difference: ", tvec_err
    print "rvec differnece: ", rvec_err

    # Adjustment
    print "Adjustment ===================="
    current_joint_angles = controller_commander.get_current_joint_values()
    dx = np.array([0, 0, 0, -tvec_err[0], tvec_err[1] + 0.03, tvec_err[2]]) * 0.75
    joints_vel = QP_abbirb6640(
        np.array(current_joint_angles).reshape(6, 1), np.array(dx))
    goal = trapezoid_gen(
        np.array(current_joint_angles) + joints_vel.dot(1),
        np.array(current_joint_angles), 0.25, np.array(dx))
    client = actionlib.SimpleActionClient("joint_trajectory_action",
                                          FollowJointTrajectoryAction)
    client.wait_for_server()
    client.send_goal(goal)
    client.wait_for_result()
    res = client.get_result()
    if (res.error_code != 0):
        raise Exception("Trajectory execution returned error")

    print res
    #raw_input("confirm move...")
    print "End of Initial Pose ===================="

    ### End of initial pose
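
    #A note on the adjustment step above: dx is a 6-vector spatial velocity
    #[w; v] with zero angular part; the sign pattern on tvec_err suggests a
    #camera-to-robot axis remap. QP_abbirb6640 resolves dx into joint motion
    #and trapezoid_gen turns it into a FollowJointTrajectory goal.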

    step_size_min = 100000
    stage = 2
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2

    #try changing du and dv to lower values (number of iterations increases)
    while ((du > 4) or (dv > 4)) and (iteration < 55):
        iteration += 1
        t_now_array.append(time.time())

        #Go to stage 2 of movement (mostly downward z motion)
        if ((du < 4) and (dv < 4) and (stage == 1)):
            #Save pose of marker boards after the iterations end (while in the final hovering position above nest)
            #Get pose of both ground and panel markers from detected corners
            retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
                objPoints_ground, imgPoints_ground, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
            retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
                objPoints_panel, imgPoints_panel, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

            print "============== Observed Pose difference in hovering position"
            observed_tvec_difference = tvec_ground - tvec_panel
            observed_rvec_difference = rvec_ground - rvec_panel
            print "tvec difference: ", observed_tvec_difference
            print "rvec differnece: ", observed_rvec_difference

            #Load ideal pose difference information from file
            loaded_rvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['rvec_difference']
            loaded_tvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['tvec_difference']

            print "============== Ideal Pose difference in hovering position"
            print "tvec difference: ", loaded_tvec_difference
            print "rvec differnece: ", loaded_rvec_difference

            tvec_difference_Above_Nest = loaded_tvec_difference - observed_tvec_difference
            rvec_difference_Above_Nest = loaded_rvec_difference - observed_rvec_difference

            print "============== Difference in pose difference in hovering position"
            print "tvec difference: ", tvec_difference_Above_Nest
            print "rvec differnece: ", rvec_difference_Above_Nest

            #Saving pose information to file
            filename_pose1 = "/home/rpi-cats/Desktop/DJ/Code/Data/Panel1_Above_Nest_Pose_" + str(
                t1) + ".mat"
            scipy.io.savemat(filename_pose1,
                             mdict={
                                 'tvec_ground_Above_Nest':
                                 tvec_ground,
                                 'tvec_panel_Above_Nest':
                                 tvec_panel,
                                 'Rca_ground_Above_Nest':
                                 Rca_ground,
                                 'Rca_panel_Above_Nest':
                                 Rca_panel,
                                 'tvec_difference_Above_Nest':
                                 tvec_ground - tvec_panel,
                                 'rvec_difference_Above_Nest':
                                 rvec_ground - rvec_panel,
                                 'loaded_tvec_difference':
                                 loaded_tvec_difference,
                                 'loaded_rvec_difference':
                                 loaded_rvec_difference,
                                 'observed_tvec_difference':
                                 observed_tvec_difference,
                                 'observed_rvec_difference':
                                 observed_rvec_difference
                             })

            #raw_input("Confirm Stage 2")
            stage = 2
            #			dt=0.02
            loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2
        #print pose_target2.p[2]

        #Print current robot pose at the beginning of this iteration
        Cur_Pose = controller_commander.get_current_pose_msg()
        print "============ Current Robot Pose"
        print Cur_Pose

        #Read new image
        last_ros_image_stamp = object_commander.ros_image_stamp
        try:
            ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            ros_gripper_2_trigger()
        except Exception:
            #Trigger service may be momentarily unavailable; wait for next image
            pass
        wait_count = 0
        while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        result = object_commander.ros_image
        #Save
        #        filename = "Acquisition3_%d.jpg" % (time.time())
        #        scipy.misc.imsave(filename, result)
        #Display
        #        cv2.namedWindow('Aqcuired Image',cv2.WINDOW_NORMAL)
        #        cv2.imshow('Acquired Image',result)
        #        cv2.waitKey(1)

        #Detect tag corners in acquired image using aruco
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            result, aruco_dict, parameters=parameters)
        frame_with_markers_and_axis = result

        #Sort corners and ids according to ascending order of ids
        corners_original = copy.deepcopy(corners)
        ids_original = np.copy(ids)
        sorting_indices = np.argsort(ids_original, 0)
        ids_sorted = ids_original[sorting_indices]
        ids_sorted = ids_sorted.reshape([len(ids_original), 1])
        combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
        combined_list.sort()
        corners_sorted = [x for y, x in combined_list]
        ids = np.copy(ids_sorted)
        corners = copy.deepcopy(corners_sorted)

        #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
        false_ids_ind = np.where(ids > 150)
        mask = np.ones(ids.shape, dtype=bool)
        mask[false_ids_ind] = False
        ids = ids[mask]
        corners = np.array(corners)
        corners = corners[mask.flatten(), :]
        corners = list(corners)

        #Define object and image points of both tags
        objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
            board_ground, corners, ids)
        objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
            board_panel, corners, ids)
        objPoints_ground = objPoints_ground.reshape(
            [objPoints_ground.shape[0], 3])
        imgPoints_ground = imgPoints_ground.reshape(
            [imgPoints_ground.shape[0], 2])
        objPoints_panel = objPoints_panel.reshape(
            [objPoints_panel.shape[0], 3])
        imgPoints_panel = imgPoints_panel.reshape(
            [imgPoints_panel.shape[0], 2])

        #Get pose of both ground and panel markers from detected corners
        retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
            objPoints_ground, imgPoints_ground, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
        retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
            objPoints_panel, imgPoints_panel, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)

        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(
            corners[0:20], 0.04, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(
            corners[20:45], 0.025, CamParam.camMatrix, CamParam.distCoeff)
        tvec_all = np.concatenate(
            (tvec_all_markers_ground, tvec_all_markers_panel), axis=0)
        for i_ids, i_corners, i_tvec in zip(ids, corners, tvec_all):
            UV[i_ids, :] = i_corners.reshape(
                [1, 8])  #np.average(i_corners, axis=1)
            P[i_ids, :] = i_tvec

        #print 'P',P
        #Heights (camera-frame z) of the ground-tag markers, used in the image Jacobian below
        Z = 1 * P[32:48, 2]  #- [0.68184539, 0.68560932, 0.68966803, 0.69619578])

        #Check that both board poses were found
        if retVal_ground != 0 and retVal_panel != 0:
            dutmp = []
            dvtmp = []

            #Pixel estimates of the ideal ground tag location
            reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
                loaded_object_points_ground_in_panel_system.transpose(),
                rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
            reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape(
                [reprojected_imagePoints_ground_2.shape[0], 2])

            #plot image points for ground tag from corner detection and from re-projections
            for point1, point2 in zip(
                    imgPoints_ground,
                    np.float32(reprojected_imagePoints_ground_2)):
                cv2.circle(frame_with_markers_and_axis, tuple(point1), 5,
                           (0, 0, 255), 3)
                cv2.circle(frame_with_markers_and_axis, tuple(point2), 5,
                           (255, 0, 0), 3)

            cv2.imshow('Image', frame_with_markers_and_axis)
            cv2.waitKey(1)
            #Save
            filename_image = "/home/rpi-cats/Desktop/DJ/Code/Images/Panel1_Acquisition_" + str(
                t1) + "_" + str(iteration) + ".jpg"
            scipy.misc.imsave(filename_image, frame_with_markers_and_axis)

            #Go through a particular point in all tags to build the complete Jacobian
            for ic in range(4):
                #uses first set of tags, numbers used to offset camera frame, come from camera parameters
                #UV_target = np.vstack([UV[9:13,2*ic]-1209.151959040735,UV[9:13,2*ic+1]-1055.254852652610]).T - UV_shift[:,(2*ic):(2*ic+2)]		#shift corner estimates to the image centers. Necessary for the image jacobian to work.
                reprojected_imagePoints_ground_2 = np.reshape(
                    reprojected_imagePoints_ground_2, (16, 8))
                UV_target = np.vstack([
                    reprojected_imagePoints_ground_2[:, 2 * ic] -
                    1192.255485086403,
                    reprojected_imagePoints_ground_2[:, 2 * ic + 1] -
                    1021.399092147012
                ]).T
                uc = UV_target[:, 0]
                vc = UV_target[:, 1]

                #                print 'UV_target', UV_target
                UV_current = np.vstack([
                    UV[32:48, 2 * ic] - 1192.255485086403,
                    UV[32:48, 2 * ic + 1] - 1021.399092147012
                ]).T
                #find difference between current and desired tag difference
                delta_UV = UV_target - UV_current
                dutmp.append(np.mean(delta_UV[:, 0]))
                dvtmp.append(np.mean(delta_UV[:, 1]))
                for tag_i in range(16):
                    if tag_i == 0:
                        J_cam_tmp = 1.0 * np.array(
                            [[
                                -f_hat_u / Z[tag_i], 0.0, uc[tag_i] / Z[tag_i],
                                uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                (uc[tag_i] * uc[tag_i] / f_hat_u + f_hat_u),
                                vc[tag_i]
                            ],
                             [
                                 0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                 Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v +
                                 f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                 f_hat_v, -uc[tag_i]
                             ]])
                    else:
                        J_cam_tmp = np.concatenate(
                            (J_cam_tmp,
                             1.0 * np.array(
                                 [[
                                     -f_hat_u / Z[tag_i], 0.0,
                                     uc[tag_i] / Z[tag_i],
                                     uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                     (uc[tag_i] * uc[tag_i] / f_hat_u +
                                      f_hat_u), vc[tag_i]
                                 ],
                                  [
                                      0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                      Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v
                                      + f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                      f_hat_v, -uc[tag_i]
                                  ]])),
                            axis=0)
                #camera jacobian
                if ic == 0:
                    J_cam = J_cam_tmp
                    delta_UV_all = delta_UV.reshape(32, 1)
                    UV_target_all = UV_target.reshape(32, 1)
                else:
                    J_cam = np.vstack([J_cam, J_cam_tmp])
                    delta_UV_all = np.vstack(
                        [delta_UV_all, delta_UV.reshape(32, 1)])
                    UV_target_all = np.vstack(
                        [UV_target_all,
                         UV_target.reshape(32, 1)])
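
            #J_cam_tmp stacks, for each tag corner, the standard point-feature
            #interaction matrix
            #  L(u, v, Z) = [[-f/Z,  0,   u/Z,  u*v/f,     -(f + u^2/f),  v],
            #                [ 0,  -f/Z,  v/Z,  f + v^2/f, -u*v/f,       -u]]
            #so that delta_UV ~= J_cam * (camera twist). QP_Cam below solves
            #for the twist in place of a plain pseudoinverse.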

            print 'dutmp: ', dutmp
            print 'dvtmp: ', dvtmp
            du = np.mean(np.absolute(dutmp))
            dv = np.mean(np.absolute(dvtmp))
            print 'Average du of all points:', du, 'Average dv of all points:', dv
            du_array.append(du)
            dv_array.append(dv)

            #print "Jcam",J_cam
            #print "UV",delta_UV_all
            #dx = np.matmul(np.linalg.pinv(J_cam),np.array(delta_UV_all))

            dx = QP_Cam(
                J_cam, delta_UV_all
            )  #np.matmul(np.linalg.pinv(J_cam),np.array(delta_UV_all))
            dx = dx.reshape([6, 1])

            dx_array.append(dx)

            #print '**************Jac:',dx, QP_Cam(J_cam,delta_UV_all)

            dx = np.array([
                dx[3, 0], -dx[4, 0], -dx[5, 0], dx[0, 0], -dx[1, 0], -dx[2, 0]
            ])

            if stage == 2:
                T = listener.lookupTransform("base", "link_6", rospy.Time(0))
                rg = 9.8 * np.matmul(
                    np.matmul(T.R, Tran_z).transpose(),
                    np.array([0, 0, 1]).transpose())
                A1 = np.hstack([
                    rox.hat(rg).transpose(),
                    np.zeros([3, 1]),
                    np.eye(3),
                    np.zeros([3, 3])
                ])
                A2 = np.hstack([
                    np.zeros([3, 3]),
                    rg.reshape([3, 1]),
                    np.zeros([3, 3]),
                    np.eye(3)
                ])
                A = np.vstack([A1, A2])
                FTdata_est = np.matmul(A, Vec_wrench)
                FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
                print 'FTread:', FTread

                print 'Z', FTread[-1]
                if FTread[-1] > -100:
                    F_d = -150
                    Kc = 0.0001
                else:
                    Kc = 0.0001
                    F_d = -200

                Vz = Kc * (F_d - FTread[-1])
                print 'Vz', Vz
                dx[-1] = Vz + dx[-1]
                FTread_array.append(FTread)
                FTdata_array.append(FTdata)

            current_joint_angles = controller_commander.get_current_joint_values()

            step_size_tmp = np.linalg.norm(dx)
            if step_size_tmp <= step_size_min:
                step_size_min = step_size_tmp
            else:
                dx = dx / step_size_tmp * step_size_min
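
            #Monotone step clamp: once the Cartesian correction starts growing
            #again, rescale it to the smallest step seen so far so the approach
            #to the nest cannot oscillate outward.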

            joints_vel = QP_abbirb6640(
                np.array(current_joint_angles).reshape(6, 1), np.array(dx))

            goal = trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(dt),
                np.array(current_joint_angles), 0.25, np.array(dx))
            step_size.append(np.linalg.norm(dx))

            client = actionlib.SimpleActionClient("joint_trajectory_action",
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()

            client.send_goal(goal)
            client.wait_for_result()
            res = client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

            print res

            print 'Current Iteration Finished.'

    #Saving iteration data to file
    filename_data = "/home/rpi-cats/Desktop/YC/Data/Panel1_Data_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_data,
                     mdict={
                         'du_array': du_array,
                         'dv_array': dv_array,
                         'dx_array': dx_array,
                         'step_size': step_size,
                         'iteration': iteration
                     })

    #Saving force data to file
    filename_force_data = "/home/rpi-cats/Desktop/YC/Data/Panel1_Data_force_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_force_data,
                     mdict={
                         'FTread': FTread_array,
                         'FTdata': FTdata_array,
                         't_now_array': t_now_array
                     })

    print '###############################'
    print 'step_size', step_size
    print 'iteration', iteration
    print '###############################'
    '''
    print "============ Final Push Down to Nest"
    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.2, [],[])

    
    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
    trans_current = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]
    pose_target2.R = rox.q2R([rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] -= 0.11
    '''

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except Exception:
        #Trigger service may be momentarily unavailable; wait for next image
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the iterations end (while in the final hovering position above nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    print "tvec difference: ", tvec_err
    print "rvec differnece: ", rvec_err

    # Adjustment
    print "Adjustment ===================="
    current_joint_angles = controller_commander.get_current_joint_values()
    dx = np.array([0, 0, 0, -tvec_err[0], tvec_err[1], 0])
    joints_vel = QP_abbirb6640(
        np.array(current_joint_angles).reshape(6, 1), np.array(dx))
    goal = trapezoid_gen(
        np.array(current_joint_angles) + joints_vel.dot(1),
        np.array(current_joint_angles), 0.25, np.array(dx))
    client = actionlib.SimpleActionClient("joint_trajectory_action",
                                          FollowJointTrajectoryAction)
    client.wait_for_server()
    client.send_goal(goal)
    client.wait_for_result()
    res = client.get_result()
    if (res.error_code != 0):
        raise Exception("Trajectory execution returned error")

    print res
    print "End of Adjustment ===================="

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except Exception:
        #Trigger service may be momentarily unavailable; wait for next image
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the iterations end (while in the final hovering position above nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    rospy.loginfo("tvec difference: %f, %f, %f", tvec_err[0], tvec_err[1],
                  tvec_err[2])
    rospy.loginfo("rvec difference: %f, %f, %f", rvec_err[0], rvec_err[1],
                  rvec_err[2])
    print "rvec differnece: ", tvec_err
    print "rvec differnece: ", rvec_err

    #Saving pose information to file
    filename_pose2 = "/home/rpi-cats/Desktop/DJ/Code/Data/In_Nest_Pose_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_pose2,
                     mdict={
                         'tvec_ground_In_Nest': tvec_ground,
                         'tvec_panel_In_Nest': tvec_panel,
                         'Rca_ground_In_Nest': Rca_ground,
                         'Rca_panel_In_Nest': Rca_panel,
                         'tvec_difference_In_Nest': tvec_ground - tvec_panel,
                         'rvec_difference_In_Nest': rvec_ground - rvec_panel,
                         'loaded_tvec_difference': loaded_tvec_difference,
                         'loaded_rvec_difference': loaded_rvec_difference,
                         'observed_tvec_difference': observed_tvec_difference,
                         'observed_rvec_difference': observed_rvec_difference
                     })
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [], [])
    #    raw_input("confirm release vacuum")
    rapid_node.set_digital_io("Vacuum_enable", 0)
    g = ProcessStepGoal('plan_place_set_second_step', "")

    process_client.send_goal(g)

    #self.in_process=True

    print process_client.get_result()
    print "VACUUM OFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    time.sleep(0.5)

    tran = np.array([2.17209718963, -1.13702651252, 0.224273328841])
    rot = rox.q2R(
        [0.0145063655084, -0.0282932350764, 0.999322921073, -0.0185137145776])
    pose_target2 = rox.Transform(rot, tran)

    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [
        Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
        Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z
    ]
    trans_current = [
        Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
        Cur_Pose.pose.position.z
    ]
    pose_target2.R = rox.q2R(
        [rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] += 0.5
    #
    #
    #    #print 'Target:',pose_target3
    #
    #    #print "============ Executing plan4"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
    s = ProcessState()
    s.state = "place_set"
    s.payload = "leeward_mid_panel"
    s.target = ""
    process_state_pub.publish(s)
def main():

    #Start timer to measure execution time
    t1 = time.time()

    #Initialize ros node of this process (must precede topic subscriptions)
    rospy.init_node('Placement_DJ_1', anonymous=True)

    #Subscribe to controller_state
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)

    #Force-torque
    if "disable-ft" not in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    #print "============ Starting setup"

    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()

    object_commander = ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()

    #subscribe to Gripper camera node for image acquisition
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)
    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/continuous_trigger', CameraTrigger)

    #Set controller command mode
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, ft_threshold_place, [])
    time.sleep(0.5)

    #Set Location above the panel where the end effector goes first (in the world/base frame) Ideal location of panel.
    tran = np.array([2.20120663258, 1.2145882986, 0.0798989466944])
    rot = np.array([[-0.9971185, 0.0071821, 0.0755188],
                    [0.0056502, 0.9997743, -0.0204797],
                    [-0.0756488, -0.0199939, -0.9969341]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target3 = copy.deepcopy(pose_target2)
    pose_target2.p[2] += 0.35  #35 cm above ideal location of panel

    #Execute movement to set location
    print "Executing initial path"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    #Initialize aruco boards and parameters
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters_create()
    parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    board_ground = cv2.aruco.GridBoard_create(5, 4, 0.04, 0.01, aruco_dict, 20)
    board_panel = cv2.aruco.GridBoard_create(8, 3, 0.025, 0.0075, aruco_dict,
                                             50)
    #    tag_ids=["vacuum_gripper_marker_1","leeward_mid_panel_marker_1", "aligner_ref_1", "aligner_ref_2", "aligner_ref_3", "aligner_ref_4"]
    #    boards=[gripper_board, panel_board, ref1, ref2, ref3, ref4]

    #Initialize camera intrinsic parameters #18285636_10_05_2018_5_params
    CamParam = CameraParams(2342.561249990927, 1209.151959040735,
                            2338.448312671424, 1055.254852652610, 1.0,
                            -0.014840837133389, -0.021008029929566,
                            3.320024219653553e-04, -0.002187550225028,
                            -0.025059986937316)

    #Subscribe to controller_state (again; this duplicates the subscription above)
    rospy.Subscriber("controller_state", controllerstate, callback)

    UV = np.zeros([74, 8])
    P = np.zeros([74, 3])

    #Load object points ground tag in panel tag coordinate system from mat file
    loaded_object_points_ground_in_panel_system_stage_1 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system_stage_2 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_1

    #Focal lengths in pixel units, taken from the camera intrinsics above
    f_hat_u = 2342.561249990927  #2282.523358266698#2281.339593273153 #2446.88
    f_hat_v = 2338.448312671424  #2280.155828279608
    #dt functions like a gain: it scales the commanded velocity into a position step
    dt = 0.1
    du = 100.0
    dv = 100.0
    dutmp = 100.0
    dvtmp = 100.0
    TimeGain = [0.1, 0.1, 0.1]

    du_array = []
    dv_array = []
    dx_array = []
    iteration = 0
    stage = 1
    step_ts = 0.004

    Kc = 0.0002
    time_save = []
    FTdata_save = []
    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()

    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(),
        np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(),
        np.zeros([3, 1]),
        np.eye(3),
        np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]),
         rg.reshape([3, 1]),
         np.zeros([3, 3]),
         np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)
    FTread_array = []
    FTdata_array = []
    t_now_array = []

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])
    #while (pose_target2.p[2] > 0.185):
    #    while ((du>10) | (dv>10) | (pose_target2.p[2] > 0.172)):    #try changing du and dv to lower values(number of iterations increases)
    #try changing du and dv to lower values (number of iterations increases)
    while ((du > 1) or (dv > 1)) and (iteration < 125):
        #    while ((np.max(np.abs(dutmp))>0.5) | (np.max(np.abs(dvtmp))>0.5)):    #try changing du and dv to lower values(number of iterations increases)
        iteration += 1
        t_now_array.append(time.time())

        #Go to stage 2 of movement (mostly downward z motion)
        if ((du < 2) and (dv < 2) and (stage == 1)):
            #Save pose of marker boards after the iterations end (while in the final hovering position above nest)
            #Get pose of both ground and panel markers from detected corners
            retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
                objPoints_ground, imgPoints_ground, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
            retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
                objPoints_panel, imgPoints_panel, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

            print "============== Observed Pose difference in hovering position"
            observed_tvec_difference = tvec_ground - tvec_panel
            observed_rvec_difference = rvec_ground - rvec_panel
            print "tvec difference: ", observed_tvec_difference
            print "rvec differnece: ", observed_rvec_difference

            #Load ideal pose difference information from file
            loaded_rvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['rvec_difference']
            loaded_tvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['tvec_difference']

            print "============== Ideal Pose difference in hovering position"
            print "tvec difference: ", loaded_tvec_difference
            print "rvec differnece: ", loaded_rvec_difference

            tvec_difference_Above_Nest = loaded_tvec_difference - observed_tvec_difference
            rvec_difference_Above_Nest = loaded_rvec_difference - observed_rvec_difference

            print "============== Difference in pose difference in hovering position"
            print "tvec difference: ", tvec_difference_Above_Nest
            print "rvec differnece: ", rvec_difference_Above_Nest

            #Saving pose information to file
            filename_pose1 = "/home/rpi-cats/Desktop/DJ/Code/Data/Above_Nest_Pose_" + str(
                t1) + ".mat"
            scipy.io.savemat(filename_pose1,
                             mdict={
                                 'tvec_ground_Above_Nest': tvec_ground,
                                 'tvec_panel_Above_Nest': tvec_panel,
                                 'Rca_ground_Above_Nest': Rca_ground,
                                 'Rca_panel_Above_Nest': Rca_panel,
                                 'tvec_difference_Above_Nest': tvec_ground - tvec_panel,
                                 'rvec_difference_Above_Nest': rvec_ground - rvec_panel,
                                 'loaded_tvec_difference': loaded_tvec_difference,
                                 'loaded_rvec_difference': loaded_rvec_difference,
                                 'observed_tvec_difference': observed_tvec_difference,
                                 'observed_rvec_difference': observed_rvec_difference
                             })

            raw_input("Confirm Stage 2")
            stage = 2
            #			dt=0.02
            loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2
        #print pose_target2.p[2]

        #Print current robot pose at the beginning of this iteration
        Cur_Pose = controller_commander.get_current_pose_msg()
        print "============ Current Robot Pose"
        print Cur_Pose

        #Read new image
        last_ros_image_stamp = object_commander.ros_image_stamp
        try:
            ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            ros_gripper_2_trigger(False)
        except Exception:
            #Trigger service may be momentarily unavailable; the wait loop
            #below will simply time out if no new image arrives
            pass
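        #Wait for an image with a newer timestamp than the one captured before
        #triggering (up to 50 * 0.25 s)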
        wait_count = 0
        while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        result = object_commander.ros_image
        #Save
        #        filename = "Acquisition3_%d.jpg" % (time.time())
        #        scipy.misc.imsave(filename, result)
        #Display
        #        cv2.namedWindow('Acquired Image',cv2.WINDOW_NORMAL)
        #        cv2.imshow('Acquired Image',result)
        #        cv2.waitKey(1)

        #Detect tag corners in the acquired image using aruco
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            result, aruco_dict, parameters=parameters)
        frame_with_markers_and_axis = result

        #Sort corners and ids according to ascending order of ids
        corners_original = copy.deepcopy(corners)
        ids_original = np.copy(ids)
        sorting_indices = np.argsort(ids_original, 0)
        ids_sorted = ids_original[sorting_indices]
        ids_sorted = ids_sorted.reshape([len(ids_original), 1])
        combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
        #        print "combined_list:",combined_list
        combined_list.sort()
        corners_sorted = [x for y, x in combined_list]
        ids = np.copy(ids_sorted)
        corners = copy.deepcopy(corners_sorted)

        #Remove ids and corresponding corners not in range (parasitic detections at random locations in the image)
        false_ids_ind = np.where(ids > 73)
        mask = np.ones(ids.shape, dtype=bool)
        mask[false_ids_ind] = False
        ids = ids[mask]
        corners = np.array(corners)
        corners = corners[mask.flatten(), :]
        corners = list(corners)

        #Define object and image points of both tags
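        #aruco.getBoardObjectAndImagePoints pairs each detected corner with its
        #3D position in the board frame, giving matched point sets for solvePnP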
        objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
            board_ground, corners, ids)
        objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
            board_panel, corners, ids)
        objPoints_ground = objPoints_ground.reshape(
            [objPoints_ground.shape[0], 3])
        imgPoints_ground = imgPoints_ground.reshape(
            [imgPoints_ground.shape[0], 2])
        objPoints_panel = objPoints_panel.reshape(
            [objPoints_panel.shape[0], 3])
        imgPoints_panel = imgPoints_panel.reshape(
            [imgPoints_panel.shape[0], 2])

        #Get pose of both ground and panel markers from detected corners
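        #solvePnP yields each board pose as a Rodrigues rotation vector and a
        #translation in the camera frame; cv2.Rodrigues expands the vector to a
        #3x3 rotation matrix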
        retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
            objPoints_ground, imgPoints_ground, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
        retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
            objPoints_panel, imgPoints_panel, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)

        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(
            corners[0:20], 0.04, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(
            corners[20:45], 0.025, CamParam.camMatrix, CamParam.distCoeff)
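        #(The first 20 detections above are the 0.04 m ground-board tags; the
        #next 25 are the 0.025 m panel-board tags)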
        tvec_all = np.concatenate(
            (tvec_all_markers_ground, tvec_all_markers_panel), axis=0)
        for i_ids, i_corners, i_tvec in zip(ids, corners, tvec_all):
            #print 'i_corners',i_corners,i_corners.reshape([1,8])
            UV[i_ids, :] = i_corners.reshape(
                [1, 8])  #np.average(i_corners, axis=1)
            P[i_ids, :] = i_tvec

        #Heights (camera-frame z) of the panel tags, used as the feature depths
        #in the image Jacobian below
        Z = 1 * P[20:40, 2]  #- [0.68184539, 0.68560932, 0.68966803, 0.69619578]

        #Proceed only if the poses of both boards were solved successfully
        if retVal_ground != 0 and retVal_panel != 0:
            dutmp = []
            dvtmp = []

            #Pixel estimates of the ideal ground tag location
            reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
                loaded_object_points_ground_in_panel_system.transpose(),
                rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
            reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape(
                [reprojected_imagePoints_ground_2.shape[0], 2])
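            #These reprojected points serve as the servoing targets: the pixel
            #locations where the ground-tag corners would appear if the panel
            #were at its ideal pose relative to the ground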
            #            print "Image Points Ground:", imgPoints_ground
            #            print "Reprojected Image Points Ground2:", reprojected_imagePoints_ground_2
            #            print "Reprojectoin error:",imgPoints_ground-reprojected_imagePoints_ground_2
            #            print "Average Reprojectoin error: ",np.mean(imgPoints_ground-reprojected_imagePoints_ground_2, axis=0)

            #print "Reprojected Image Points Ground2 type:", type(reprojected_imagePoints_ground_2)

            #plot image points for ground tag from corner detection and from re-projections
            for point1, point2 in zip(
                    imgPoints_ground,
                    np.float32(reprojected_imagePoints_ground_2)):
                cv2.circle(frame_with_markers_and_axis, tuple(point1), 5,
                           (0, 0, 255), 3)
                cv2.circle(frame_with_markers_and_axis, tuple(point2), 5,
                           (255, 0, 0), 3)

            #            cv2.namedWindow('Acquired Image',cv2.WINDOW_NORMAL)
            #            cv2.imshow('Acquired Image',frame_with_markers_and_axis)
            #            cv2.waitKey(1)
            #Save the annotated image for this iteration
            filename_image = "/home/rpi-cats/Desktop/DJ/Code/Images/Acquisition_" + str(
                t1) + "_" + str(iteration) + ".jpg"
            scipy.misc.imsave(filename_image, frame_with_markers_and_axis)

            #Loop over the four corner indices of each tag to build the complete image Jacobian
            for ic in range(4):
                #The offsets 1209.151959... and 1055.254852... are the camera
                #principal point (cx, cy) from calibration; subtracting them
                #expresses pixel coordinates relative to the optical axis, as
                #the image Jacobian requires
                #UV_target = np.vstack([UV[9:13,2*ic]-1209.151959040735,UV[9:13,2*ic+1]-1055.254852652610]).T - UV_shift[:,(2*ic):(2*ic+2)]		#shift corner estimates to the image centers. Necessary for the image jacobian to work.
                reprojected_imagePoints_ground_2 = np.reshape(
                    reprojected_imagePoints_ground_2, (20, 8))
                UV_target = np.vstack([
                    reprojected_imagePoints_ground_2[:, 2 * ic] -
                    1209.151959040735,
                    reprojected_imagePoints_ground_2[:, 2 * ic + 1] -
                    1055.254852652610
                ]).T
                uc = UV_target[:, 0]
                vc = UV_target[:, 1]

                #                print 'UV_target', UV_target
                UV_current = np.vstack([
                    UV[20:40, 2 * ic] - 1209.151959040735,
                    UV[20:40, 2 * ic + 1] - 1055.254852652610
                ]).T
                #find difference between current and desired tag difference
                delta_UV = UV_target - UV_current
                #                print 'delta_UV: ',ic, delta_UV
                dutmp.append(np.mean(delta_UV[:, 0]))
                dvtmp.append(np.mean(delta_UV[:, 1]))
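                #Each corner at centered pixel coordinates (u, v) with depth Z
                #contributes the standard 2x6 IBVS interaction matrix
                #   [[-f/Z,   0,  u/Z,  u*v/f,     -(u^2/f + f),  v],
                #    [  0,  -f/Z,  v/Z,  v^2/f + f, -u*v/f,      -u]]
                #relating the camera twist to the corner's pixel velocity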
                J_cam_rows = []
                for tag_i in range(20):
                    J_cam_rows.append(np.array(
                        [[-f_hat_u / Z[tag_i], 0.0, uc[tag_i] / Z[tag_i],
                          uc[tag_i] * vc[tag_i] / f_hat_u,
                          -1.0 * (uc[tag_i] * uc[tag_i] / f_hat_u + f_hat_u),
                          vc[tag_i]],
                         [0.0, -f_hat_v / Z[tag_i], vc[tag_i] / Z[tag_i],
                          vc[tag_i] * vc[tag_i] / f_hat_v + f_hat_v,
                          -1.0 * uc[tag_i] * vc[tag_i] / f_hat_v,
                          -uc[tag_i]]]))
                J_cam_tmp = np.concatenate(J_cam_rows, axis=0)
                #Stack this corner's Jacobian block and pixel errors onto the full system
                if ic == 0:
                    J_cam = J_cam_tmp
                    delta_UV_all = delta_UV.reshape(40, 1)
                    UV_target_all = UV_target.reshape(40, 1)
                else:
                    J_cam = np.vstack([J_cam, J_cam_tmp])
                    delta_UV_all = np.vstack(
                        [delta_UV_all, delta_UV.reshape(40, 1)])
                    UV_target_all = np.vstack(
                        [UV_target_all,
                         UV_target.reshape(40, 1)])

            print 'dutmp: ', dutmp
            print 'dvtmp: ', dvtmp
            du = np.mean(np.absolute(dutmp))
            dv = np.mean(np.absolute(dvtmp))
            print 'Average du of all points:', du, 'Average dv of all points:', dv
            du_array.append(du)
            dv_array.append(dv)
            #            print 'delta_UV_all',delta_UV_all
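            #Least-squares solve of J_cam * dx = delta_UV_all: dx is the camera
            #twist [vx, vy, vz, wx, wy, wz] that drives the pixel errors to zero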
            dx = np.matmul(np.linalg.pinv(J_cam), np.array(delta_UV_all))
            dx_array.append(dx)
            #            print 'dx:',dx             #translational and angular velocity
            #print '1:',dx[0:3,0]
            #print '2:',np.hstack([0,-dx[0:3,0]])
            #print '3:',np.hstack([dx[0:3,0].reshape(3,1),rox.hat(np.array(dx[0:3,0]))])
            #coordinate system change, replace with tf transform
            #dx_w = np.array([dx[3,0],-dx[4,0],-dx[5,0]])        #angular velocity
            #map omega to current quaternion
            #Omega = 0.5*np.vstack([np.hstack([0,-dx_w]),np.hstack([dx_w.reshape(3,1),rox.hat(np.array(dx_w))])])
            #print Omega
            #rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
            #rot = np.matmul(expm(Omega*dt),np.array(rot_current))
            #trans_current = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]
            #trans = trans_current + np.array([dx[0,0],-dx[1,0],-dx[2,0]])*TimeGain

            #pose_target2 = rox.Transform(rox.q2R([rot[0], rot[1], rot[2], rot[3]]), trans)
            #Vz=0
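            #Reorder the camera twist to [w; v] for the robot Jacobian
            #convention and flip the y and z components (the camera frame
            #appears to be rotated 180 degrees about x relative to the base)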
            dx = np.array([
                dx[3, 0], -dx[4, 0], -dx[5, 0], dx[0, 0], -dx[1, 0], -dx[2, 0]
            ])

            if stage == 2:
                T = listener.lookupTransform("base", "link_6", rospy.Time(0))
                rg = 9.8 * np.matmul(
                    np.matmul(T.R, Tran_z).transpose(),
                    np.array([0, 0, 1]).transpose())
                A1 = np.hstack([
                    rox.hat(rg).transpose(),
                    np.zeros([3, 1]),
                    np.eye(3),
                    np.zeros([3, 3])
                ])
                A2 = np.hstack([
                    np.zeros([3, 3]),
                    rg.reshape([3, 1]),
                    np.zeros([3, 3]),
                    np.eye(3)
                ])
                A = np.vstack([A1, A2])
                FTdata_est = np.matmul(A, Vec_wrench)
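                #Gravity-compensated reading: (raw - model) now, minus
                #(raw - model) at the start, i.e.
                #FTread = (FTdata - FTdata_est) - (FTdata_0 - FTdata_0est)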
                FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
                print 'FTread:', FTread

                print 'Z', FTread[-1]
                #Force-control gain and desired contact force on the tool z
                #axis; command a smaller target force until contact builds up
                Kc = 0.0001
                if FTread[-1] > -100:
                    F_d = -150
                else:
                    F_d = -200
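                #Proportional explicit force control: Vz = Kc * (F_d - F_z),
                #added to the visual-servoing z velocity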

                Vz = Kc * (F_d - FTread[-1])
                print 'Vz', Vz
                dx[-1] = Vz + dx[-1]
                FTread_array.append(FTread)
                FTdata_array.append(FTdata)

            current_joint_angles = controller_commander.get_current_joint_values()
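            #Resolved-rate motion control: map the commanded twist to joint
            #velocities through the pseudoinverse of the manipulator Jacobian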
            J = rox.robotjacobian(robot, current_joint_angles)
            joints_vel = np.linalg.pinv(J).dot(np.array(dx))

            print 'vel_norm:', np.linalg.norm(joints_vel)

            #Scale the trajectory duration with the joint-velocity norm, with a
            #floor of 0.1 s
            t_duration = max(np.log(np.linalg.norm(joints_vel) * 10 + 1) + 0.1, 0.1)
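            #Build a trapezoidal joint trajectory from the current configuration
            #to the configuration one velocity step (dt) away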

            goal = trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(dt),
                np.array(current_joint_angles), t_duration)
            '''
            plan=RobotTrajectory()    
            plan.joint_trajectory.joint_names=['joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6']
        
            plan.joint_trajectory.header.frame_id='/world'
            p1=JointTrajectoryPoint()
            p1.positions = current_joint_angles
            p1.velocities = np.zeros((6,))
            p1.accelerations = np.zeros((6,))
            p1.time_from_start = rospy.Duration(0)     
                 
            p2=JointTrajectoryPoint()
            p2.positions = np.array(p1.positions) + joints_vel.dot(dt)
            p2.velocities = np.zeros((6,))
            p2.accelerations = np.zeros((6,))
            p2.time_from_start = rospy.Duration(dt)

            controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])

        
            plan.joint_trajectory.points.append(p1)
            plan.joint_trajectory.points.append(p2)
            '''

            client = actionlib.SimpleActionClient("joint_trajectory_action",
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()

            client.send_goal(goal)
            client.wait_for_result()
            res = client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

            print res

            #break
            #raw_input("confirm move...")
            '''
            print "============ Executing Current Iteration movement"
            try:
                controller_commander.execute(plan)
                #controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
            except:
                pass
            '''

            print 'Current Iteration Finished.'


#            filename = "delta_UV_all.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, delta_UV_all)
#            f_handle.close()
#
#            filename = "UV_target_all.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, UV_target_all)
#            f_handle.close()
#
#            filename = "P.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, P)
#            f_handle.close()
#
#
#            filename = "Robot.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, np.hstack([np.array(trans_current),np.array(rot_current)]))
#            f_handle.close()

    #Saving iteration data to file
    filename_data = "/home/rpi-cats/Desktop/DJ/Code/Data/Data_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_data,
                     mdict={
                         'du_array': du_array,
                         'dv_array': dv_array,
                         'dx_array': dx_array
                     })

    #Saving force data to file
    filename_force_data = "/home/rpi-cats/Desktop/DJ/Code/Data/Data_force_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_force_data,
                     mdict={
                         'FTread': FTread_array,
                         'FTdata': FTdata_array,
                         't_now_array': t_now_array
                     })


    print "============ Final Push Down to Nest"
    # while (1):
    #DJ Final push from hovering above nest into resting in the nest
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.2, [], [])
    #    pose_target2.p[2] = 0.115
    #    pose_target2.p[2] = 0.15

    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [
        Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
        Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z
    ]
    trans_current = [
        Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
        Cur_Pose.pose.position.z
    ]
    pose_target2.R = rox.q2R(
        [rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] -= 0.11
    #    pose_target2.p[0] += 0.005
    #    pose_target2.p[1] -= 0.002

    #    controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger(False)
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in the acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections at random locations in the image)
    false_ids_ind = np.where(ids > 73)
    mask = np.ones(ids.shape, dtype=bool)
    mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the final placement (while resting in the nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference - observed_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference - observed_rvec_difference

    #Saving pose information to file
    filename_pose2 = "/home/rpi-cats/Desktop/DJ/Code/Data/In_Nest_Pose_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_pose2,
                     mdict={
                         'tvec_ground_In_Nest': tvec_ground,
                         'tvec_panel_In_Nest': tvec_panel,
                         'Rca_ground_In_Nest': Rca_ground,
                         'Rca_panel_In_Nest': Rca_panel,
                         'tvec_difference_In_Nest': tvec_ground - tvec_panel,
                         'rvec_difference_In_Nest': rvec_ground - rvec_panel,
                         'loaded_tvec_difference': loaded_tvec_difference,
                         'loaded_rvec_difference': loaded_rvec_difference,
                         'observed_tvec_difference': observed_tvec_difference,
                         'observed_rvec_difference': observed_rvec_difference
                     })

    #    print "============ Lift gripper!"
    #    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
    #    raw_input("confirm release vacuum")
    #    #rapid_node.set_digital_io("Vacuum_enable", 0)
    #    print "VACUUM OFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    #    time.sleep(0.5)
    #    pose_target3.p[2] += 0.2
    #
    #
    #    #print 'Target:',pose_target3
    #
    #    #print "============ Executing plan4"
    #    #controller_commander.compute_cartesian_path_and_move(pose_target3, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
def main():
            
    #Start timer to measure execution time        
    t1 = time.time()
    
    #Subscribe to controller_state 
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)
    
    #Force-torque threshold: disabled (empty list) when "disable-ft" is passed
    #on the command line
    if "disable-ft" not in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []
    
    #Initialize ros node of this process
    rospy.init_node('user_defined_motion', anonymous=True)
    
    #print "============ Starting setup"   
    
    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()

    object_commander=ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()   
  
    #subscribe to Gripper camera node for image acquisition     
    ros_gripper_2_img_sub=rospy.Subscriber('/gripper_camera_2/image', Image, object_commander.ros_raw_gripper_2_image_cb)
    ros_gripper_2_trigger=rospy.ServiceProxy('/gripper_camera_2/continuous_trigger', CameraTrigger)

    #Set controller command mode
    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.4, ft_threshold_place,[])
    time.sleep(0.5)
    
    
    #Set the location above the panel where the end effector goes first (in the world/base frame): the ideal location of the panel
    current_joint_angles = controller_commander.get_current_joint_values()
    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_o = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
    trans_o = np.array([Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z])

    pos_command = []
    quat_command = []
    pos_read = []
    quat_read = []
    time_all = []
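    #Move through four poses spaced 0.25 m apart vertically, logging commanded
    #and measured poses at each stop for motion-capture comparison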


    for i in range(4):
        rot_set = rox.q2R(rot_o)
        tran_set = trans_o+np.array([0,0,i*0.25])


        pose_target2 = rox.Transform(rot_set, tran_set)
    
         
        #Execute movement to set location
        print "Executing initial path"
        controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
        time.sleep(1)
    
        Cur_Pose = controller_commander.get_current_pose_msg()
        rot_cur = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
        trans_cur = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]

        pos_command.append(tran_set)
        quat_command.append(rot_set)
        pos_read.append(trans_cur)
        quat_read.append(rot_cur)
        time_all.append(time.time())
    

    #Saving commanded and measured pose data to file
    filename_data = "/home/rpi-cats/Desktop/YC/Data/Motion Capture/Data_"+str(t1)+".mat"
    scipy.io.savemat(filename_data, mdict={'pos_command':pos_command, 'quat_command':quat_command, 'pos_read':pos_read, 'quat_read':quat_read, 'time_all':time_all})
	

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2-t1) + " seconds"