Example #1
def estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs, rvec=None, tvec=None, useExtrinsicGuess=False):
    assert len(corners) == len(ids)
    objPoints, imgPoints = aruco.getBoardObjectAndImagePoints(board, corners, ids)
    if objPoints is None or len(objPoints) == 0:
        return 0, None, None

    assert len(imgPoints) == len(objPoints)
    _, rvec, tvec = cv2.solvePnP(objPoints, imgPoints, cameraMatrix, distCoeffs, flags=cv2.SOLVEPNP_IPPE)
    # each marker contributes 4 object points, so this is the marker count
    return int(len(objPoints) / 4), rvec, tvec
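A minimal usage sketch for the helper above. The dictionary, board geometry, image file, and placeholder intrinsics below are illustrative assumptions, not part of the original example:

import cv2
import cv2.aruco as aruco
import numpy as np

cameraMatrix = np.eye(3)   # placeholder: substitute your calibrated intrinsics
distCoeffs = np.zeros(5)   # placeholder: substitute your distortion coefficients

aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
board = aruco.GridBoard_create(5, 7, 0.04, 0.01, aruco_dict)

gray = cv2.cvtColor(cv2.imread("frame.png"), cv2.COLOR_BGR2GRAY)
corners, ids, _ = aruco.detectMarkers(gray, aruco_dict)
if ids is not None:
    n_used, rvec, tvec = estimatePoseBoard(corners, ids, board,
                                           cameraMatrix, distCoeffs)
    print("markers used for board pose:", n_used)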
Example #2
def aruco_demo():  # renamed from "aruco": that name would shadow the cv2.aruco module used below

    print("getting data from file")
    cam_matrix, dist_coefs, rvecs, tvecs = get_cam_matrix(
        "camera_matrix_aruco.yaml")

    # aruco data

    aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
    parameters = aruco.DetectorParameters_create()

    img = vs.read()
    height, width, channels = img.shape
    out = cv2.VideoWriter('outpy1.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
                          (width, height))
    board = aruco.GridBoard_create(6, 8, 0.05, 0.01, aruco_dict)
    while True:

        img = get_frame(vs)
        gray = convert_gray(img)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)

        if ids is not None and corners is not None:
            # obj = aruco.drawDetectedMarkers(img, corners,ids,(255,0,0))

            for cor in corners:
                r, t, _objpoints = aruco.estimatePoseSingleMarkers(
                    cor, 0.5, cam_matrix, dist_coefs)
                # aruco.drawAxis(img,cam_matrix,dist_coefs,r,t,0.2)

            # refine and estimate the board pose only when markers were found
            corners, ids, rejectedImgPoints, rec_idx = aruco.refineDetectedMarkers(
                gray, board, corners, ids, rejectedImgPoints)
            ret, rv, tv = aruco.estimatePoseBoard(corners, ids, board,
                                                  cam_matrix, dist_coefs)
            if ret:
                aruco.drawAxis(img, cam_matrix, dist_coefs, rv, tv, 0.2)
                obj_points, imgpoints = aruco.getBoardObjectAndImagePoints(
                    board, corners, ids)

                # imgpts = np.int32(imgpoints).reshape(-1, 2)
                # # draw ground floor in green
                # img = cv2.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), 3)

        # out.write(img)
        cv2.imshow("Markers", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    out.release()
    cv2.destroyAllWindows()
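Example #2 depends on helpers that are not shown (get_cam_matrix, get_frame, convert_gray, and a video stream vs). A minimal sketch of get_cam_matrix, assuming PyYAML and the plain-list YAML layout that Example #4 below writes:

import numpy as np
import yaml

def get_cam_matrix(filename):
    # Restore intrinsics saved as plain lists under these assumed keys
    with open(filename) as f:
        data = yaml.safe_load(f)
    return (np.asarray(data['camera_matrix']), np.asarray(data['dist_coeff']),
            data['rvecs'], data['tvecs'])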
Example #3
def get_pose(self, board, corners, ids, CamParam):
    #Define object and image points of both tags
    objPoints, imgPoints = aruco.getBoardObjectAndImagePoints(board, corners, ids)
    objPoints = objPoints.reshape([objPoints.shape[0], 3])
    imgPoints = imgPoints.reshape([imgPoints.shape[0], 2])

    #Get pose of both ground and panel markers from detected corners
    retVal, rvec, tvec = cv2.solvePnP(objPoints, imgPoints, CamParam.camMatrix, CamParam.distCoeff)
    #Rca is the 3x3 rotation matrix; b is the Jacobian returned by cv2.Rodrigues
    Rca, b = cv2.Rodrigues(rvec)

    return imgPoints, rvec, tvec, Rca, b
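Here rvec/tvec give the board pose in the camera frame, Rca is the matching rotation matrix, and b is the Jacobian that cv2.Rodrigues also returns. An illustrative follow-up (not from the original source) that composes the pose into a 4x4 homogeneous transform:

import numpy as np

def pose_to_homogeneous(Rca, tvec):
    # Camera-from-board transform: rotation in the upper-left 3x3 block,
    # translation in the last column
    T = np.eye(4)
    T[:3, :3] = Rca
    T[:3, 3] = tvec.flatten()
    return T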
Example #4
	def calibrate_camera_aruco(self,samples_dir="frames",outputfile="camera_matrix_aruco.yaml"):
		self.outputfile = outputfile
		board = aruco.GridBoard_create(self.MarkerX,self.MarkerY,0.05,0.01,self.aruco_dict)
		cam_matrix,dist_coeff,rvecs,tvecs= self.read_cam_matrix(outputfile)
		print("dist_coeff original")
		print(dist_coeff)
		print("cam_matrix original")
		print(cam_matrix)
		im = Imutils_Master()
		all_img = []
		all_obj = []
		h,w = 0,0
		file_count = 0
		total_markers_to_look = self.MarkerX * self.MarkerY
		for fname in os.listdir(samples_dir):
			file_count +=1
			img = cv2.imread(samples_dir+"/"+fname)
			h, w = img.shape[:2]
			gray = im.gray(img)
			corners,ids,rejectedImgPoints = aruco.detectMarkers(gray, self.aruco_dict, parameters=self.parameters)
			corners1,ids1,rejectedImgPoints1,rec_index = aruco.refineDetectedMarkers(gray,board, corners, ids,rejectedImgPoints)
			getcount = self.get_marker_count(ids1)

			if ids1 is not None and getcount == total_markers_to_look:
				ret,rv,tv = aruco.estimatePoseBoard(corners1,ids1,board,cam_matrix,dist_coeff,rvecs,tvecs)
				if ret>0:
					aruco.drawDetectedMarkers(img, corners,ids,(0,0,255))
					obj_points,img_points = aruco.getBoardObjectAndImagePoints(board,corners1,ids1)
					all_obj.append(obj_points)
					all_img.append(img_points)
				else:
					print("not able to estimate board")
			cv2.imshow("frame",img)
			if cv2.waitKey(1) & 0xFF == ord('q'):
				break

		cv2.destroyAllWindows()
		print("calibrating starts... may take while")
		
		if len(all_obj)==len(all_img):
			rms, cam_matrix, dist_coeff, rvecs, tvecs = cv2.calibrateCamera(all_obj, all_img, (w, h), None, None)
			if rms:
				data = {'camera_matrix': np.asarray(cam_matrix).tolist(),
				 		'dist_coeff': np.asarray(dist_coeff).tolist(),
				  		'rvecs': np.asarray(rvecs).tolist(), 
				  		'tvecs': np.asarray(tvecs).tolist()}
				flag = self.write_cam_matrix(outputfile,data)
				if flag:
					print("camera details is written to file")
					cam_matrix,dist_coeff,rvecs,tvecs= self.read_cam_matrix(outputfile)
					print("new cam_matrix")
					print(cam_matrix)
					print("new dist_coeff")
					print(dist_coeff)
				else:
					print("error writing camera details to file")
			
		else:
			print("Number of object points is not equal to the number of samples taken")
			print(len(all_obj))
			print(len(all_img))
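read_cam_matrix and write_cam_matrix are instance methods that the listing does not include. A minimal module-level sketch of the writer, assuming PyYAML and the truthy success flag the caller expects (the reader would mirror get_cam_matrix above):

import yaml

def write_cam_matrix(filename, data):
    # Persist the calibration dict (values already converted to plain lists)
    try:
        with open(filename, 'w') as f:
            yaml.safe_dump(data, f)
        return True
    except IOError:
        return False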
Example #5
def main():

    #Start timer to measure execution time
    t1 = time.time()

    #Subscribe to controller_state
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)

    #Force-torque
    if not "disable-ft" in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    #Initialize ros node of this process
    rospy.init_node('Placement_DJ_1', anonymous=True)
    process_client = actionlib.SimpleActionClient('process_step',
                                                  ProcessStepAction)
    process_state_pub = rospy.Publisher("GUI_state",
                                        ProcessState,
                                        queue_size=100,
                                        latch=True)
    process_client.wait_for_server()
    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()

    object_commander = ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()

    #subscribe to Gripper camera node for image acquisition
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)

    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/camera_trigger', Trigger)

    #Set controller command mode
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, [], [])
    time.sleep(0.5)

    #open loop set the initial pose
    #Set location above the panel where the end effector goes first (in the world/base frame): the ideal location of the panel.
    #Cur_Pose = controller_commander.get_current_pose_msg()
    #print Cur_Pose
    #raw_input("confirm release vacuum")

    tran = np.array(
        [2.15484, 1.21372,
         0.25766])  #np.array([2.17209718963,-1.13702651252,0.224273328841])
    rot = rox.q2R(
        [0.02110, -0.03317, 0.99922, -0.00468]
    )  #rox.q2R([0.0145063655084, -0.0282932350764, 0.999322921073, -0.0185137145776])
    #tran = np.array([2.26476414097,-1.22419669418,0.411353037002])
    #rot = np.array([[-0.9959393, -0.0305329,  0.0846911], [-0.0310315,  0.9995079, -0.0045767], [-0.0845097, -0.0071862, -0.9963967]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target3 = copy.deepcopy(pose_target2)

    ##Execute movement to set location
    print "Executing initial path"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    #Initialize aruco boards and parameters
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters_create()
    parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    parameters.adaptiveThreshWinSizeMax = 30
    parameters.adaptiveThreshWinSizeStep = 7

    # 1st Panel tag info
    board_ground = cv2.aruco.GridBoard_create(4, 4, .04, .0075, aruco_dict, 32)
    board_panel = cv2.aruco.GridBoard_create(8, 3, .025, .0075, aruco_dict, 80)
    #Load object points ground tag in panel tag coordinate system from mat file
    loaded_object_points_ground_in_panel_system_stage_1 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system_stage_2 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_1

    #    #Initialize camera intrinsic parameters #18285636_10_05_2018_5_params
    #18285636_10_05_2018_5_params_111122018
    CamParam = CameraParams(2283.391289766133, 1192.255485086403,
                            2279.484382094687, 1021.399092147012, 1.0,
                            -0.022101211408596, -0.095163053709332,
                            -0.003986991791212, -0.003479613658352,
                            0.179926705467534)

    #Subscribe to controller state. Again?
    rospy.Subscriber("controller_state", controllerstate, callback)

    UV = np.zeros([150, 8])
    P = np.zeros([150, 3])
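    #(one row per possible marker id: the 4 detected corner pixel coordinates
    # of a tag go into UV, its translation vector into P)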

    #focal lengths in pixel units (f_hat for x and y)
    f_hat_u = 2283.391289766133  #2342.561249990927#2282.523358266698#2281.339593273153 #2446.88
    f_hat_v = 2279.484382094687  #2338.448312671424#2280.155828279608
    #functions like a gain, used with velocity to track position
    dt = 0.1
    du = 100.0
    dv = 100.0
    dutmp = 100.0
    dvtmp = 100.0
    #TimeGain = [0.1,0.1, 0.1]
    du_array = []
    dv_array = []
    dx_array = []
    iteration = 0
    stage = 1
    #step_ts = 0.004
    Kc = 0.0002
    #time_save = []
    #FTdata_save = []
    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()

    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(),
        np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(),
        np.zeros([3, 1]),
        np.eye(3),
        np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]),
         rg.reshape([3, 1]),
         np.zeros([3, 3]),
         np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)
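    #A is the standard payload-identification regressor: it maps the 10
    #parameters stacked in Vec_wrench (mass first moments, mass, and constant
    #force/torque sensor offsets) to the expected sensor reading for the
    #current gravity direction rg, so FTdata_0est is the predicted
    #gravity-induced reading at the start pose.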
    FTread_array = []
    FTdata_array = []
    t_now_array = []
    step_size = []

    cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Image', 490, 410)

    ### PBVS set the initial pose
    #### Initial Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:

        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)
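    #(note: calling .sort() on the result of zip() is Python 2 only; under
    # Python 3 use sorted(...) since zip returns an iterator)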

    #Remove ids and corresponding corners not in range (Parasitic detections in random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the iterations end(while in the final hovering position above nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    print "tvec difference: ", tvec_err
    print "rvec differnece: ", rvec_err

    # Adjustment
    print "Adjustment ===================="
    current_joint_angles = controller_commander.get_current_joint_values()
    dx = np.array([0, 0, 0, -tvec_err[0], tvec_err[1] + 0.03, tvec_err[2]
                   ]) * 0.75
    joints_vel = QP_abbirb6640(
        np.array(current_joint_angles).reshape(6, 1), np.array(dx))
    goal = trapezoid_gen(
        np.array(current_joint_angles) + joints_vel.dot(1),
        np.array(current_joint_angles), 0.25, np.array(dx))
    client = actionlib.SimpleActionClient("joint_trajectory_action",
                                          FollowJointTrajectoryAction)
    client.wait_for_server()
    client.send_goal(goal)
    client.wait_for_result()
    res = client.get_result()
    if (res.error_code != 0):
        raise Exception("Trajectory execution returned error")

    print res
    #raw_input("confirm move...")
    print "End of Initial Pose ===================="

    ### End of initial pose

    step_size_min = 100000
    stage = 2
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2

    while ((du > 4) or (dv > 4)) and (iteration < 55):  #try changing du and dv to lower values (number of iterations increases)
        iteration += 1
        t_now_array.append(time.time())

        #Go to stage2 of movement(mostly downward z motion)
        if ((du < 4) and (dv < 4) and (stage == 1)):
            #Save pose of marker boards after the iterations end(while in the final hovering position above nest)
            #Get pose of both ground and panel markers from detected corners
            retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
                objPoints_ground, imgPoints_ground, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
            retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
                objPoints_panel, imgPoints_panel, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

            print "============== Observed Pose difference in hovering position"
            observed_tvec_difference = tvec_ground - tvec_panel
            observed_rvec_difference = rvec_ground - rvec_panel
            print "tvec difference: ", observed_tvec_difference
            print "rvec differnece: ", observed_rvec_difference

            #Load ideal pose difference information from file
            loaded_rvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['rvec_difference']
            loaded_tvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['tvec_difference']

            print "============== Ideal Pose difference in hovering position"
            print "tvec difference: ", loaded_tvec_difference
            print "rvec differnece: ", loaded_rvec_difference

            tvec_difference_Above_Nest = loaded_tvec_difference - observed_tvec_difference
            rvec_difference_Above_Nest = loaded_rvec_difference - observed_rvec_difference

            print "============== Difference in pose difference in hovering position"
            print "tvec difference: ", tvec_difference_Above_Nest
            print "rvec differnece: ", rvec_difference_Above_Nest

            #Saving pose information to file
            filename_pose1 = "/home/rpi-cats/Desktop/DJ/Code/Data/Panel1_Above_Nest_Pose_" + str(
                t1) + ".mat"
            scipy.io.savemat(filename_pose1,
                             mdict={
                                 'tvec_ground_Above_Nest':
                                 tvec_ground,
                                 'tvec_panel_Above_Nest':
                                 tvec_panel,
                                 'Rca_ground_Above_Nest':
                                 Rca_ground,
                                 'Rca_panel_Above_Nest':
                                 Rca_panel,
                                 'tvec_difference_Above_Nest':
                                 tvec_ground - tvec_panel,
                                 'rvec_difference_Above_Nest':
                                 rvec_ground - rvec_panel,
                                 'loaded_tvec_difference':
                                 loaded_tvec_difference,
                                 'loaded_rvec_difference':
                                 loaded_rvec_difference,
                                 'observed_tvec_difference':
                                 observed_tvec_difference,
                                 'observed_rvec_difference':
                                 observed_rvec_difference
                             })

            #raw_input("Confirm Stage 2")
            stage = 2
            #			dt=0.02
            loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2
        #print pose_target2.p[2]

        #Print current robot pose at the beginning of this iteration
        Cur_Pose = controller_commander.get_current_pose_msg()
        print "============ Current Robot Pose"
        print Cur_Pose

        #Read new image
        last_ros_image_stamp = object_commander.ros_image_stamp
        try:
            ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            ros_gripper_2_trigger()
        except:
            pass
        wait_count = 0
        while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        result = object_commander.ros_image
        #Save
        #        filename = "Acquisition3_%d.jpg" % (time.time())
        #        scipy.misc.imsave(filename, result)
        #Display
        #        cv2.namedWindow('Acquired Image',cv2.WINDOW_NORMAL)
        #        cv2.imshow('Acquired Image',result)
        #        cv2.waitKey(1)

        #Detect tag corners in acquired image using aruco
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            result, aruco_dict, parameters=parameters)
        frame_with_markers_and_axis = result

        #Sort corners and ids according to ascending order of ids
        corners_original = copy.deepcopy(corners)
        ids_original = np.copy(ids)
        sorting_indices = np.argsort(ids_original, 0)
        ids_sorted = ids_original[sorting_indices]
        ids_sorted = ids_sorted.reshape([len(ids_original), 1])
        combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
        combined_list.sort()
        corners_sorted = [x for y, x in combined_list]
        ids = np.copy(ids_sorted)
        corners = copy.deepcopy(corners_sorted)

        #Remove ids and corresponding corners not in range (Parasitic detections in random locations in the image)
        false_ids_ind = np.where(ids > 150)
        mask = np.ones(ids.shape, dtype=bool)
        mask[false_ids_ind] = False
        ids = ids[mask]
        corners = np.array(corners)
        corners = corners[mask.flatten(), :]
        corners = list(corners)

        #Define object and image points of both tags
        objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
            board_ground, corners, ids)
        objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
            board_panel, corners, ids)
        objPoints_ground = objPoints_ground.reshape(
            [objPoints_ground.shape[0], 3])
        imgPoints_ground = imgPoints_ground.reshape(
            [imgPoints_ground.shape[0], 2])
        objPoints_panel = objPoints_panel.reshape(
            [objPoints_panel.shape[0], 3])
        imgPoints_panel = imgPoints_panel.reshape(
            [imgPoints_panel.shape[0], 2])

        #Get pose of both ground and panel markers from detected corners
        retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
            objPoints_ground, imgPoints_ground, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
        retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
            objPoints_panel, imgPoints_panel, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)

        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(
            corners[0:20], 0.04, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(
            corners[20:45], 0.025, CamParam.camMatrix, CamParam.distCoeff)
        tvec_all = np.concatenate(
            (tvec_all_markers_ground, tvec_all_markers_panel), axis=0)
        for i_ids, i_corners, i_tvec in zip(ids, corners, tvec_all):
            UV[i_ids, :] = i_corners.reshape(
                [1, 8])  #np.average(i_corners, axis=1)
            P[i_ids, :] = i_tvec

        #print 'P',P
        #used to find the height of the tags and the delta change of height, z height at desired position
        Z = 1 * P[32:48,
                  2]  #- [0.68184539, 0.68560932, 0.68966803, 0.69619578])

        #check if all tags detected
        if retVal_ground != 0 and retVal_panel != 0:
            dutmp = []
            dvtmp = []

            #Pixel estimates of the ideal ground tag location
            reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
                loaded_object_points_ground_in_panel_system.transpose(),
                rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
            reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape(
                [reprojected_imagePoints_ground_2.shape[0], 2])

            #plot image points for ground tag from corner detection and from re-projections
            for point1, point2 in zip(
                    imgPoints_ground,
                    np.float32(reprojected_imagePoints_ground_2)):
                cv2.circle(frame_with_markers_and_axis, tuple(point1), 5,
                           (0, 0, 255), 3)
                cv2.circle(frame_with_markers_and_axis, tuple(point2), 5,
                           (255, 0, 0), 3)

            cv2.imshow('Image', frame_with_markers_and_axis)
            cv2.waitKey(1)
            #Save
            filename_image = "/home/rpi-cats/Desktop/DJ/Code/Images/Panel1_Acquisition_" + str(
                t1) + "_" + str(iteration) + ".jpg"
            scipy.misc.imsave(filename_image, frame_with_markers_and_axis)
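
            #Each visible corner contributes two rows of the classic image
            #interaction matrix relating the camera twist to pixel velocity:
            #  [-f/Z    0    u/Z    u*v/f        -(u^2/f + f)    v ]
            #  [  0   -f/Z   v/Z    v^2/f + f    -u*v/f         -u ]
            #with (u, v) measured from the principal point and Z the tag depth.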

            #Go through a particular point in all tags to build the complete Jacobian
            for ic in range(4):
                #uses first set of tags, numbers used to offset camera frame, come from camera parameters
                #UV_target = np.vstack([UV[9:13,2*ic]-1209.151959040735,UV[9:13,2*ic+1]-1055.254852652610]).T - UV_shift[:,(2*ic):(2*ic+2)]		#shift corner estimates to the image centers. Necessary for the image jacobian to work.
                reprojected_imagePoints_ground_2 = np.reshape(
                    reprojected_imagePoints_ground_2, (16, 8))
                UV_target = np.vstack([
                    reprojected_imagePoints_ground_2[:, 2 * ic] -
                    1192.255485086403,
                    reprojected_imagePoints_ground_2[:, 2 * ic + 1] -
                    1021.399092147012
                ]).T
                uc = UV_target[:, 0]
                vc = UV_target[:, 1]

                #                print 'UV_target', UV_target
                UV_current = np.vstack([
                    UV[32:48, 2 * ic] - 1192.255485086403,
                    UV[32:48, 2 * ic + 1] - 1021.399092147012
                ]).T
                #find difference between current and desired tag difference
                delta_UV = UV_target - UV_current
                dutmp.append(np.mean(delta_UV[:, 0]))
                dvtmp.append(np.mean(delta_UV[:, 1]))
                for tag_i in range(16):
                    if tag_i == 0:
                        J_cam_tmp = 1.0 * np.array(
                            [[
                                -f_hat_u / Z[tag_i], 0.0, uc[tag_i] / Z[tag_i],
                                uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                (uc[tag_i] * uc[tag_i] / f_hat_u + f_hat_u),
                                vc[tag_i]
                            ],
                             [
                                 0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                 Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v +
                                 f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                 f_hat_v, -uc[tag_i]
                             ]])
                    else:
                        J_cam_tmp = np.concatenate(
                            (J_cam_tmp,
                             1.0 * np.array(
                                 [[
                                     -f_hat_u / Z[tag_i], 0.0,
                                     uc[tag_i] / Z[tag_i],
                                     uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                     (uc[tag_i] * uc[tag_i] / f_hat_u +
                                      f_hat_u), vc[tag_i]
                                 ],
                                  [
                                      0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                      Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v
                                      + f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                      f_hat_v, -uc[tag_i]
                                  ]])),
                            axis=0)
                #camera jacobian
                if ic == 0:
                    J_cam = J_cam_tmp
                    delta_UV_all = delta_UV.reshape(32, 1)
                    UV_target_all = UV_target.reshape(32, 1)
                else:
                    J_cam = np.vstack([J_cam, J_cam_tmp])
                    delta_UV_all = np.vstack(
                        [delta_UV_all, delta_UV.reshape(32, 1)])
                    UV_target_all = np.vstack(
                        [UV_target_all,
                         UV_target.reshape(32, 1)])

            print 'dutmp: ', dutmp
            print 'dvtmp: ', dvtmp
            du = np.mean(np.absolute(dutmp))
            dv = np.mean(np.absolute(dvtmp))
            print 'Average du of all points:', du, 'Average dv of all points:', dv
            du_array.append(du)
            dv_array.append(dv)

            #print "Jcam",J_cam
            #print "UV",delta_UV_all
            #dx = np.matmul(np.linalg.pinv(J_cam),np.array(delta_UV_all))

            dx = QP_Cam(
                J_cam, delta_UV_all
            )  #np.matmul(np.linalg.pinv(J_cam),np.array(delta_UV_all))
            dx = dx.reshape([6, 1])

            dx_array.append(dx)

            #print '**************Jac:',dx, QP_Cam(J_cam,delta_UV_all)

            dx = np.array([
                dx[3, 0], -dx[4, 0], -dx[5, 0], dx[0, 0], -dx[1, 0], -dx[2, 0]
            ])

            if stage == 2:
                T = listener.lookupTransform("base", "link_6", rospy.Time(0))
                rg = 9.8 * np.matmul(
                    np.matmul(T.R, Tran_z).transpose(),
                    np.array([0, 0, 1]).transpose())
                A1 = np.hstack([
                    rox.hat(rg).transpose(),
                    np.zeros([3, 1]),
                    np.eye(3),
                    np.zeros([3, 3])
                ])
                A2 = np.hstack([
                    np.zeros([3, 3]),
                    rg.reshape([3, 1]),
                    np.zeros([3, 3]),
                    np.eye(3)
                ])
                A = np.vstack([A1, A2])
                FTdata_est = np.matmul(A, Vec_wrench)
                FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
                print 'FTread:', FTread

                print 'Z', FTread[-1]
                if FTread[-1] > -100:
                    F_d = -150
                    Kc = 0.0001
                else:
                    Kc = 0.0001
                    F_d = -200

                Vz = Kc * (F_d - FTread[-1])
                print 'Vz', Vz
                dx[-1] = Vz + dx[-1]
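                #Explicit force control on z: the commanded velocity Vz is
                #proportional (gain Kc) to the force error F_d - FTread[-1]
                #and is blended into the visual-servoing twist dx.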
                FTread_array.append(FTread)
                FTdata_array.append(FTdata)

            current_joint_angles = controller_commander.get_current_joint_values()

            step_size_tmp = np.linalg.norm(dx)
            if step_size_tmp <= step_size_min:
                step_size_min = step_size_tmp
            else:
                dx = dx / step_size_tmp * step_size_min

            joints_vel = QP_abbirb6640(
                np.array(current_joint_angles).reshape(6, 1), np.array(dx))

            goal = trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(dt),
                np.array(current_joint_angles), 0.25, np.array(dx))
            step_size.append(np.linalg.norm(dx))

            client = actionlib.SimpleActionClient("joint_trajectory_action",
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()

            client.send_goal(goal)
            client.wait_for_result()
            res = client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

            print res

            print 'Current Iteration Finished.'

    #Saving iteration data to file
    filename_data = "/home/rpi-cats/Desktop/YC/Data/Panel1_Data_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_data,
                     mdict={
                         'du_array': du_array,
                         'dv_array': dv_array,
                         'dx_array': dx_array,
                         'step_size': step_size,
                         'iteration': iteration
                     })

    #Saving force data to file
    filename_force_data = "/home/rpi-cats/Desktop/YC/Data/Panel1_Data_force_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_force_data,
                     mdict={
                         'FTread': FTread_array,
                         'FTdata': FTdata_array,
                         't_now_array': t_now_array
                     })

    print '###############################'
    print 'step_size', step_size
    print 'iteration', iteration
    print '###############################'
    '''
    print "============ Final Push Down to Nest"
    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.2, [],[])

    
    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
    trans_current = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]
    pose_target2.R = rox.q2R([rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] -= 0.11
    '''

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (Parasitic detections in random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the iterations end(while in the final hovering position above nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    print "tvec difference: ", tvec_err
    print "rvec differnece: ", rvec_err

    # Adjustment
    print "Adjustment ===================="
    current_joint_angles = controller_commander.get_current_joint_values()
    dx = np.array([0, 0, 0, -tvec_err[0], tvec_err[1], 0])
    joints_vel = QP_abbirb6640(
        np.array(current_joint_angles).reshape(6, 1), np.array(dx))
    goal = trapezoid_gen(
        np.array(current_joint_angles) + joints_vel.dot(1),
        np.array(current_joint_angles), 0.25, np.array(dx))
    client = actionlib.SimpleActionClient("joint_trajectory_action",
                                          FollowJointTrajectoryAction)
    client.wait_for_server()
    client.send_goal(goal)
    client.wait_for_result()
    res = client.get_result()
    if (res.error_code != 0):
        raise Exception("Trajectory execution returned error")

    print res
    print "End of Adjustment ===================="

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (Parasitic detections in random locations in the image)
    #false_ids_ind = np.where(ids>73)
    mask = np.ones(ids.shape, dtype=bool)
    #mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the iterations end(while in the final hovering position above nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    rospy.loginfo("tvec difference: %f, %f, %f", tvec_err[0], tvec_err[1],
                  tvec_err[2])
    rospy.loginfo("rvec difference: %f, %f, %f", rvec_err[0], rvec_err[1],
                  rvec_err[2])
    print "rvec differnece: ", tvec_err
    print "rvec differnece: ", rvec_err

    #Saving pose information to file
    filename_pose2 = "/home/rpi-cats/Desktop/DJ/Code/Data/In_Nest_Pose_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_pose2,
                     mdict={
                         'tvec_ground_In_Nest': tvec_ground,
                         'tvec_panel_In_Nest': tvec_panel,
                         'Rca_ground_In_Nest': Rca_ground,
                         'Rca_panel_In_Nest': Rca_panel,
                         'tvec_difference_In_Nest': tvec_ground - tvec_panel,
                         'rvec_difference_In_Nest': rvec_ground - rvec_panel,
                         'loaded_tvec_difference': loaded_tvec_difference,
                         'loaded_rvec_difference': loaded_rvec_difference,
                         'observed_tvec_difference': observed_tvec_difference,
                         'observed_rvec_difference': observed_rvec_difference
                     })
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [], [])
    #    raw_input("confirm release vacuum")
    rapid_node.set_digital_io("Vacuum_enable", 0)
    g = ProcessStepGoal('plan_place_set_second_step', "")

    process_client.send_goal(g)

    #self.in_process=True

    print process_client.get_result()
    print "VACUUM OFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    time.sleep(0.5)

    tran = np.array([2.17209718963, -1.13702651252, 0.224273328841])
    rot = rox.q2R(
        [0.0145063655084, -0.0282932350764, 0.999322921073, -0.0185137145776])
    pose_target2 = rox.Transform(rot, tran)

    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [
        Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
        Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z
    ]
    trans_current = [
        Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
        Cur_Pose.pose.position.z
    ]
    pose_target2.R = rox.q2R(
        [rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] += 0.5
    #
    #
    #    #print 'Target:',pose_target3
    #
    #    #print "============ Executing plan4"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
    s = ProcessState()
    s.state = "place_set"
    s.payload = "leeward_mid_panel"
    s.target = ""
    process_state_pub.publish(s)
Example #6
def main():

    #Start timer to measure execution time
    t1 = time.time()

    #Subscribe to controller_state
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)

    #Force-torque
    if not "disable-ft" in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    #Initialize ros node of this process
    rospy.init_node('Placement_DJ_1', anonymous=True)

    #print "============ Starting setup"

    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()

    object_commander = ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()

    #subscribe to Gripper camera node for image acquisition
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)
    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/continuous_trigger', CameraTrigger)

    #Set controller command mode
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, ft_threshold_place, [])
    time.sleep(0.5)

    #Set location above the panel where the end effector goes first (in the world/base frame): the ideal location of the panel.
    tran = np.array([2.20120663258, 1.2145882986, 0.0798989466944])
    rot = np.array([[-0.9971185, 0.0071821, 0.0755188],
                    [0.0056502, 0.9997743, -0.0204797],
                    [-0.0756488, -0.0199939, -0.9969341]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target3 = copy.deepcopy(pose_target2)
    pose_target2.p[2] += 0.35  #35 cm above ideal location of panel

    #Execute movement to set location
    print "Executing initial path"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    #Initialize aruco boards and parameters
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters_create()
    parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    board_ground = cv2.aruco.GridBoard_create(5, 4, 0.04, 0.01, aruco_dict, 20)
    board_panel = cv2.aruco.GridBoard_create(8, 3, 0.025, 0.0075, aruco_dict,
                                             50)
    #    tag_ids=["vacuum_gripper_marker_1","leeward_mid_panel_marker_1", "aligner_ref_1", "aligner_ref_2", "aligner_ref_3", "aligner_ref_4"]
    #    boards=[gripper_board, panel_board, ref1, ref2, ref3, ref4]

    #Initialize camera intrinsic parameters #18285636_10_05_2018_5_params
    CamParam = CameraParams(2342.561249990927, 1209.151959040735,
                            2338.448312671424, 1055.254852652610, 1.0,
                            -0.014840837133389, -0.021008029929566,
                            3.320024219653553e-04, -0.002187550225028,
                            -0.025059986937316)

    #Subscribe to controller state. Again?
    rospy.Subscriber("controller_state", controllerstate, callback)

    UV = np.zeros([74, 8])
    P = np.zeros([74, 3])

    #Load object points ground tag in panel tag coordinate system from mat file
    loaded_object_points_ground_in_panel_system_stage_1 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system_stage_2 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_1

    #focal lengths in pixel units (f_hat for x and y)
    f_hat_u = 2342.561249990927  #2282.523358266698#2281.339593273153 #2446.88
    f_hat_v = 2338.448312671424  #2280.155828279608
    #functions like a gain, used with velocity to track position
    dt = 0.1
    du = 100.0
    dv = 100.0
    dutmp = 100.0
    dvtmp = 100.0
    TimeGain = [0.1, 0.1, 0.1]

    du_array = []
    dv_array = []
    dx_array = []
    iteration = 0
    stage = 1
    step_ts = 0.004

    Kc = 0.0002
    time_save = []
    FTdata_save = []
    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()

    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(),
        np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(),
        np.zeros([3, 1]),
        np.eye(3),
        np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]),
         rg.reshape([3, 1]),
         np.zeros([3, 3]),
         np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)
    FTread_array = []
    FTdata_array = []
    t_now_array = []

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])
    #while (pose_target2.p[2] > 0.185):
    #    while ((du>10) | (dv>10) | (pose_target2.p[2] > 0.172)):    #try changing du and dv to lower values(number of iterations increases)
    while ((du > 1) or (dv > 1)) and (iteration < 125):  #try changing du and dv to lower values (number of iterations increases)
        #    while ((np.max(np.abs(dutmp))>0.5) | (np.max(np.abs(dvtmp))>0.5)):    #try changing du and dv to lower values(number of iterations increases)
        iteration += 1
        t_now_array.append(time.time())

        #Go to stage2 of movement(mostly downward z motion)
        if ((du < 2) and (dv < 2) and (stage == 1)):
            #Save pose of marker boards after the iterations end(while in the final hovering position above nest)
            #Get pose of both ground and panel markers from detected corners
            retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
                objPoints_ground, imgPoints_ground, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
            retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
                objPoints_panel, imgPoints_panel, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

            print "============== Observed Pose difference in hovering position"
            observed_tvec_difference = tvec_ground - tvec_panel
            observed_rvec_difference = rvec_ground - rvec_panel
            print "tvec difference: ", observed_tvec_difference
            print "rvec differnece: ", observed_rvec_difference

            #Load ideal pose difference information from file
            loaded_rvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['rvec_difference']
            loaded_tvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['tvec_difference']

            print "============== Ideal Pose difference in hovering position"
            print "tvec difference: ", loaded_tvec_difference
            print "rvec differnece: ", loaded_rvec_difference

            tvec_difference_Above_Nest = loaded_tvec_difference - observed_tvec_difference
            rvec_difference_Above_Nest = loaded_rvec_difference - observed_rvec_difference

            print "============== Difference in pose difference in hovering position"
            print "tvec difference: ", tvec_difference_Above_Nest
            print "rvec differnece: ", rvec_difference_Above_Nest

            #Saving pose information to file
            filename_pose1 = "/home/rpi-cats/Desktop/DJ/Code/Data/Above_Nest_Pose_" + str(
                t1) + ".mat"
            scipy.io.savemat(filename_pose1,
                             mdict={
                                 'tvec_ground_Above_Nest':
                                 tvec_ground,
                                 'tvec_panel_Above_Nest':
                                 tvec_panel,
                                 'Rca_ground_Above_Nest':
                                 Rca_ground,
                                 'Rca_panel_Above_Nest':
                                 Rca_panel,
                                 'tvec_difference_Above_Nest':
                                 tvec_ground - tvec_panel,
                                 'rvec_difference_Above_Nest':
                                 rvec_ground - rvec_panel,
                                 'loaded_tvec_difference':
                                 loaded_tvec_difference,
                                 'loaded_rvec_difference':
                                 loaded_rvec_difference,
                                 'observed_tvec_difference':
                                 observed_tvec_difference,
                                 'observed_rvec_difference':
                                 observed_rvec_difference
                             })

            raw_input("Confirm Stage 2")
            stage = 2
            #			dt=0.02
            loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2
        #print pose_target2.p[2]

        #Print current robot pose at the beginning of this iteration
        Cur_Pose = controller_commander.get_current_pose_msg()
        print "============ Current Robot Pose"
        print Cur_Pose

        #Read new image
        last_ros_image_stamp = object_commander.ros_image_stamp
        try:
            ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            ros_gripper_2_trigger(False)
        except:
            pass
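        # Poll until a new frame arrives: up to 50 attempts at 0.25 s each
        # (roughly a 12.5 s timeout).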
        wait_count = 0
        while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        result = object_commander.ros_image
        #Save
        #        filename = "Acquisition3_%d.jpg" % (time.time())
        #        scipy.misc.imsave(filename, result)
        #Display
        #        cv2.namedWindow('Acquired Image',cv2.WINDOW_NORMAL)
        #        cv2.imshow('Acquired Image',result)
        #        cv2.waitKey(1)

        #Detect tag corners in acquired image using aruco
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            result, aruco_dict, parameters=parameters)
        frame_with_markers_and_axis = result

        #Sort corners and ids according to ascending order of ids
        corners_original = copy.deepcopy(corners)
        ids_original = np.copy(ids)
        sorting_indices = np.argsort(ids_original, 0)
        ids_sorted = ids_original[sorting_indices]
        ids_sorted = ids_sorted.reshape([len(ids_original), 1])
        combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
        #        print "combined_list:",combined_list
        combined_list.sort()
        corners_sorted = [x for y, x in combined_list]
        ids = np.copy(ids_sorted)
        corners = copy.deepcopy(corners_sorted)

        #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
        false_ids_ind = np.where(ids > 73)
        mask = np.ones(ids.shape, dtype=bool)
        mask[false_ids_ind] = False
        ids = ids[mask]
        corners = np.array(corners)
        corners = corners[mask.flatten(), :]
        corners = list(corners)
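        # The sort-and-filter steps above could be factored into a helper; a
        # minimal sketch under the same assumptions (hypothetical, not part of
        # the original script):
        #   def sort_and_filter_markers(corners, ids, max_id):
        #       pairs = sorted(zip(ids.flatten().tolist(), corners))
        #       pairs = [(i, c) for i, c in pairs if i <= max_id]
        #       kept_ids = np.array([i for i, _ in pairs]).reshape(-1, 1)
        #       return [c for _, c in pairs], kept_ids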

        #Define object and image points of both tags
        objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
            board_ground, corners, ids)
        objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
            board_panel, corners, ids)
        objPoints_ground = objPoints_ground.reshape(
            [objPoints_ground.shape[0], 3])
        imgPoints_ground = imgPoints_ground.reshape(
            [imgPoints_ground.shape[0], 2])
        objPoints_panel = objPoints_panel.reshape(
            [objPoints_panel.shape[0], 3])
        imgPoints_panel = imgPoints_panel.reshape(
            [imgPoints_panel.shape[0], 2])

        #Get pose of both ground and panel markers from detected corners
        retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
            objPoints_ground, imgPoints_ground, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
        retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
            objPoints_panel, imgPoints_panel, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)
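        # solvePnP returns the board-to-camera transform (rvec, tvec). A quick
        # hedged sanity check (an assumption, not in the original) is to
        # reproject the object points and inspect the mean pixel error:
        #   proj, _ = cv2.projectPoints(objPoints_ground, rvec_ground, tvec_ground,
        #                               CamParam.camMatrix, CamParam.distCoeff)
        #   mean_err = np.linalg.norm(proj.reshape(-1, 2) - imgPoints_ground,
        #                             axis=1).mean()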

        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)

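        # The hard-coded slices below assume that, after sorting, the first 20
        # detected markers belong to the ground board (0.04 m markers) and the
        # next 25 to the panel board (0.025 m markers).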
        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(
            corners[0:20], 0.04, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(
            corners[20:45], 0.025, CamParam.camMatrix, CamParam.distCoeff)
        tvec_all = np.concatenate(
            (tvec_all_markers_ground, tvec_all_markers_panel), axis=0)
        for i_ids, i_corners, i_tvec in zip(ids, corners, tvec_all):
            #print 'i_corners',i_corners,i_corners.reshape([1,8])
            UV[i_ids, :] = i_corners.reshape(
                [1, 8])  #np.average(i_corners, axis=1)
            P[i_ids, :] = i_tvec

        #Per-tag depth (camera-frame z) for tag ids 20-39, used as Z in the image Jacobian below
        Z = 1 * P[20:40, 2]  #- [0.68184539, 0.68560932, 0.68966803, 0.69619578])

        #Proceed only if both board poses were estimated successfully
        if retVal_ground != 0 and retVal_panel != 0:
            dutmp = []
            dvtmp = []

            #Pixel estimates of the ideal ground tag location
            reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
                loaded_object_points_ground_in_panel_system.transpose(),
                rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
            reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape(
                [reprojected_imagePoints_ground_2.shape[0], 2])
            #            print "Image Points Ground:", imgPoints_ground
            #            print "Reprojected Image Points Ground2:", reprojected_imagePoints_ground_2
            #            print "Reprojectoin error:",imgPoints_ground-reprojected_imagePoints_ground_2
            #            print "Average Reprojectoin error: ",np.mean(imgPoints_ground-reprojected_imagePoints_ground_2, axis=0)

            #print "Reprojected Image Points Ground2 type:", type(reprojected_imagePoints_ground_2)

            #Plot ground-tag image points from corner detection (red) and from reprojection (blue)
            for point1, point2 in zip(
                    imgPoints_ground,
                    np.float32(reprojected_imagePoints_ground_2)):
                #cv2.circle expects integer pixel coordinates
                cv2.circle(frame_with_markers_and_axis,
                           tuple(np.int32(point1)), 5, (0, 0, 255), 3)
                cv2.circle(frame_with_markers_and_axis,
                           tuple(np.int32(point2)), 5, (255, 0, 0), 3)

            #            cv2.namedWindow('Acquired Image',cv2.WINDOW_NORMAL)
            #            cv2.imshow('Acquired Image',frame_with_markers_and_axis)
            #            cv2.waitKey(1)
            #Save annotated frame to disk
            filename_image = "/home/rpi-cats/Desktop/DJ/Code/Images/Acquisition_" + str(
                t1) + "_" + str(iteration) + ".jpg"
            scipy.misc.imsave(filename_image, frame_with_markers_and_axis)

            #Loop over the four corners of every tag to build the complete image Jacobian
            for ic in range(4):
                #Uses the first set of tags; the offsets below are the principal point (cx, cy) from the camera intrinsics
                #UV_target = np.vstack([UV[9:13,2*ic]-1209.151959040735,UV[9:13,2*ic+1]-1055.254852652610]).T - UV_shift[:,(2*ic):(2*ic+2)]		#shift corner estimates to the image centers. Necessary for the image jacobian to work.
                reprojected_imagePoints_ground_2 = np.reshape(
                    reprojected_imagePoints_ground_2, (20, 8))
                UV_target = np.vstack([
                    reprojected_imagePoints_ground_2[:, 2 * ic] -
                    1209.151959040735,
                    reprojected_imagePoints_ground_2[:, 2 * ic + 1] -
                    1055.254852652610
                ]).T
                uc = UV_target[:, 0]
                vc = UV_target[:, 1]

                #                print 'UV_target', UV_target
                UV_current = np.vstack([
                    UV[20:40, 2 * ic] - 1209.151959040735,
                    UV[20:40, 2 * ic + 1] - 1055.254852652610
                ]).T
                #Pixel error between desired (reprojected) and current corner locations
                delta_UV = UV_target - UV_current
                #                print 'delta_UV: ',ic, delta_UV
                dutmp.append(np.mean(delta_UV[:, 0]))
                dvtmp.append(np.mean(delta_UV[:, 1]))
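                # Each 2x6 block below is the standard point-feature interaction
                # matrix used in image-based visual servoing, for a pixel offset
                # (u, v) with depth Z and focal lengths f_u, f_v:
                #   [[-f_u/Z,    0,    u/Z,  u*v/f_u,  -(f_u + u^2/f_u),   v],
                #    [   0,  -f_v/Z,   v/Z,  f_v + v^2/f_v,  -u*v/f_v,   -u]]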
                for tag_i in range(20):
                    if tag_i == 0:
                        J_cam_tmp = 1.0 * np.array(
                            [[
                                -f_hat_u / Z[tag_i], 0.0, uc[tag_i] / Z[tag_i],
                                uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                (uc[tag_i] * uc[tag_i] / f_hat_u + f_hat_u),
                                vc[tag_i]
                            ],
                             [
                                 0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                 Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v +
                                 f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                 f_hat_v, -uc[tag_i]
                             ]])
                    else:
                        J_cam_tmp = np.concatenate(
                            (J_cam_tmp,
                             1.0 * np.array(
                                 [[
                                     -f_hat_u / Z[tag_i], 0.0,
                                     uc[tag_i] / Z[tag_i],
                                     uc[tag_i] * vc[tag_i] / f_hat_u, -1.0 *
                                     (uc[tag_i] * uc[tag_i] / f_hat_u +
                                      f_hat_u), vc[tag_i]
                                 ],
                                  [
                                      0.0, -f_hat_v / Z[tag_i], vc[tag_i] /
                                      Z[tag_i], vc[tag_i] * vc[tag_i] / f_hat_v
                                      + f_hat_v, -1.0 * uc[tag_i] * vc[tag_i] /
                                      f_hat_v, -uc[tag_i]
                                  ]])),
                            axis=0)
                #Stack this corner's rows into the full camera Jacobian
                if ic == 0:
                    J_cam = J_cam_tmp
                    delta_UV_all = delta_UV.reshape(40, 1)
                    UV_target_all = UV_target.reshape(40, 1)
                else:
                    J_cam = np.vstack([J_cam, J_cam_tmp])
                    delta_UV_all = np.vstack(
                        [delta_UV_all, delta_UV.reshape(40, 1)])
                    UV_target_all = np.vstack(
                        [UV_target_all,
                         UV_target.reshape(40, 1)])

            print 'dutmp: ', dutmp
            print 'dvtmp: ', dvtmp
            du = np.mean(np.absolute(dutmp))
            dv = np.mean(np.absolute(dvtmp))
            print 'Average du of all points:', du, 'Average dv of all points:', dv
            du_array.append(du)
            dv_array.append(dv)
            #            print 'delta_UV_all',delta_UV_all
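            # Least-squares camera twist from the stacked pixel errors:
            # dx minimizes ||J_cam * dx - delta_UV_all|| via the pseudoinverse.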
            dx = np.matmul(np.linalg.pinv(J_cam), np.array(delta_UV_all))
            dx_array.append(dx)
            #            print 'dx:',dx             #translational and angular velocity
            #print '1:',dx[0:3,0]
            #print '2:',np.hstack([0,-dx[0:3,0]])
            #print '3:',np.hstack([dx[0:3,0].reshape(3,1),rox.hat(np.array(dx[0:3,0]))])
            #coordinate system change, replace with tf transform
            #dx_w = np.array([dx[3,0],-dx[4,0],-dx[5,0]])        #angular velocity
            #map omega to current quaternion
            #Omega = 0.5*np.vstack([np.hstack([0,-dx_w]),np.hstack([dx_w.reshape(3,1),rox.hat(np.array(dx_w))])])
            #print Omega
            #rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
            #rot = np.matmul(expm(Omega*dt),np.array(rot_current))
            #trans_current = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]
            #trans = trans_current + np.array([dx[0,0],-dx[1,0],-dx[2,0]])*TimeGain

            #pose_target2 = rox.Transform(rox.q2R([rot[0], rot[1], rot[2], rot[3]]), trans)
            #Vz=0
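            # Reorder the solution from [v; omega] to [omega; v] and flip the
            # signs of the y/z components, apparently to match the robot base
            # frame convention (inferred from the commented-out code above).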
            dx = np.array([
                dx[3, 0], -dx[4, 0], -dx[5, 0], dx[0, 0], -dx[1, 0], -dx[2, 0]
            ])

            if stage == 2:
                T = listener.lookupTransform("base", "link_6", rospy.Time(0))
                rg = 9.8 * np.matmul(
                    np.matmul(T.R, Tran_z).transpose(),
                    np.array([0, 0, 1]).transpose())
                A1 = np.hstack([
                    rox.hat(rg).transpose(),
                    np.zeros([3, 1]),
                    np.eye(3),
                    np.zeros([3, 3])
                ])
                A2 = np.hstack([
                    np.zeros([3, 3]),
                    rg.reshape([3, 1]),
                    np.zeros([3, 3]),
                    np.eye(3)
                ])
                A = np.vstack([A1, A2])
                FTdata_est = np.matmul(A, Vec_wrench)
                FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
                print 'FTread:', FTread

                print 'Z', FTread[-1]
                if FTread[-1] > -100:
                    F_d = -150
                    Kc = 0.0001
                else:
                    Kc = 0.0001
                    F_d = -200

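                # Explicit proportional force control on the vertical axis:
                # Vz = Kc * (F_d - F_z); the force setpoint switches from
                # -150 to -200 once the measured force passes -100.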
                Vz = Kc * (F_d - FTread[-1])
                print 'Vz', Vz
                dx[-1] = Vz + dx[-1]
                FTread_array.append(FTread)
                FTdata_array.append(FTdata)

            current_joint_angles = controller_commander.get_current_joint_values()
            J = rox.robotjacobian(robot, current_joint_angles)
            joints_vel = np.linalg.pinv(J).dot(np.array(dx))

            print 'vel_norm:', np.linalg.norm(joints_vel)

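            # Segment duration grows logarithmically with the commanded joint
            # speed and is floored at 0.1 s so small corrections still produce
            # a valid trajectory segment.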
            t_duration = np.log(np.linalg.norm(joints_vel) * 10 + 1) + 0.1
            if t_duration < 0.1:
                t_duration = 0.1

            goal = trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(dt),
                np.array(current_joint_angles), t_duration)
            '''
            plan=RobotTrajectory()    
            plan.joint_trajectory.joint_names=['joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6']
        
            plan.joint_trajectory.header.frame_id='/world'
            p1=JointTrajectoryPoint()
            p1.positions = current_joint_angles
            p1.velocities = np.zeros((6,))
            p1.accelerations = np.zeros((6,))
            p1.time_from_start = rospy.Duration(0)     
                 
            p2=JointTrajectoryPoint()
            p2.positions = np.array(p1.positions) + joints_vel.dot(dt)
            p2.velocities = np.zeros((6,))
            p2.accelerations = np.zeros((6,))
            p2.time_from_start = rospy.Duration(dt)

            controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])

        
            plan.joint_trajectory.points.append(p1)
            plan.joint_trajectory.points.append(p2)
            '''

            client = actionlib.SimpleActionClient("joint_trajectory_action",
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()

            client.send_goal(goal)
            client.wait_for_result()
            res = client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

            print res

            #break
            #raw_input("confirm move...")
            '''
            print "============ Executing Current Iteration movement"
            try:
                controller_commander.execute(plan)
                #controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
            except:
                pass
            '''

            print 'Current Iteration Finished.'


#            filename = "delta_UV_all.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, delta_UV_all)
#            f_handle.close()
#
#            filename = "UV_target_all.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, UV_target_all)
#            f_handle.close()
#
#            filename = "P.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, P)
#            f_handle.close()
#
#
#            filename = "Robot.txt"
#            f_handle = file(filename, 'a')
#            np.savetxt(f_handle, np.hstack([np.array(trans_current),np.array(rot_current)]))
#            f_handle.close()

    #Saving iteration data to file
    filename_data = "/home/rpi-cats/Desktop/DJ/Code/Data/Data_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_data,
                     mdict={
                         'du_array': du_array,
                         'dv_array': dv_array,
                         'dx_array': dx_array
                     })

    #Saving force data to file
    filename_force_data = "/home/rpi-cats/Desktop/DJ/Code/Data/Data_force_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_force_data,
                     mdict={
                         'FTread': FTread_array,
                         'FTdata': FTdata_array,
                         't_now_array': t_now_array
                     })

    #    ####Hovering error calculation
    #    #Read new image
    #    last_ros_image_stamp = object_commander.ros_image_stamp
    #    try:
    #        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
    #        ros_gripper_2_trigger(False)
    #    except:
    #        pass
    #    wait_count=0
    #    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
    #        if wait_count > 50:
    #            raise Exception("Image receive timeout")
    #        time.sleep(0.25)
    #        wait_count += 1
    #    result = object_commander.ros_image
    #
    #    #Detect tag corners in acquired image using aruco
    #    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(result, aruco_dict, parameters=parameters)
    #
    #    #Sort corners and ids according to ascending order of ids
    #    corners_original=copy.deepcopy(corners)
    #    ids_original=np.copy(ids)
    #    sorting_indices=np.argsort(ids_original,0)
    #    ids_sorted=ids_original[sorting_indices]
    #    ids_sorted=ids_sorted.reshape([len(ids_original),1])
    #    combined_list=zip(np.ndarray.tolist(ids.flatten()),corners_original)
    #    combined_list.sort()
    #    corners_sorted=[x for y,x in combined_list]
    #    ids=np.copy(ids_sorted)
    #    corners=copy.deepcopy(corners_sorted)
    #
    #    #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
    #    false_ids_ind = np.where(ids>73)
    #    mask = np.ones(ids.shape, dtype=bool)
    #    mask[false_ids_ind] = False
    #    ids = ids[mask]
    #    corners = np.array(corners)
    #    corners = corners[mask.flatten(),:]
    #    corners = list(corners)
    #
    #    #Define object and image points of both tags
    #    objPoints_ground, imgPoints_ground	=	aruco.getBoardObjectAndImagePoints(board_ground, corners, ids)
    #    objPoints_panel, imgPoints_panel	=	aruco.getBoardObjectAndImagePoints(board_panel, corners, ids)
    #    objPoints_ground=objPoints_ground.reshape([objPoints_ground.shape[0],3])
    #    imgPoints_ground=imgPoints_ground.reshape([imgPoints_ground.shape[0],2])
    #    objPoints_panel=objPoints_panel.reshape([objPoints_panel.shape[0],3])
    #    imgPoints_panel=imgPoints_panel.reshape([imgPoints_panel.shape[0],2])
    #
    #    #Save pose of marker boards after the iterations end(while in the final hovering position above nest)
    #    #Get pose of both ground and panel markers from detected corners
    #    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(objPoints_ground, imgPoints_ground, CamParam.camMatrix, CamParam.distCoeff)
    #    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    #    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(objPoints_panel, imgPoints_panel, CamParam.camMatrix, CamParam.distCoeff)
    #    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)
    #
    #    print"============== Observed Pose difference in hovering position"
    #    observed_tvec_difference=tvec_ground-tvec_panel
    #    observed_rvec_difference=rvec_ground-rvec_panel
    #    print "tvec difference: ",observed_tvec_difference
    #    print "rvec differnece: ",observed_rvec_difference
    #
    #    #Load ideal pose differnece information from file
    #    loaded_rvec_difference = loadmat('/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat')['rvec_difference']
    #    loaded_tvec_difference = loadmat('/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat')['tvec_difference']
    #
    #    print"============== Ideal Pose difference in hovering position"
    #    print "tvec difference: ",loaded_tvec_difference
    #    print "rvec differnece: ",loaded_rvec_difference
    #
    #    print"============== Difference in pose difference in hovering position"
    #    print "tvec difference: ",loaded_tvec_difference-observed_tvec_difference
    #    print "rvec differnece: ",loaded_rvec_difference-observed_rvec_difference
    #
    #    #Saving pose information to file
    #    filename_pose1 = "/home/armabb6640/Desktop/DJ/Code/Data/Above_Nest_Pose_"+str(t1)+".mat"
    #    scipy.io.savemat(filename_pose1, mdict={'tvec_ground_Above_Nest':tvec_ground, 'tvec_panel_Above_Nest':tvec_panel,
    #    'Rca_ground_Above_Nest':Rca_ground, 'Rca_panel_Above_Nest': Rca_panel, 'tvec_difference_Above_Nest': tvec_ground-tvec_panel, 'rvec_difference_Above_Nest': rvec_ground-rvec_panel})

    print "============ Final Push Down to Nest"
    # while (1):
    #DJ Final push from hovering above nest into resting in the nest
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.2, [], [])
    #    pose_target2.p[2] = 0.115
    #    pose_target2.p[2] = 0.15

    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [
        Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
        Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z
    ]
    trans_current = [
        Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
        Cur_Pose.pose.position.z
    ]
    pose_target2.R = rox.q2R(
        [rot_current[0], rot_current[1], rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] -= 0.11
    #    pose_target2.p[0] += 0.005
    #    pose_target2.p[1] -= 0.002

    #    controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)

    #### Final Nest Placement Error Calculation ===============================
    #Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger(False)
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    #Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    #Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
    false_ids_ind = np.where(ids > 73)
    mask = np.ones(ids.shape, dtype=bool)
    mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Save pose of marker boards after the iterations end (final placement in the nest)
    #Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec differnece: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference - observed_tvec_difference
    print "rvec differnece: ", loaded_rvec_difference - observed_rvec_difference

    #Saving pose information to file
    filename_pose2 = "/home/rpi-cats/Desktop/DJ/Code/Data/In_Nest_Pose_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_pose2,
                     mdict={
                         'tvec_ground_In_Nest': tvec_ground,
                         'tvec_panel_In_Nest': tvec_panel,
                         'Rca_ground_In_Nest': Rca_ground,
                         'Rca_panel_In_Nest': Rca_panel,
                         'tvec_difference_In_Nest': tvec_ground - tvec_panel,
                         'rvec_difference_In_Nest': rvec_ground - rvec_panel,
                         'loaded_tvec_difference': loaded_tvec_difference,
                         'loaded_rvec_difference': loaded_rvec_difference,
                         'observed_tvec_difference': observed_tvec_difference,
                         'observed_rvec_difference': observed_rvec_difference
                     })

    #    print "============ Lift gripper!"
    #    controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
    #    raw_input("confirm release vacuum")
    #    #rapid_node.set_digital_io("Vacuum_enable", 0)
    #    print "VACUUM OFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    #    time.sleep(0.5)
    #    pose_target3.p[2] += 0.2
    #
    #
    #    #print 'Target:',pose_target3
    #
    #    #print "============ Executing plan4"
    #    #controller_commander.compute_cartesian_path_and_move(pose_target3, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
Example #7
def main():
    t1 = time.time()
    rospy.init_node('Placement_DJ_1', anonymous=True)
    image_pub = rospy.Publisher('/gripper_camera_1/image',
                                Image,
                                queue_size=10)
    object_commander = ObjectRecognitionCommander()
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)
    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/continuous_trigger', CameraTrigger)
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters_create()
    parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    parameters.adaptiveThreshWinSizeMax = 30
    parameters.adaptiveThreshWinSizeStep = 7
    last_ros_image_stamp = rospy.Time(0)
    placing_panel_2 = False

    board_ground = cv2.aruco.GridBoard_create(4, 4, .04, .0075, aruco_dict, 32)
    board_panel = cv2.aruco.GridBoard_create(8, 3, .025, .0075, aruco_dict, 80)
    board_mid_panel_2 = cv2.aruco.GridBoard_create(4, 4, .04, .0075,
                                                   aruco_dict, 16)
    board_tip_panel = cv2.aruco.GridBoard_create(8, 3, .025, .0075, aruco_dict,
                                                 50)
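    # GridBoard_create(markersX, markersY, markerLength, markerSeparation,
    # dictionary, firstMarker): the ground board spans marker ids 32-47 and
    # the panel board ids 80-103.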
    CamParam = CameraParams(2283.391289766133, 1192.255485086403,
                            2279.484382094687, 1021.399092147012, 1.0,
                            -0.022101211408596, -0.095163053709332,
                            -0.003986991791212, -0.003479613658352,
                            0.179926705467534)
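    # CameraParams appears to take (fx, cx, fy, cy, skew, k1, k2, p1, p2, k3);
    # this ordering is an assumption, based on fx/fy matching the f_hat_u and
    # f_hat_v values reused below.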
    UV = np.zeros([150, 8])
    P = np.zeros([150, 3])
    #loaded_object_points_ground_in_panel_system_stage_1 = loadmat('/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat') ['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system_stage_2 = loadmat(
        '/home/rpi-cats/Desktop/YC/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/YC/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/YC/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']
    loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2
    f_hat_u = 2283.391289766133  #2342.561249990927#2282.523358266698#2281.339593273153 #2446.88
    f_hat_v = 2279.484382094687  #2338.448312671424#2280.155828279608
    du_array = []
    dv_array = []
    dx_array = []
    cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Image', 2000, 2000)
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger(False)
        print "triggered"
    except:
        pass

    wait_count = 0
    while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)
    if (20 in ids):
        print "Now placing panel 2"
        placing_panel_2 = True
        board_ground = board_mid_panel_2
        board_panel = board_tip_panel
        loaded_object_points_ground_in_panel_system_stage_1 = loadmat(
            '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel2_Cam_636_object_points_ground_tag_in_panel_frame_Offset_In_Nest.mat'
        )['object_points_ground_in_panel_tag_system']
        loaded_object_points_ground_in_panel_system_stage_2 = loadmat(
            '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel2_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
        )['object_points_ground_in_panel_tag_system']
        loaded_rvec_difference = loadmat(
            '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel2_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
        )['rvec_difference']
        loaded_tvec_difference = loadmat(
            '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel2_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
        )['tvec_difference']

        loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2

    while (True):

        last_ros_image_stamp = object_commander.ros_image_stamp
        try:
            ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            ros_gripper_2_trigger(False)
            print "triggered"
        except:
            pass

        wait_count = 0
        while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        result = object_commander.ros_image
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            result, aruco_dict, parameters=parameters)

        frame_with_markers_and_axis = result
        corners_original = copy.deepcopy(corners)
        ids_original = np.copy(ids)
        sorting_indices = np.argsort(ids_original, 0)
        ids_sorted = ids_original[sorting_indices]
        ids_sorted = ids_sorted.reshape([len(ids_original), 1])
        combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
        #        print "combined_list:",combined_list
        combined_list.sort()
        corners_sorted = [x for y, x in combined_list]
        ids = np.copy(ids_sorted)
        corners = copy.deepcopy(corners_sorted)

        #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
        false_ids_ind = np.where(ids > 150)
        mask = np.ones(ids.shape, dtype=bool)
        mask[false_ids_ind] = False
        ids = ids[mask]
        corners = np.array(corners)
        corners = corners[mask.flatten(), :]
        corners = list(corners)
        #print "sorted ids: ",ids
        #print "sorted corners: ",corners

        objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
            board_ground, corners, ids)
        objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
            board_panel, corners, ids)
        objPoints_ground = objPoints_ground.reshape(
            [objPoints_ground.shape[0], 3])
        imgPoints_ground = imgPoints_ground.reshape(
            [imgPoints_ground.shape[0], 2])
        objPoints_panel = objPoints_panel.reshape(
            [objPoints_panel.shape[0], 3])
        imgPoints_panel = imgPoints_panel.reshape(
            [imgPoints_panel.shape[0], 2])
        #objPoints_ground, imgPoints_ground	=	aruco.getBoardObjectAndImagePoints(board_ground, corners, ids)

        retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
            objPoints_ground, imgPoints_ground, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
        retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
            objPoints_panel, imgPoints_panel, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)
        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(
            corners[0:20], 0.04, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(
            corners[20:45], 0.025, CamParam.camMatrix, CamParam.distCoeff)
        tvec_all = np.concatenate(
            (tvec_all_markers_ground, tvec_all_markers_panel), axis=0)

        for i_ids, i_corners, i_tvec in zip(ids, corners, tvec_all):
            #print 'i_corners',i_corners,i_corners.reshape([1,8])
            UV[i_ids, :] = i_corners.reshape(
                [1, 8])  #np.average(i_corners, axis=1)
            P[i_ids, :] = i_tvec

        #Per-tag depth (camera-frame z) lookup for the disabled image-Jacobian block below
        '''
        Z = 1*P[32:48,2] #- [0.68184539, 0.68560932, 0.68966803, 0.69619578])
        if(placing_panel_2):
            Z = 1*P[16:32,2]
        '''
        #Proceed only if both board poses were estimated successfully
        if retVal_ground != 0 and retVal_panel != 0:
            dutmp = []
            dvtmp = []

            #Pixel estimates of the ideal ground tag location
            reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
                loaded_object_points_ground_in_panel_system.transpose(),
                rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
            reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape(
                [reprojected_imagePoints_ground_2.shape[0], 2])

            #Plot ground-tag image points from corner detection (red) and from reprojection (blue)
            for point1, point2 in zip(
                    imgPoints_ground,
                    np.float32(reprojected_imagePoints_ground_2)):
                #cv2.circle expects integer pixel coordinates
                cv2.circle(frame_with_markers_and_axis,
                           tuple(np.int32(point1)), 5, (0, 0, 255), 3)
                cv2.circle(frame_with_markers_and_axis,
                           tuple(np.int32(point2)), 5, (255, 0, 0), 3)

            #print 'Test:', imgPoints_ground-np.float32(reprojected_imagePoints_ground_2)
            tmp = imgPoints_ground - np.float32(
                reprojected_imagePoints_ground_2)

            cv2.imshow('Image', frame_with_markers_and_axis)
            cv2.waitKey(1)
            image_msg = object_commander.bridge.cv2_to_imgmsg(
                frame_with_markers_and_axis, "mono8")
            image_msg.header.stamp = rospy.Time.now()
            image_pub.publish(image_msg)
            '''                
            for ic in range(4):
                #uses first set of tags, numbers used to offset camera frame, come from camera parameters
                #UV_target = np.vstack([UV[9:13,2*ic]-1209.151959040735,UV[9:13,2*ic+1]-1055.254852652610]).T - UV_shift[:,(2*ic):(2*ic+2)]		#shift corner estimates to the image centers. Necessary for the image jacobian to work.
                reprojected_imagePoints_ground_2 = np.reshape(reprojected_imagePoints_ground_2,(16,8))
                UV_target = np.vstack([reprojected_imagePoints_ground_2[:,2*ic]-1192.255485086403,reprojected_imagePoints_ground_2[:,2*ic+1]-1021.399092147012]).T
                uc = UV_target[:,0]
                vc = UV_target[:,1]
    		    
#                print 'UV_target', UV_target
                UV_current = np.vstack([UV[16:32,2*ic]-1192.255485086403,UV[16:32,2*ic+1]-1021.399092147012]).T
                #find difference between current and desired tag difference
                delta_UV = UV_target-UV_current
#                print 'delta_UV: ',ic, delta_UV
                dutmp.append(np.mean(delta_UV[:,0]))
                dvtmp.append(np.mean(delta_UV[:,1]))


                for tag_i in range(16):
                    if tag_i == 0:
                        J_cam_tmp = 1.0*np.array([[-f_hat_u/Z[tag_i], 0.0, uc[tag_i]/Z[tag_i], uc[tag_i]*vc[tag_i]/f_hat_u, -1.0*(uc[tag_i]*uc[tag_i]/f_hat_u + f_hat_u), vc[tag_i]],
                                                   [0.0, -f_hat_v/Z[tag_i], vc[tag_i]/Z[tag_i], vc[tag_i]*vc[tag_i]/f_hat_v+f_hat_v, -1.0*uc[tag_i]*vc[tag_i]/f_hat_v, -uc[tag_i]]])
                    else:
                        J_cam_tmp= np.concatenate((J_cam_tmp, 1.0*np.array([[-f_hat_u/Z[tag_i], 0.0, uc[tag_i]/Z[tag_i], uc[tag_i]*vc[tag_i]/f_hat_u, -1.0*(uc[tag_i]*uc[tag_i]/f_hat_u + f_hat_u), vc[tag_i]],
                                                   [0.0, -f_hat_v/Z[tag_i], vc[tag_i]/Z[tag_i], vc[tag_i]*vc[tag_i]/f_hat_v+f_hat_v, -1.0*uc[tag_i]*vc[tag_i]/f_hat_v, -uc[tag_i]]])), axis=0)
                #camera jacobian
                if ic ==0:
                    J_cam = J_cam_tmp
                    delta_UV_all = delta_UV.reshape(32,1)
                    UV_target_all = UV_target.reshape(32,1)
                else:
                    J_cam = np.vstack([J_cam,J_cam_tmp])
                    delta_UV_all = np.vstack([delta_UV_all,delta_UV.reshape(32,1)]) 
                    UV_target_all = np.vstack([UV_target_all,UV_target.reshape(32,1)])
            '''

            #tmp = imgPoints_ground-np.float32(reprojected_imagePoints_ground_2)
            dutmp = tmp[:, 0]
            dvtmp = tmp[:, 1]
            print 'dutmp: ', dutmp
            print 'dvtmp: ', dvtmp
            du = np.mean(np.absolute(dutmp))
            dv = np.mean(np.absolute(dvtmp))
            print 'Average du of all points:', du, 'Average dv of all points:', dv

        #print"============== Observed Pose difference in nest position"
        observed_tvec_difference = tvec_ground - tvec_panel
        observed_rvec_difference = rvec_ground - rvec_panel
        #print "tvec difference: ",observed_tvec_difference
        #print "rvec differnece: ",observed_rvec_difference

        #Load ideal pose differnece information from file

        #print"============== Ideal Pose difference in nest position"
        #print "tvec difference: ",loaded_tvec_difference
        #print "rvec differnece: ",loaded_rvec_difference

        print "============== Difference in pose difference in nest position"
        tvec_err = loaded_tvec_difference - observed_tvec_difference
        rvec_err = loaded_rvec_difference - observed_rvec_difference
        print "tvec difference: ", tvec_err
        print "rvec differnece: ", rvec_err

        filename_pose2 = "/home/rpi-cats/KPPplacementdata/Panel2_In_Nest_Pose_" + str(
            t1) + ".mat"
        scipy.io.savemat(filename_pose2,
                         mdict={
                             'tvec_ground_In_Nest': tvec_ground,
                             'tvec_panel_In_Nest': tvec_panel,
                             'Rca_ground_In_Nest': Rca_ground,
                             'Rca_panel_In_Nest': Rca_panel,
                             'tvec_difference_In_Nest':
                             tvec_ground - tvec_panel,
                             'rvec_difference_In_Nest':
                             rvec_ground - rvec_panel,
                             'loaded_tvec_difference': loaded_tvec_difference,
                             'loaded_rvec_difference': loaded_rvec_difference,
                             'observed_tvec_difference':
                             observed_tvec_difference,
                             'observed_rvec_difference':
                             observed_rvec_difference,
                             'du': du,
                             'dv': dv
                         })
Example #8
def main():
    global dictionary
    parser = argparse.ArgumentParser(
        description='Calibrate a camera from ChArUco board images.')
    parser.add_argument('inputs', nargs="+")
    parser.add_argument('--output')
    parser.add_argument('--rows', "-r", type=int, default=11)
    parser.add_argument('--cols', "-c", type=int, default=8)
    parser.add_argument('--markersize', "-m", type=float, default=10)
    parser.add_argument('--squaresize', "-s", type=float, default=20)
    parser.add_argument('--dictionary', "-d", default="DICT_5X5_250")
    args = parser.parse_args()

    dic = getattr(cv2.aruco, args.dictionary)
    aruco_dict = cv2.aruco.getPredefinedDictionary(dic)
    board = aruco.CharucoBoard_create(args.cols, args.rows, args.squaresize,
                                      args.markersize, aruco_dict)
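    # CharucoBoard_create(squaresX, squaresY, squareLength, markerLength,
    # dictionary); square and marker sizes share whatever unit the caller
    # passes (defaults here: 20 and 10).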

    images = args.inputs
    allCorners, allIds, imsize, allImages = read_chessboards(
        images, aruco_dict, board)
    print(allCorners)
    print(allIds)
    print(imsize)
    ret, mtx, dist, rvecs, tvecs = calibrate_camera(allCorners, allIds, imsize,
                                                    board)
    print("result", ret, mtx, dist, rvecs, tvecs)
    if args.output:
        objPoints = []
        imgPoints = []
        for i in range(0, len(allIds)):
            op, ip = aruco.getBoardObjectAndImagePoints(
                board, allCorners[i], allIds[i])
            objPoints.append(op)
            imgPoints.append(ip)
        print("mtx cam", mtx.shape, mtx.dtype)
        o = dict(result=ret,
                 mtx=mtx,
                 dist=dist,
                 markers=dict(vecs=rvecs,
                              tvecs=tvecs,
                              objpts=objPoints,
                              imgpts=imgPoints,
                              corners=allCorners,
                              ids=allIds,
                              images=allImages),
                 imsize=imsize)
        json.dump(o,
                  open(args.output, "w"),
                  indent=4,
                  sort_keys=True,
                  cls=NumpyEncoder)
    i = 3  # select image id
    plt.figure()
    frame = cv2.imread(images[min(i, len(images) - 1)])
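    # Undistort one sample frame with the estimated intrinsics so the raw and
    # corrected images can be compared side by side below.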
    img_undist = cv2.undistort(frame, mtx, dist, None)
    plt.subplot(1, 2, 1)
    plt.imshow(frame)
    plt.title("Raw image")
    plt.axis("off")
    plt.subplot(1, 2, 2)
    plt.imshow(img_undist)
    plt.title("Corrected image")
    plt.axis("off")
    plt.show()
    print("result", ret)
    if False:
        for f in args.inputs:

            img = cv2.imread(f, cv2.IMREAD_GRAYSCALE)
            [markerCorners, markerIds,
             rejectedImgPoints] = cv2.aruco.detectMarkers(img, dictionary)

            if len(markerCorners) > 0:
                [ret, charucoCorners,
                 charucoIds] = cv2.aruco.interpolateCornersCharuco(
                     markerCorners, markerIds, img, board)
                if charucoCorners is not None and charucoIds is not None and len(
                        charucoCorners) > 3 and decimator % 3 == 0:
                    allCorners.append(charucoCorners)
                    allIds.append(charucoIds)

                cv2.aruco.drawDetectedMarkers(img, markerCorners, markerIds)
                cv2.aruco.drawDetectedCornersCharuco(img, charucoCorners,
                                                     charucoIds)