def calibrate(points2D=None, points3D=None):
    # Load corresponding points
    folder = os.path.join(PKG_PATH, CALIB_PATH)
    if points2D is None: points2D = np.load(os.path.join(folder, 'img_corners.npy'))
    if points3D is None: points3D = np.load(os.path.join(folder, 'pcl_corners.npy'))
    
    # Check points shape
    assert(points2D.shape[0] == points3D.shape[0])
    if not (points2D.shape[0] >= 5):
        rospy.logwarn('PnP RANSAC requires a minimum of 5 points')
        return

    # Obtain camera matrix and distortion coefficients
    camera_matrix = CAMERA_MODEL.intrinsicMatrix()
    dist_coeffs = CAMERA_MODEL.distortionCoeffs()

    # Estimate extrinsics
    success, rotation_vector, translation_vector, inliers = cv2.solvePnPRansac(points3D, 
        points2D, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
    
    # Compute re-projection error.
    points2D_reproj = cv2.projectPoints(points3D, rotation_vector,
        translation_vector, camera_matrix, dist_coeffs)[0].squeeze(1)
    assert(points2D_reproj.shape == points2D.shape)
    error = (points2D_reproj - points2D)[inliers]  # Compute error only over inliers.
    #rmse = np.sqrt(np.mean(error[:, 0] ** 2 + error[:, 1] ** 2))
    rospy.loginfo('Error: ' + str(error))

    # Refine estimate using LM
    if not success:
        rospy.logwarn('Initial estimation unsuccessful, skipping refinement')
    elif not hasattr(cv2, 'solvePnPRefineLM'):
        rospy.logwarn('solvePnPRefineLM requires OpenCV >= 4.1.1, skipping refinement')
    else:
        assert len(inliers) >= 3, 'LM refinement requires at least 3 inlier points'
        rotation_vector, translation_vector = cv2.solvePnPRefineLM(points3D[inliers],
            points2D[inliers], camera_matrix, dist_coeffs, rotation_vector, translation_vector)

        # Compute re-projection error.
        points2D_reproj = cv2.projectPoints(points3D, rotation_vector,
            translation_vector, camera_matrix, dist_coeffs)[0].squeeze(1)
        assert(points2D_reproj.shape == points2D.shape)
        error = (points2D_reproj - points2D)[inliers]  # Compute error only over inliers.
        #rmse = np.sqrt(np.mean(error[:, 0] ** 2 + error[:, 1] ** 2))
        rospy.loginfo('Error: ' + str(error))

    # Convert rotation vector
    rotation_matrix = cv2.Rodrigues(rotation_vector)[0]
    euler = euler_from_matrix(rotation_matrix)
    
    # Save extrinsics
    np.savez(os.path.join(folder, 'extrinsics.npz'),
        euler=euler, R=rotation_matrix, T=translation_vector.T)

    # Display results
    print('Euler angles (RPY):', euler)
    print('Rotation Matrix:', rotation_matrix)
    print('Translation Offsets:', translation_vector.T)
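# Sketch (assumption, not part of the original snippet): the commented-out line above hints
# at an RMSE summary of the re-projection error. Assuming `inliers` from solvePnPRansac has
# shape (K, 1), the inlier residuals could be reduced to a single pixel RMSE like this:
inlier_idx = inliers.flatten()
residuals = points2D_reproj[inlier_idx] - points2D[inlier_idx]   # (K, 2) pixel residuals
rmse = np.sqrt(np.mean(np.sum(residuals ** 2, axis=1)))          # sqrt(mean(dx^2 + dy^2))
rospy.loginfo('Re-projection RMSE over %d inliers: %.3f px' % (len(inlier_idx), rmse))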
Example 2
def calibrate():
    '''
    Calibrate the LiDAR-camera extrinsics using OpenCV PnP RANSAC; requires a minimum of 5 point correspondences.
    Inputs:
        None - the (N, 2) image points and (N, 3) LiDAR points are loaded
               from the hardcoded img_corners.npy / pcl_corners.npy files below
    Outputs:
        Extrinsics saved to extrinsics.npz in the same scripts folder
    '''
    # Load corresponding points
    points2D = np.load(
        '/home/eugen/catkin_ws/src/Camera_Lidar/scripts/img_corners.npy')
    points3D = np.load(
        '/home/eugen/catkin_ws/src/Camera_Lidar/scripts/pcl_corners.npy')
    print('points2D:{},  points3D:{}'.format(np.shape(points2D),
                                             np.shape(points3D)))

    if not (points2D.shape[0] >= 5):
        print('PnP RANSAC requires a minimum of 5 points')
        return

    # Obtain camera matrix and distortion coefficients
    K = np.array([[484.130454, 0.000000, 457.177461],
                  [0.000000, 484.452449, 364.861413],
                  [0.000000, 0.000000, 1.000000]])
    d = np.array([-0.199619, 0.068964, 0.003371, 0.000296, 0.000000])

    # Estimate extrinsics
    success, rotation_vector, t, _ = cv2.solvePnPRansac(
        points3D, points2D, K, d, flags=cv2.SOLVEPNP_ITERATIVE)
    print('success ', success)
    # Refine estimate using LM
    if not success:
        print('Initial estimation unsuccessful, skipping refinement')
    elif not hasattr(cv2, 'solvePnPRefineLM'):
        print('solvePnPRefineLM requires OpenCV >= 4.1.1, skipping refinement')
    else:
        print('Refining initial estimate with solvePnPRefineLM')
        rotation_vector, t = cv2.solvePnPRefineLM(points3D, points2D, K, d,
                                                  rotation_vector, t)

    # Convert rotation vector
    R = cv2.Rodrigues(rotation_vector)[0]
    euler = None  #euler_from_matrix(R)

    # Save extrinsics
    np.savez(os.path.join('/home/eugen/catkin_ws/src/Camera_Lidar/scripts',
                          'extrinsics.npz'),
             euler=euler,
             R=R,
             T=t.T)

    # Display results
    print('Euler angles (RPY):', euler)
    print('Rotation Matrix:', R)
    print('Translation Offsets:', t.T)
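# Sketch (assumption): example 2 leaves `euler` as None because tf.transformations'
# euler_from_matrix is not imported here. One way to recover roll-pitch-yaw from the
# rotation matrix without ROS, assuming SciPy >= 1.4 is available:
from scipy.spatial.transform import Rotation

# Lowercase 'xyz' means extrinsic (fixed-axis) rotations, matching the default 'sxyz'
# convention of tf.transformations.euler_from_matrix.
euler = Rotation.from_matrix(R).as_euler('xyz')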
Example 3
def calibrate(points2D=None, points3D=None):
    # Load corresponding points
    folder = os.path.join(PKG_PATH, CALIB_PATH)
    if points2D is None:
        points2D = np.load(os.path.join(folder, 'img_corners.npy'))
    if points3D is None:
        points3D = np.load(os.path.join(folder, 'pcl_corners.npy'))

    # Check points shape
    assert (points2D.shape[0] == points3D.shape[0])
    if not (points2D.shape[0] >= 5):
        rospy.logwarn('PnP RANSAC requires a minimum of 5 points')
        return

    # Obtain camera matrix and distortion coefficients
    camera_matrix = CAMERA_MODEL.intrinsicMatrix()
    dist_coeffs = CAMERA_MODEL.distortionCoeffs()

    # Estimate extrinsics
    success, rotation_vector, translation_vector, _ = cv2.solvePnPRansac(
        points3D,
        points2D,
        camera_matrix,
        dist_coeffs,
        flags=cv2.SOLVEPNP_ITERATIVE)

    # Refine estimate using LM
    if not success:
        rospy.logwarn('Initial estimation unsuccessful, skipping refinement')
    elif not hasattr(cv2, 'solvePnPRefineLM'):
        rospy.logwarn(
            'solvePnPRefineLM requires OpenCV >= 4.1.1, skipping refinement')
    else:
        rotation_vector, translation_vector = cv2.solvePnPRefineLM(
            points3D, points2D, camera_matrix, dist_coeffs, rotation_vector,
            translation_vector)

    # Convert rotation vector
    rotation_matrix = cv2.Rodrigues(rotation_vector)[0]
    euler = euler_from_matrix(rotation_matrix)

    # Save extrinsics
    np.savez(os.path.join(folder, 'extrinsics.npz'),
             euler=euler,
             R=rotation_matrix,
             T=translation_vector.T)

    # Display results
    print('Euler angles (RPY):', euler)
    print('Rotation Matrix:', rotation_matrix)
    print('Translation Offsets:', translation_vector.T)
Example 4
    def calib(self, imgPath, cameraCalibPath):
        # imgFileList = readFileList(imgFolder)

        data = np.load(cameraCalibPath)
        camMtx = data["mtx"]
        dist = data["dist"]

        objP_pixel = np.ceil(self.readCheckerObjPoint(self.checker_file))
        objP_pixel[:, 2] = 0
        objP = np.array(objP_pixel)
        for i in range(self.checker_pattern[1]):
            for j in range(math.floor(self.checker_pattern[0] / 2)):
                tmp = objP[self.checker_pattern[0] * i + j, 0]
                objP[self.checker_pattern[0] * i + j,
                     0] = objP[self.checker_pattern[0] * i +
                               self.checker_pattern[0] - j - 1, 0]
                objP[self.checker_pattern[0] * i + self.checker_pattern[0] -
                     j - 1, 0] = tmp
        objP[:, 0] -= (self.display_width / 2 - 1)
        objP[:, 1] -= (self.display_height / 2 - 1)

        objP *= self.checker_size

        rtA = []
        rB = []
        tB = []
        rC2Ss = []
        tC2Ss = []

        # define valid image
        validImg = -1
        # for i in trange(len(imgFileList), desc="Images"):
        for i in range(1):

            img = cv2.imread(imgPath, cv2.IMREAD_UNCHANGED)

            # Yunhao
            # img = (img/65535*255).astype(np.uint8)

            # Aruco marker for Mirror position
            cornersAruco, ids = self.detectAruco(img, debug=False)
            # skip frames where too few Aruco markers are detected
            if cornersAruco is None or ids is None or len(cornersAruco) <= 3:
                continue

            # Checker for Display
            ret, cornersChecker = self.detectChecker(img, debug=False)
            if not ret:
                print("no Checker!!!")
                continue

            # for a valid image, aruco and checker must be both detected
            validImg += 1

            # Calibrate Mirror Pose with Aruco

            rvecMirror, tvecMirror = self.postEst(cornersAruco, ids, camMtx,
                                                  dist)
            img_axis = aruco.drawAxis(img, camMtx, dist, rvecMirror,
                                      tvecMirror, self.aruco_size)
            cv2.namedWindow('Img_axis', cv2.WINDOW_NORMAL)
            cv2.imshow('Img_axis', img_axis)
            cv2.waitKey(0)  # any key
            cv2.destroyWindow('Img_axis')

            ## Reproject Camera Extrinsics
            self.reProjAruco(img, camMtx, dist, rvecMirror, tvecMirror,
                             cornersAruco)

            rMatMirror, _ = cv2.Rodrigues(
                rvecMirror)  # rotation vector to rotation matrix
            normalMirror = rMatMirror[:, 2]

            rC2W, tC2W = self.invTransformation(rMatMirror, tvecMirror)
            dW2C = abs(np.dot(normalMirror, tvecMirror))

            # Householder transformation
            p1, p2, p3 = self.householderTransform(normalMirror, dW2C)

            # Calibrate virtual to Camera with Checker
            rpe, rvecVirtual, tvecVirtual = cv2.solvePnP(
                objP, cornersChecker, camMtx, dist, flags=cv2.SOLVEPNP_IPPE
            )  # cv2.SOLVEPNP_IPPE for 4 point solution #cv2.SOLVEPNP_ITERATIVE
            # iterationsCount=200, reprojectionError=8.0,

            rvecVirtual, tvecVirtual = cv2.solvePnPRefineLM(
                objP, cornersChecker, camMtx, dist, rvecVirtual, tvecVirtual)

            proj, jac = cv2.projectPoints(objP, rvecVirtual, tvecVirtual,
                                          camMtx, dist)
            img_rep = img

            # drawChessboardCorners expects the (cols, rows) pattern size, not the square size
            cv2.drawChessboardCorners(img_rep, tuple(self.checker_pattern), proj, True)
            width = 960
            height = int(img_rep.shape[0] * 960 / img_rep.shape[1])
            smallimg = cv2.resize(img_rep, (width, height))
            cv2.imshow("img_rep", smallimg)
            cv2.waitKey(0)  # any key
            cv2.destroyWindow("img_rep")

            rMatVirtual, _ = cv2.Rodrigues(
                rvecVirtual)  # rotation vector to rotation matrix

            print(tvecVirtual)
            if validImg == 0:
                rtA = p1
                rB = np.matmul(rMatVirtual, p2)
                tB = np.squeeze(tvecVirtual) + p3
            else:
                rtA = np.concatenate((rtA, p1))
                rB = np.concatenate((rB, np.matmul(rMatVirtual, p2)))
                tB = np.concatenate((tB, np.squeeze(tvecVirtual) + p3))

            rS2C = p1 @ rMatVirtual
            tS2C = p1 @ np.squeeze(tvecVirtual) + p3

            rC2S, tC2S = self.invTransformation(rS2C, tS2C)
            print("rC2S:", rC2S)
            print("tC2S:", tC2S)
            rC2Ss.append(rC2S)
            tC2Ss.append(tC2S)

        # rC2Ss = np.array(rC2Ss)
        # tC2Ss = np.array(tC2Ss)
        # fout = os.path.join(imgFolder, "Cam2Screen.npz")
        # np.savez(fout, rC2S=rC2Ss, tC2S=tC2Ss)
        return rC2Ss, tC2Ss
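# Sketch (assumption): householderTransform is not shown in this snippet. Its usage above
# (rS2C = p1 @ rMatVirtual, tS2C = p1 @ tvec + p3) is consistent with the standard mirror
# reflection about a plane with unit normal n at distance d: the Householder matrix
# I - 2*n*n^T plus a translation offset of 2*d*n. A minimal stand-in for the first and
# third returned values:
def householder_reflection(n, d):
    """Reflect across the plane {x : n.x = d}; returns (H, t_off) so p -> H @ p + t_off."""
    n = np.asarray(n, dtype=float).reshape(3)
    H = np.eye(3) - 2.0 * np.outer(n, n)
    t_off = 2.0 * d * n
    return H, t_off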
Example 5
def calib(imgPath, camMtx, dist, half_length, half_height, displayScaleFactor):
    ARUCO_BOARD_HEIGHT = half_height * 2
    ARUCO_BOARD_WIDTH = half_length * 2
    CHECKSQUARE_SIZE = displayScaleFactor

    # objP_pixel = np.ceil(readCheckerObjPoint(CHECKER_FILE))
    objP_pixel = np.ceil(generateObjP(14, 10, 80))
    objP_pixel[:, 2] = 0
    objP = np.array(objP_pixel)
    for i in range(CHECKERPATTERN_SIZE[1]):
        for j in range(math.floor(CHECKERPATTERN_SIZE[0] / 2)):
            tmp = objP[CHECKERPATTERN_SIZE[0] * i + j, 0]
            objP[CHECKERPATTERN_SIZE[0] * i + j,
                 0] = objP[CHECKERPATTERN_SIZE[0] * i +
                           CHECKERPATTERN_SIZE[0] - j - 1, 0]
            objP[CHECKERPATTERN_SIZE[0] * i + CHECKERPATTERN_SIZE[0] - j - 1,
                 0] = tmp
    objP[:, 0] -= (SCREEN_WIDTH / 2 - 1)
    objP[:, 1] -= (SCREEN_HEIGHT / 2 - 1)

    objP *= CHECKSQUARE_SIZE

    rtA = []
    rB = []
    tB = []
    rC2Ss = []
    tC2Ss = []

    # define valid image
    validImg = -1
    #for i in trange(len(imgFileList), desc="Images"):
    for i in range(1):

        #img = cv2.imread(imgPath, cv2.IMREAD_UNCHANGED)
        img = cv2.imread(imgPath)
        # print(img.shape)
        # Yunhao
        # img = (img/65535*255).astype(np.uint8)
        #print(len(img.shape))
        # plt.imshow(img)
        # plt.colorbar()
        # Aruco marker for Mirror position
        cornersAruco, ids = detectAruco(img, debug=False)
        # skip frames where too few Aruco markers are detected
        if cornersAruco is None or ids is None or len(cornersAruco) <= 3:
            continue

        # Checker for Display
        # print(len(img.shape))
        ret, cornersChecker = detectChecker(img,
                                            CHECKERPATTERN_SIZE,
                                            debug=True)
        # print(ret)
        # print(cornersChecker)
        if not ret:
            print("no Checker!!!")
            continue

        #for a valid image, aruco and checker must be both detected
        validImg += 1

        # Calibrate Mirror Pose with Aruco

        rvecMirror, tvecMirror = postEst(cornersAruco, ids, camMtx, dist,
                                         ARUCO_MARKER_LENGTH,
                                         ARUCO_BOARD_HEIGHT, ARUCO_BOARD_WIDTH)
        img_axis = aruco.drawAxis(img, camMtx, dist, rvecMirror, tvecMirror,
                                  ARUCO_MARKER_LENGTH)
        width = 960
        height = int(img_axis.shape[0] * 960 / img_axis.shape[1])
        smallimg = cv2.resize(img_axis, (width, height))
        # plt.figure(figsize=(15,10))
        # plt.imshow(smallimg)
        # plt.show()

        ## Reproject Camera Extrinsics
        reProjAruco(img, camMtx, dist, rvecMirror, tvecMirror, cornersAruco)

        rMatMirror, _ = cv2.Rodrigues(
            rvecMirror)  # rotation vector to rotation matrix
        normalMirror = rMatMirror[:, 2]

        rC2W, tC2W = invTransformation(rMatMirror, tvecMirror)
        dW2C = abs(np.dot(normalMirror, tvecMirror))

        # Householder transformation
        p1, p2, p3 = householderTransform(normalMirror, dW2C)

        # Calibrate virtual to Camera with Checker
        rpe, rvecVirtual, tvecVirtual = cv2.solvePnP(
            objP, cornersChecker, camMtx, dist, flags=cv2.SOLVEPNP_IPPE
        )  # cv2.SOLVEPNP_IPPE for 4 point solution #cv2.SOLVEPNP_ITERATIVE
        #iterationsCount=200, reprojectionError=8.0,

        rvecVirtual, tvecVirtual = cv2.solvePnPRefineLM(
            objP, cornersChecker, camMtx, dist, rvecVirtual, tvecVirtual)

        proj, jac = cv2.projectPoints(objP, rvecVirtual, tvecVirtual, camMtx,
                                      dist)
        img_rep = img

        # cv2.drawChessboardCorners(img_rep, CHECKERPATTERN_SIZE, proj, True)
        width = 960
        height = int(img_rep.shape[0] * 960 / img_rep.shape[1])
        smallimg = cv2.resize(img_rep, (width, height))
        fig = plt.figure(figsize=(15, 10))
        plt.imshow(smallimg)
        outDir = os.path.join(os.getcwd(), 'CalibrationImages', 'Geometric',
                              'geoCalibResults')
        fig.savefig(os.path.join(outDir, 'chessBoard.png'))
        plt.show()

        rMatVirtual, _ = cv2.Rodrigues(
            rvecVirtual)  # rotation vector to rotation matrix

        print(tvecVirtual)
        if validImg == 0:
            rtA = p1
            rB = np.matmul(rMatVirtual, p2)
            tB = np.squeeze(tvecVirtual) + p3
        else:
            rtA = np.concatenate((rtA, p1))
            rB = np.concatenate((rB, np.matmul(rMatVirtual, p2)))
            tB = np.concatenate((tB, np.squeeze(tvecVirtual) + p3))

        rS2C = p1 @ rMatVirtual
        tS2C = p1 @ np.squeeze(tvecVirtual) + p3

        rC2S, tC2S = invTransformation(rS2C, tS2C)
        print("rC2S:", rC2S)
        print("tC2S:", tC2S)
        rC2Ss.append(rC2S)
        tC2Ss.append(tC2S)

    return rC2Ss, tC2Ss
Example 6
def flow_to_trafo_PnP(*args, **kwargs):
    """
    input:
      real_br: torch.tensor torch.Size([2])
      real_tl: torch.tensor torch.Size([2])
      ren_br: torch.tensor torch.Size([2])
      ren_tl: torch.tensor torch.Size([2])
      flow_mask: torch.Size([480, 640])
      u_map: torch.Size([480, 640])
      v_map: torch.Size([480, 640])
      K_ren: torch.Size([3, 3])
      render_d: torch.Size([480, 640])
      h_render: torch.Size([4, 4])
      h_real_est: torch.Size([4, 4])
    output:
      suc: bool
      h:  torch.Size([4, 4])
    """
    real_br = kwargs['real_br']
    real_tl = kwargs['real_tl']
    ren_br = kwargs['ren_br']
    ren_tl = kwargs['ren_tl']
    flow_mask = kwargs['flow_mask']
    u_map = kwargs['u_map']
    v_map = kwargs['v_map']
    K_ren = kwargs['K_ren']
    K_real = kwargs['K_real']
    render_d = kwargs['render_d']
    h_render = kwargs['h_render']
    h_real_est = kwargs['h_real_est']
    
    typ = u_map.dtype

    # Grid for upsampled real
    grid_real_h = torch.linspace(int(real_tl[0]) ,int(real_br[0]) , 480, device=u_map.device)[:,None].repeat(1,640)
    grid_real_w = torch.linspace(int(real_tl[1]) ,int(real_br[1]) , 640, device=u_map.device)[None,:].repeat(480,1)
    # Project depth map to the pointcloud real
    cam_scale = 10000
    real_pixels = torch.stack( [grid_real_w[flow_mask], grid_real_h[flow_mask], torch.ones(grid_real_h.shape, device = u_map.device,  dtype= u_map.dtype)[flow_mask]], dim=1 ).type(typ)

    
    grid_ren_h = torch.linspace(int(ren_tl[0]) ,int(ren_br[0]), 480, device=u_map.device)[:,None].repeat(1,640)
    grid_ren_w = torch.linspace(int(ren_tl[1]) ,int(ren_br[1]) , 640, device=u_map.device)[None,:].repeat(480,1)
    crop_d_pixels = torch.stack( [grid_ren_w.flatten(), grid_ren_h.flatten(), torch.ones(grid_ren_w.shape, device = u_map.device,  dtype= torch.float64).flatten()], dim=1 ).type(typ)
    K_inv = torch.inverse(K_ren.type(torch.float64)).type(typ)
    P_crop_d = K_inv @ crop_d_pixels.T.type(typ)
    P_crop_d = P_crop_d.type(torch.float64) * render_d.flatten() / cam_scale
    P_crop_d = P_crop_d.T


    render_d_ind_h = torch.linspace(0 ,479 , 480, device=u_map.device)[:,None].repeat(1,640)
    render_d_ind_w= torch.linspace(0 ,639 , 640, device=u_map.device)[None,:].repeat(480,1)
    render_d_ind_h = torch.clamp((render_d_ind_h - u_map).type(torch.float64) ,0,479).type( torch.long )[flow_mask]
    render_d_ind_w = torch.clamp((render_d_ind_w - v_map).type(torch.float32),0,639).type( torch.long )[flow_mask] 
    index = render_d_ind_h*640 + render_d_ind_w # hacky indexing along two dimensions

    P_crop_d  = P_crop_d[index] 


    m = filter_pcd( P_crop_d)

    if torch.sum(m) < 50:
        return False, torch.eye(4, dtype= u_map.dtype, device=u_map.device )
    P_crop_d  = P_crop_d[ m ]
    real_pixels = real_pixels[m]
    P_ren = P_crop_d

    # random shuffle
    pts_trafo = min(P_ren.shape[0], 50000)
    idx = torch.randperm( P_ren.shape[0] )[0:pts_trafo]
    P_ren = P_ren[idx]
    real_pixels = real_pixels[idx]

    nr = 10000

    # Move the rendered points to the origin 
    P_ren_in_origin =  (get_H( P_ren ).type(typ) @ torch.inverse( h_render.type(torch.float64) ).type(typ).T) [:,:3]

    # PNP estimation
    objectPoints = P_ren_in_origin.clone().cpu().type(torch.float32).numpy()    
    imagePoints = real_pixels[:,:2].cpu().type(torch.float32).numpy()
    dist = np.array( [[0.0,0.0,0.0,0.0]] )

    if objectPoints.shape[0] < 8:        
        print(f'Failed due to missing correspondences ({objectPoints.shape[0]})')
        return False, torch.eye(4, dtype= u_map.dtype, device=u_map.device )
    # set the current guess as the initial estimate

    rvec = R.from_matrix(h_real_est[:3,:3].cpu().numpy()).as_rotvec().astype(np.float32)
    tvec = h_real_est[:3,3].cpu().numpy().astype(np.float32)
    # refine the PnP pose between the pixel coordinates in the real image and the corresponding points in the origin frame
    r_vec2, t_vec2 = cv2.solvePnPRefineLM(copy.deepcopy(objectPoints), \
        copy.deepcopy(imagePoints), 
        K_real.cpu().type(torch.float32).numpy(), 
        dist, 
        copy.deepcopy(rvec),
        copy.deepcopy( tvec))
    r_vec2 = r_vec2[:,None]
    # retval, r_vec2, t_vec2, inliers = cv2.solvePnPRansac(copy.deepcopy(objectPoints), \
    #     copy.deepcopy(imagePoints), 
    #     K_real.cpu().numpy(), 
    #     dist, 
    #     iterationsCount = 1000, reprojectionError= 0.3)    
    
    h = rvec_tvec_to_H(r_vec2[:,0],t_vec2)

    return True, torch.tensor(h, device=u_map.device ).type(u_map.dtype) 
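# Sketch (assumption): rvec_tvec_to_H is not defined in this snippet. Given how it is
# called above, it plausibly assembles a 4x4 homogeneous transform from the Rodrigues
# rotation vector and translation returned by solvePnPRefineLM, e.g.:
def rvec_tvec_to_H(rvec, tvec):
    H = np.eye(4)
    H[:3, :3] = cv2.Rodrigues(np.asarray(rvec, dtype=np.float64))[0]  # rotation vector -> matrix
    H[:3, 3] = np.asarray(tvec, dtype=np.float64).reshape(3)          # translation column
    return H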
Example 7
def test(val_loader, model, criterion, epoch, args):
    batch_time = AverageMeter('Time', ':6.4f')
    loss_meter = AverageMeter('Loss', ':6.4f')
    correspondence_probability_meter = AverageMeter('Outlier-Inlier Prob', ':6.4f')
    rotation_meter = AverageMeter('Rotation Error', ':6.4f')
    translation_meter = AverageMeter('Translation Error', ':6.4f')
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, loss_meter, correspondence_probability_meter, rotation_meter, translation_meter],
        prefix='Test: ')

    poseloss = True
    criterion.gamma = 1.0

    model.eval()

    with torch.no_grad():
        end = time.time()

        rotation_errors0, rotation_errors, rotation_errorsLM = [], [], []
        translation_errors0, translation_errors, translation_errorsLM = [], [], []
        reprojection_errors0, reprojection_errors, reprojection_errorsLM, reprojection_errorsGT = [], [], [], []
        num_inliers0, num_inliers, num_inliersLM, num_inliersGT = [], [], [], []
        thetas0, thetas, thetasLM = [], [], []
        num_points_2d_list, num_points_3d_list = [], []
        inference_times = []

        start_time = time.time()
        for batch_index, (p2d, p3d, R_gt, t_gt, C_gt, num_points_2d, num_points_3d) in enumerate(val_loader):

            p2d = p2d.float()
            p3d = p3d.float()
            R_gt = R_gt.float()
            t_gt = t_gt.float()

            if args.frac_outliers == 0.0:
                # Convert C_gt into matrix (stored as a b x n index tensor with outliers indexed by m)
                m = p2d.size(-2)
                C_gt = torch.nn.functional.one_hot(C_gt, num_classes=(m + 1))[:, :, :m].transpose(-2, -1).float()
            elif args.frac_outliers > 0.0:
                # Add random outliers:
                # Get bounding boxes
                bb2d_min = p2d.min(dim=-2)[0]
                bb2d_width = p2d.max(dim=-2)[0] - bb2d_min
                bb3d_min = p3d.min(dim=-2)[0]
                bb3d_width = p3d.max(dim=-2)[0] - bb3d_min
                num_outliers = int(args.frac_outliers * p2d.size(-2))
                p2d_outliers = bb2d_width * torch.rand_like(p2d[:, :num_outliers, :]) + bb2d_min
                p3d_outliers = bb3d_width * torch.rand_like(p3d[:, :num_outliers, :]) + bb3d_min
                p2d = torch.cat((p2d, p2d_outliers), -2)
                p3d = torch.cat((p3d, p3d_outliers), -2)
                num_points_2d[0, 0] = p2d.size(-2)
                num_points_3d[0, 0] = p3d.size(-2)
                # Expand C_gt with outlier indices (b x n index tensor with outliers indexed by m)
                b = p2d.size(0)
                m = p2d.size(-2)
                outlier_indices = C_gt.new_full((b, num_outliers), m)
                C_gt = torch.cat((C_gt, outlier_indices), -1)
                C_gt = torch.nn.functional.one_hot(C_gt, num_classes=(m + 1))[:, :, :m].transpose(-2, -1).float()
                # For memory reasons, if num_points > 10000, downsample first
                if p2d.size(-2) > 10000:
                    idx = torch.randint(p2d.size(-2), size=(10000,))
                    p2d = p2d[:, idx, :]
                    p3d = p3d[:, idx, :]
                    num_points_2d[0, 0] = p2d.size(-2)
                    num_points_3d[0, 0] = p3d.size(-2)
                    C_gt = C_gt[:, idx, :]
                    C_gt = C_gt[:, :, idx]

            if args.gpu is not None:
                p2d = p2d.cuda(args.gpu, non_blocking=True)
                p3d = p3d.cuda(args.gpu, non_blocking=True)
            R_gt = R_gt.cuda(args.gpu, non_blocking=True)
            t_gt = t_gt.cuda(args.gpu, non_blocking=True)
            C_gt = C_gt.cuda(args.gpu, non_blocking=True)

            # Compute output
            P, theta, theta0 = model(p2d, p3d, num_points_2d, num_points_3d, poseloss)

            # Measure elapsed time for reporting (includes dataloading time, but not evaluation / loss time)
            inference_time = (time.time() - start_time)

            loss, losses = criterion(theta, P, R_gt, t_gt, C_gt)
            loss_correspondence_probability = losses[0]
            loss_meter.update(loss.item(), p2d.size(0))
            correspondence_probability_meter.update(loss_correspondence_probability.item(), p2d.size(0))
            if poseloss:    
                loss_rotation = losses[1]
                loss_translation = losses[2]
                rotation_meter.update(loss_rotation.item(), p2d.size(0))
                translation_meter.update(loss_translation.item(), p2d.size(0))

            # Compute refined pose estimate:
            # 1. Find inliers based on RANSAC estimate
            inlier_threshold = 1.0 * math.pi / 180.0 # 1 degree threshold for LM
            C = correspondenceMatricesTheta(theta0, p2d, p3d, inlier_threshold)
            K = np.float32(np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]))
            dist_coeff = np.float32(np.array([0.0, 0.0, 0.0, 0.0]))
            thetaLM = P.new_zeros((P.size(0), 6))
            inlier_indices = C[0, ...].nonzero(as_tuple=True) # Assumes test batch size = 1
            # Skip if point-set has < 4 inlier points:
            if (inlier_indices[0].size()[0] >= 4):
                p2d_np = p2d[0, inlier_indices[0], :].cpu().numpy()
                p3d_np = p3d[0, inlier_indices[1], :].cpu().numpy()
                rvec = theta0[0, :3].cpu().numpy()
                tvec = theta0[0, 3:].cpu().numpy()
                rvec, tvec = cv2.solvePnPRefineLM(p3d_np, p2d_np, K, dist_coeff, rvec, tvec)
                if rvec is not None and tvec is not None:
                    thetaLM[0, :3] = torch.as_tensor(rvec, dtype=P.dtype, device=P.device).squeeze(-1)
                    thetaLM[0, 3:] = torch.as_tensor(tvec, dtype=P.dtype, device=P.device).squeeze(-1)

            inlier_threshold = 0.1 * math.pi / 180.0 # 0.1 degree threshold for reported inlier count
            rotation_errors0 += [rotationErrorsTheta(theta0, R_gt, eps=0.0).item()]
            rotation_errors += [rotationErrorsTheta(theta, R_gt, eps=0.0).item()]
            rotation_errorsLM += [rotationErrorsTheta(thetaLM, R_gt, eps=0.0).item()]
            translation_errors0 += [translationErrorsTheta(theta0, t_gt).item()]
            translation_errors += [translationErrorsTheta(theta, t_gt).item()]
            translation_errorsLM += [translationErrorsTheta(thetaLM, t_gt).item()]
            reprojection_errors0 += [reprojectionErrorsTheta(theta0, p2d, p3d, C_gt, eps=0.0).item()]
            reprojection_errors += [reprojectionErrorsTheta(theta, p2d, p3d, C_gt, eps=0.0).item()]
            reprojection_errorsLM += [reprojectionErrorsTheta(thetaLM, p2d, p3d, C_gt, eps=0.0).item()]
            reprojection_errorsGT += [reprojectionErrors(R_gt, t_gt, p2d, p3d, C_gt, eps=0.0).item()]
            num_inliers0 += [numInliersTheta(theta0, p2d, p3d, inlier_threshold).item()]
            num_inliers += [numInliersTheta(theta, p2d, p3d, inlier_threshold).item()]
            num_inliersLM += [numInliersTheta(thetaLM, p2d, p3d, inlier_threshold).item()]
            num_inliersGT += [numInliers(R_gt, t_gt, p2d, p3d, inlier_threshold).item()]
            num_points_2d_list += [num_points_2d[0].item()]
            num_points_3d_list += [num_points_3d[0].item()]
            inference_times += [inference_time]
            thetas0 += [theta0[0, 0].item(), theta0[0, 1].item(), theta0[0, 2].item(), theta0[0, 3].item(), theta0[0, 4].item(), theta0[0, 5].item()]
            thetas += [theta[0, 0].item(), theta[0, 1].item(), theta[0, 2].item(), theta[0, 3].item(), theta[0, 4].item(), theta[0, 5].item()]
            thetasLM += [thetaLM[0, 0].item(), thetaLM[0, 1].item(), thetaLM[0, 2].item(), thetaLM[0, 3].item(), thetaLM[0, 4].item(), thetaLM[0, 5].item()]

            # Measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            start_time = time.time()

            if batch_index % args.print_freq == 0:
                progress.display(batch_index)

        print('Loss: {loss.avg:6.4f}, Outlier-Inlier Prob: {correspondence_probability.avg:6.4f}, Rotation Error: {rot.avg:6.4f}, Translation Error: {transl.avg:6.4f}'
              .format(loss=loss_meter, correspondence_probability=correspondence_probability_meter, rot=rotation_meter, transl=translation_meter))

        # Print all results in a text file:
        if args.poseloss == 0:
            loss_string = 'LcLp'
        else:
            loss_string = 'Lc'
        if args.frac_outliers == 0.0:
            append_string = ''
        elif args.frac_outliers > 0.0:
            append_string = '_outliers_' + str(args.frac_outliers)
        dataset_string = args.dataset
        os.makedirs('./results', exist_ok=True)
        os.makedirs('./results/' + dataset_string, exist_ok=True)
        os.makedirs('./results/' + dataset_string + '/' + loss_string, exist_ok=True)
        with open('./results/' + dataset_string + '/' + loss_string + '/results' + append_string + '.txt', 'w') as save_file:
            save_file.write("rotation_errors0, rotation_errors, rotation_errorsLM, translation_errors0, translation_errors, translation_errorsLM, reprojection_errors0, reprojection_errors, reprojection_errorsLM, reprojection_errorsGT, num_inliers0, num_inliers, num_inliersLM, num_inliersGT, num_points_2d, num_points_3d, inference_time\n")
            for i in range(len(rotation_errors)):
                save_file.write("{:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {}, {}, {}, {}, {}, {}, {:.9f}\n".format(
                    rotation_errors0[i], rotation_errors[i], rotation_errorsLM[i],
                    translation_errors0[i], translation_errors[i], translation_errorsLM[i],
                    reprojection_errors0[i], reprojection_errors[i], reprojection_errorsLM[i], reprojection_errorsGT[i],
                    num_inliers0[i], num_inliers[i], num_inliersLM[i], num_inliersGT[i],
                    num_points_2d_list[i], num_points_3d_list[i],
                    inference_times[i]
                    ))
        with open('./results/' + dataset_string + '/' + loss_string + '/poses' + append_string + '.txt', 'w') as save_file:
            for i in range(len(rotation_errors)):
                save_file.write("{:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}, {:.9f}\n".format(
                    thetas0[6*i + 0], thetas0[6*i + 1], thetas0[6*i + 2], thetas0[6*i + 3], thetas0[6*i + 4], thetas0[6*i + 5],
                    thetas[6*i + 0], thetas[6*i + 1], thetas[6*i + 2], thetas[6*i + 3], thetas[6*i + 4], thetas[6*i + 5],
                    thetasLM[6*i + 0], thetasLM[6*i + 1], thetasLM[6*i + 2], thetasLM[6*i + 3], thetasLM[6*i + 4], thetasLM[6*i + 5]
                    ))
        # Pickle all output camera poses:
        poses = {}
        poses["theta0"] = np.array(thetas0).reshape(-1, 6)
        poses["theta"] = np.array(thetas).reshape(-1, 6)
        poses["thetaLM"] = np.array(thetasLM).reshape(-1, 6)
        pickle.dump(poses, open('./results/' + dataset_string + '/' + loss_string + '/poses' + append_string + '.pkl', 'wb'))
    return loss_meter.avg