def __init__(self, path_to_theta_lut, poly_coeffs,
             principal_point=torch.Tensor([0., 0.]),
             scale_factors=torch.Tensor([1., 1.]),
             Tcw=None):
    """
    Initializes the Camera class

    Parameters
    ----------
    path_to_theta_lut : list of str
        Path(s) to the precomputed theta lookup table(s)
    poly_coeffs : torch.Tensor
        Polynomial coefficients [c1, c2, c3, c4]
    principal_point : torch.Tensor
        Principal point [cx, cy]
    scale_factors : torch.Tensor
        Scale factors [ax, ay]
    Tcw : Pose
        Camera -> World pose transformation
    """
    super().__init__()
    self.path_to_theta_lut = path_to_theta_lut
    self.poly_coeffs = poly_coeffs
    self.principal_point = principal_point
    self.scale_factors = scale_factors
    self.Tcw = Pose.identity(len(poly_coeffs)) if Tcw is None else Tcw
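# Construction sketch (the class name `CameraFisheye` below is an assumption;
# only the constructor is shown in this file). The values mirror the test
# script further down, with a batch dimension added by unsqueeze(0):
#
#     cam = CameraFisheye(
#         path_to_theta_lut=[main_folder + 'images/fisheye/train/' + seq_idx
#                            + '/cam_0/theta_tensor_1280_800.npy'],
#         poly_coeffs=torch.Tensor([282.85, -27.8671, 114.318, -36.6703]).unsqueeze(0),
#         principal_point=torch.Tensor([0.046296, -7.33178]).unsqueeze(0),
#         scale_factors=torch.Tensor([1., 1. / 1.00173]).unsqueeze(0))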
def __init__(self, poly_coeffs, principal_point, scale_factors,
             K, k1, k2, k3, p1, p2,
             camera_type,  # int Tensor; 0 is fisheye, 1 is distorted, 2 is other
             Tcw=None):
    """
    Initializes the Camera class

    Parameters
    ----------
    poly_coeffs : torch.Tensor
        Fisheye polynomial coefficients [c1, c2, c3, c4]
    principal_point : torch.Tensor
        Principal point [cx, cy]
    scale_factors : torch.Tensor
        Scale factors [ax, ay]
    K : torch.Tensor [3,3]
        Pinhole camera intrinsics
    k1, k2, k3 : torch.Tensor
        Radial distortion coefficients
    p1, p2 : torch.Tensor
        Tangential distortion coefficients
    camera_type : torch.Tensor (int)
        Camera model selector: 0 is fisheye, 1 is distorted, 2 is other
    Tcw : Pose
        Camera -> World pose transformation
    """
    super().__init__()
    self.poly_coeffs = poly_coeffs
    self.principal_point = principal_point
    self.scale_factors = scale_factors
    self.K = K
    self.k1 = k1
    self.k2 = k2
    self.k3 = k3
    self.p1 = p1
    self.p2 = p2
    self.camera_type = camera_type
    self.Tcw = Pose.identity(len(camera_type)) if Tcw is None else Tcw
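# Usage note (a sketch, not code from the repository): camera_type is an int
# tensor with one entry per batch element that selects the projection model,
# and its length also sets the batch size of the default identity pose. The
# (k1, k2, k3) and (p1, p2) terms presumably follow the usual radial /
# tangential distortion convention when camera_type == 1:
#
#     camera_type = torch.zeros(1, dtype=torch.int)  # 0 -> fisheye: poly_coeffs,
#                                                    #      principal_point, scale_factors
#     camera_type = torch.ones(1, dtype=torch.int)   # 1 -> distorted pinhole: K,
#                                                    #      k1, k2, k3, p1, p2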
def __init__(self, K, Tcw=None):
    """
    Initializes the Camera class

    Parameters
    ----------
    K : torch.Tensor [3,3]
        Camera intrinsics
    Tcw : Pose
        Camera -> World pose transformation
    """
    super().__init__()
    self.K = K
    self.Tcw = Pose.identity(len(K)) if Tcw is None else Tcw
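# Construction sketch for the pinhole variant (a minimal example; the intrinsics
# values are illustrative placeholders, and `Camera` / `Pose` are the project's
# own classes):
#
#     K = torch.Tensor([[[300.,   0., 640.],
#                        [  0., 300., 400.],
#                        [  0.,   0.,   1.]]])        # batched [1,3,3] intrinsics
#     cam = Camera(K=K)                               # Tcw defaults to identity
#     cam = Camera(K=K, Tcw=Pose.identity(len(K)))    # equivalent, explicit pose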
def __init__(self, R, Tcw=None):
    """
    Initializes the Camera class

    Parameters
    ----------
    R : torch.Tensor [B, 3, H, W]
        Camera ray surface
    Tcw : Pose
        Camera -> World pose transformation
    """
    super().__init__()
    self.ray_surface = R
    self.Tcw = Pose.identity(1) if Tcw is None else Tcw
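# Reconstruction sketch for the ray-surface model (an assumption about intended
# usage, not code from the repository): the ray surface stores one viewing
# direction per pixel, so camera-frame points come from scaling each ray by its
# depth, and world-frame points from applying the inverse pose, mirroring the
# Twc = Tcw.inverse() usage in the test script below:
#
#     Xc = self.ray_surface * depth     # [B,3,H,W] * [B,1,H,W] -> [B,3,H,W]
#     Xw = self.Tcw.inverse() @ Xc      # camera frame -> world frame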
import cv2
import numpy as np
import open3d as o3d
import torch
from scipy.spatial.transform import Rotation as R
# Pose is the project's own pose class; its import is not shown in this snippet.

main_folder = '/home/vbelissen/test_data/valeo_data_ready2train/data/dataset_valeo_cea_2017_2018/'
seq_idx = '20170320_144339'
img_idx = '00011702'

# Fisheye intrinsics for cam_0 of this sequence
path_to_theta_lut = [
    main_folder + 'images/fisheye/train/' + seq_idx + '/cam_0/theta_tensor_1280_800.npy'
]
poly_coeffs = torch.Tensor([282.85, -27.8671, 114.318, -36.6703]).unsqueeze(0)
principal_point = torch.Tensor([0.046296, -7.33178]).unsqueeze(0)
scale_factors = torch.Tensor([1., 1. / 1.00173]).unsqueeze(0)

Tcw = Pose.identity(len(poly_coeffs))
Twc = Tcw.inverse()

r = R.from_quat([1, 0, 0, 0])

# Load the LiDAR depth map (1 x 1 x H x W) for this image
depth_map_valeo = np.zeros((1, 1, 800, 1280))
depth_map_valeo[0, 0, :, :] = \
    np.load(main_folder + 'depth_maps/fisheye/train/' + seq_idx + '/velodyne_0/'
            + seq_idx + '_velodyne_0_' + img_idx + '.npz')['velodyne_depth']
depth_map_valeo = depth_map_valeo.astype('float32')
depth_map_valeo_tensor = torch.from_numpy(depth_map_valeo)


def reconstruct(depth, frame='w'):
    """
    Reconstructs pixel-wise 3D points from a depth map.