def __init__(self, position, look_at, up, fov, clip_near, resolution, fisheye=False):
    """Build a pinhole (or fisheye) camera from a look-at specification.

    Args:
        position: float32 tensor of shape (3,) — camera origin in world space.
        look_at: float32 tensor of shape (3,) — point the camera faces.
        up: float32 tensor of shape (3,) — world-space up direction.
        fov: float32 tensor of shape (1,) — vertical field of view in degrees.
        clip_near: Python float — near clipping plane distance.
        resolution: image resolution (stored as-is).
        fisheye: if True, use a fisheye projection instead of perspective.
    """
    # Validate tensor dtypes and shapes up front so a bad argument fails
    # here rather than deep inside the transform math.
    for name, t in (("position", position), ("look_at", look_at), ("up", up)):
        assert t.dtype == torch.float32
        assert len(t.shape) == 1 and t.shape[0] == 3
    assert fov.dtype == torch.float32
    assert len(fov.shape) == 1 and fov.shape[0] == 1
    assert isinstance(clip_near, float)

    self.position = position
    self.look_at = look_at
    self.up = up
    self.fov = fov
    # Derived transforms: world <-> camera. The inverse is made contiguous
    # so downstream consumers get a densely-laid-out tensor.
    c2w = transform.gen_look_at_matrix(position, look_at, up)
    self.cam_to_world = c2w
    self.world_to_cam = torch.inverse(c2w).contiguous()
    # tan(fov/2) — half-extent of the image plane at unit distance.
    self.fov_factor = torch.tan(transform.radians(0.5 * fov))
    self.clip_near = clip_near
    self.resolution = resolution
    self.fisheye = fisheye
def __init__(self, position, look_at, up, fov, clip_near, resolution, fisheye=False):
    """Build a camera from a look-at specification (unchecked variant).

    Args:
        position: camera origin in world space (presumably a float32 (3,)
            tensor, matching the asserting sibling constructor — not
            validated here).
        look_at: point the camera faces.
        up: world-space up direction.
        fov: vertical field of view in degrees (tensor of shape (1,)).
        clip_near: near clipping plane distance.
        resolution: image resolution (stored as-is).
        fisheye: if True, use a fisheye projection instead of perspective.
    """
    self.position = position
    self.look_at = look_at
    self.up = up
    self.fov = fov
    self.cam_to_world = transform.gen_look_at_matrix(position, look_at, up)
    # Consistency fix: make the inverse contiguous, matching the other
    # constructor and the look_at setter. .contiguous() only changes the
    # memory layout, never the values.
    self.world_to_cam = torch.inverse(self.cam_to_world).contiguous()
    # tan(fov/2) — half-extent of the image plane at unit distance.
    self.fov_factor = torch.tan(transform.radians(0.5 * fov))
    self.clip_near = clip_near
    self.resolution = resolution
    self.fisheye = fisheye
def __init__(self, position, look_at, up, fov, clip_near, resolution, cam_to_ndc=None, fisheye=False):
    """Build a TensorFlow camera from a look-at specification.

    Args:
        position: float32 tensor of shape (3,) — camera origin in world space.
        look_at: float32 tensor of shape (3,) — point the camera faces.
        up: float32 tensor of shape (3,) — world-space up direction.
        fov: float32 tensor of shape (1,) — vertical field of view in
            degrees, or None when cam_to_ndc is given explicitly.
        clip_near: Python float — near clipping plane distance.
        resolution: image resolution (stored as-is).
        cam_to_ndc: optional explicit 3x3 camera-to-NDC matrix; when None
            it is derived from fov.
        fisheye: if True, use a fisheye projection instead of perspective.
    """
    # Validate tensor dtypes and shapes up front.
    assert (position.dtype == tf.float32)
    assert (len(position.shape) == 1 and position.shape[0] == 3)
    assert (look_at.dtype == tf.float32)
    assert (len(look_at.shape) == 1 and look_at.shape[0] == 3)
    assert (up.dtype == tf.float32)
    assert (len(up.shape) == 1 and up.shape[0] == 3)
    if fov is not None:
        assert (fov.dtype == tf.float32)
        assert (len(fov.shape) == 1 and fov.shape[0] == 1)
    assert (isinstance(clip_near, float))

    self._position = position
    self._look_at = look_at
    self._up = up
    self._fov = fov
    self.cam_to_world = transform.gen_look_at_matrix(position, look_at, up)
    # BUG FIX: the original called .contiguous() on the result, but that is
    # a PyTorch Tensor method — tf.Tensor has no .contiguous(), so this line
    # raised AttributeError at construction time. TF tensors are always
    # densely laid out; the call is simply dropped.
    self.world_to_cam = tf.linalg.inv(self.cam_to_world)
    if cam_to_ndc is None:
        # Derive the projection from fov: scale x/y by cot(fov/2).
        fov_factor = 1.0 / tf.tan(transform.radians(0.5 * fov))
        o = tf.ones([1], dtype=tf.float32)
        diag = tf.concat([fov_factor, fov_factor, o], 0)
        # NOTE(review): tf.diag is deprecated in TF2 (tf.linalg.tensor_diag /
        # tf.linalg.diag); kept as-is to match the file's TF version.
        self._cam_to_ndc = tf.diag(diag)
    else:
        self._cam_to_ndc = cam_to_ndc
    self.ndc_to_cam = tf.linalg.inv(self.cam_to_ndc)
    self.clip_near = clip_near
    self.resolution = resolution
    self.fisheye = fisheye
def up(self, value):
    """Setter for the up vector: store it and refresh the derived
    camera-to-world / world-to-camera transforms."""
    self._up = value
    self.cam_to_world = \
        transform.gen_look_at_matrix(self._position, self._look_at, self._up)
    # BUG FIX: the original called .contiguous() on the result — a PyTorch
    # method that does not exist on tf.Tensor, so this setter raised
    # AttributeError whenever used. Dropped; TF tensors need no such call.
    self.world_to_cam = tf.linalg.inv(self.cam_to_world)
def look_at(self, value):
    """Setter for the look-at point: store it and rebuild the cached
    camera-to-world and world-to-camera matrices from the current
    position / look_at / up triple."""
    self._look_at = value
    c2w = transform.gen_look_at_matrix(self._position, self._look_at, self._up)
    self.cam_to_world = c2w
    self.world_to_cam = torch.inverse(c2w).contiguous()