def fov(self, value):
    self._fov = value
    fov_factor = 1.0 / torch.tan(transform.radians(0.5 * self._fov))
    o = torch.ones([1], dtype=torch.float32, device=fov_factor.device)
    diag = torch.cat([fov_factor, fov_factor, o], 0)
    self._intrinsic_mat = torch.diag(diag).contiguous()
    self.intrinsic_mat_inv = torch.inverse(self._intrinsic_mat).contiguous()
def fov(self, value):
    self._fov = value
    fov_factor = 1.0 / tf.tan(transform.radians(0.5 * self._fov))
    o = tf.ones([1], dtype=tf.float32)
    diag = tf.concat([fov_factor, fov_factor, o], 0)
    self._cam_to_ndc = tf.diag(diag)
    self.ndc_to_cam = tf.linalg.inv(self._cam_to_ndc)
def fov(self, value):
    self._fov = value
    fov_factor = 1.0 / torch.tan(transform.radians(0.5 * self._fov))
    o = torch.ones([1], dtype=torch.float32)
    diag = torch.cat([fov_factor, fov_factor, o], 0)
    self._cam_to_ndc = torch.diag(diag)
    self.ndc_to_cam = torch.inverse(self._cam_to_ndc)
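# A minimal sketch of what the fov setters above compute, assuming `fov` is a
# 1-element float32 tensor holding the field of view in degrees and that
# transform.radians simply converts degrees to radians (the conversion is
# inlined here). The cam-to-NDC / intrinsic matrix is just
# diag(1/tan(fov/2), 1/tan(fov/2), 1); the numbers are illustrative only.
import math
import torch

fov = torch.tensor([45.0], dtype=torch.float32)              # degrees
fov_factor = 1.0 / torch.tan(math.pi / 180.0 * 0.5 * fov)    # ~2.414 for 45 degrees
o = torch.ones([1], dtype=torch.float32)
cam_to_ndc = torch.diag(torch.cat([fov_factor, fov_factor, o], 0))
# cam_to_ndc ~ [[2.414, 0,     0],
#               [0,     2.414, 0],
#               [0,     0,     1]]
ndc_to_cam = torch.inverse(cam_to_ndc)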
def __init__(self,
             position: Optional[torch.Tensor] = None,
             look_at: Optional[torch.Tensor] = None,
             up: Optional[torch.Tensor] = None,
             fov: Optional[torch.Tensor] = None,
             clip_near: float = 1e-4,
             resolution: Tuple[int, int] = (256, 256),
             cam_to_world: Optional[torch.Tensor] = None,
             intrinsic_mat: Optional[torch.Tensor] = None,
             camera_type = pyredner.camera_type.perspective,
             fisheye: bool = False):
    if position is not None:
        assert(position.dtype == torch.float32)
        assert(len(position.shape) == 1 and position.shape[0] == 3)
    if look_at is not None:
        assert(look_at.dtype == torch.float32)
        assert(len(look_at.shape) == 1 and look_at.shape[0] == 3)
    if up is not None:
        assert(up.dtype == torch.float32)
        assert(len(up.shape) == 1 and up.shape[0] == 3)
    if fov is not None:
        assert(fov.dtype == torch.float32)
        assert(len(fov.shape) == 1 and fov.shape[0] == 1)
    assert(isinstance(clip_near, float))
    # Either a look-at pose (position/look_at/up) or an explicit cam_to_world
    # matrix must be provided.
    if position is None and look_at is None and up is None:
        assert(cam_to_world is not None)

    self.position = position
    self.look_at = look_at
    self.up = up
    self._fov = fov
    self._cam_to_world = cam_to_world
    if cam_to_world is not None:
        self.world_to_cam = torch.inverse(self.cam_to_world).contiguous()
    else:
        self.world_to_cam = None
    if intrinsic_mat is None:
        if camera_type == redner.CameraType.perspective:
            fov_factor = 1.0 / torch.tan(transform.radians(0.5 * fov))
            o = torch.ones([1], dtype=torch.float32)
            diag = torch.cat([fov_factor, fov_factor, o], 0)
            self._intrinsic_mat = torch.diag(diag).contiguous()
        else:
            self._intrinsic_mat = torch.eye(3, dtype=torch.float32)
    else:
        self._intrinsic_mat = intrinsic_mat
    self.intrinsic_mat_inv = torch.inverse(self.intrinsic_mat).contiguous()
    self.clip_near = clip_near
    self.resolution = resolution
    self.camera_type = camera_type
    if fisheye:
        self.camera_type = pyredner.camera_type.fisheye
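# A minimal usage sketch for the constructor above, assuming it belongs to a
# pyredner.Camera class and that pyredner/torch are importable; the numeric
# values are placeholders, not values taken from the original code.
import torch
import pyredner

cam = pyredner.Camera(
    position = torch.tensor([0.0, 0.0, -5.0]),
    look_at = torch.tensor([0.0, 0.0, 0.0]),
    up = torch.tensor([0.0, 1.0, 0.0]),
    fov = torch.tensor([45.0]),       # degrees, shape [1]
    clip_near = 1e-2,
    resolution = (256, 256))

# Alternatively, skip the look-at parameters and supply the pose directly
# (pose_4x4 and K_3x3 are hypothetical float32 tensors):
# cam = pyredner.Camera(cam_to_world = pose_4x4,
#                       intrinsic_mat = K_3x3,
#                       resolution = (256, 256))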
def parse_transform(node, param_dict):
    ret = torch.eye(4)
    for child in node:
        if child.tag == 'matrix':
            # Support both ',' and ' ' as the separator.
            value = torch.from_numpy(
                np.reshape(
                    np.fromstring(child.attrib['value'],
                                  dtype=np.float32,
                                  sep=',' if ',' in child.attrib['value'] else ' '),
                    (4, 4)))
            ret = value @ ret
        elif child.tag == 'translate':
            x = float(check_default(child.attrib['x'], param_dict))
            y = float(check_default(child.attrib['y'], param_dict))
            z = float(check_default(child.attrib['z'], param_dict))
            value = transform.gen_translate_matrix(torch.tensor([x, y, z]))
            ret = value @ ret
        elif child.tag == 'scale':
            # A single 'value' attribute means a uniform scale.
            if 'value' in child.attrib:
                x = y = z = float(child.attrib['value'])
            else:
                x = float(check_default(child.attrib['x'], param_dict))
                y = float(check_default(child.attrib['y'], param_dict))
                z = float(check_default(child.attrib['z'], param_dict))
            value = transform.gen_scale_matrix(torch.tensor([x, y, z]))
            ret = value @ ret
        elif child.tag == 'rotate':
            x = float(check_default(child.attrib['x'], param_dict)) if 'x' in child.attrib else 0.0
            y = float(check_default(child.attrib['y'], param_dict)) if 'y' in child.attrib else 0.0
            z = float(check_default(child.attrib['z'], param_dict)) if 'z' in child.attrib else 0.0
            angle = transform.radians(float(check_default(child.attrib['angle'], param_dict)))
            axis = np.array([x, y, z])
            axis = axis / np.linalg.norm(axis)
            # Axis-angle rotation (Rodrigues' formula) written out as a 4x4 matrix.
            cos_theta = math.cos(angle)
            sin_theta = math.sin(angle)
            mat = torch.zeros(4, 4)
            mat[0, 0] = axis[0] * axis[0] + (1.0 - axis[0] * axis[0]) * cos_theta
            mat[0, 1] = axis[0] * axis[1] * (1.0 - cos_theta) - axis[2] * sin_theta
            mat[0, 2] = axis[0] * axis[2] * (1.0 - cos_theta) + axis[1] * sin_theta
            mat[1, 0] = axis[0] * axis[1] * (1.0 - cos_theta) + axis[2] * sin_theta
            mat[1, 1] = axis[1] * axis[1] + (1.0 - axis[1] * axis[1]) * cos_theta
            mat[1, 2] = axis[1] * axis[2] * (1.0 - cos_theta) - axis[0] * sin_theta
            mat[2, 0] = axis[0] * axis[2] * (1.0 - cos_theta) - axis[1] * sin_theta
            mat[2, 1] = axis[1] * axis[2] * (1.0 - cos_theta) + axis[0] * sin_theta
            mat[2, 2] = axis[2] * axis[2] + (1.0 - axis[2] * axis[2]) * cos_theta
            mat[3, 3] = 1.0
            ret = mat @ ret
    return ret
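# A hedged example of feeding parse_transform a Mitsuba-style <transform> node.
# It assumes check_default(value, param_dict) just resolves named defaults to a
# numeric string, and that transform.gen_translate_matrix / gen_scale_matrix
# return 4x4 torch matrices; the scene snippet itself is made up for illustration.
import xml.etree.ElementTree as ET

xml_snippet = """
<transform name="toWorld">
    <scale value="2.0"/>
    <rotate y="1.0" angle="90"/>
    <translate x="0.0" y="1.0" z="-3.0"/>
</transform>
"""

node = ET.fromstring(xml_snippet)
to_world = parse_transform(node, param_dict = {})
# to_world is a 4x4 matrix that scales, then rotates, then translates a point.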
def __init__(self,
             position,
             look_at,
             up,
             fov,
             clip_near,
             resolution,
             cam_to_ndc = None,
             camera_type = redner.CameraType.perspective,
             fisheye = False):
    assert(position.dtype == torch.float32)
    assert(len(position.shape) == 1 and position.shape[0] == 3)
    assert(position.device.type == 'cpu')
    assert(look_at.dtype == torch.float32)
    assert(len(look_at.shape) == 1 and look_at.shape[0] == 3)
    assert(look_at.device.type == 'cpu')
    assert(up.dtype == torch.float32)
    assert(len(up.shape) == 1 and up.shape[0] == 3)
    assert(up.device.type == 'cpu')
    if fov is not None:
        assert(fov.dtype == torch.float32)
        assert(len(fov.shape) == 1 and fov.shape[0] == 1)
        assert(fov.device.type == 'cpu')
    assert(isinstance(clip_near, float))

    self.position = position
    self.look_at = look_at
    self.up = up
    self._fov = fov
    if cam_to_ndc is None:
        if camera_type == redner.CameraType.perspective:
            fov_factor = 1.0 / torch.tan(transform.radians(0.5 * fov))
            o = torch.ones([1], dtype=torch.float32)
            diag = torch.cat([fov_factor, fov_factor, o], 0)
            self._cam_to_ndc = torch.diag(diag)
        else:
            self._cam_to_ndc = torch.eye(3, dtype=torch.float32)
    else:
        self._cam_to_ndc = cam_to_ndc
    self.ndc_to_cam = torch.inverse(self.cam_to_ndc)
    self.clip_near = clip_near
    self.resolution = resolution
    self.camera_type = camera_type
    if fisheye:
        self.camera_type = redner.CameraType.fisheye
def __init__(self, position, look_at, up, fov, clip_near, resolution, fisheye = False):
    self.position = position
    self.look_at = look_at
    self.up = up
    self.fov = fov
    self.cam_to_world = transform.gen_look_at_matrix(position, look_at, up)
    self.world_to_cam = torch.inverse(self.cam_to_world)
    self.fov_factor = torch.tan(transform.radians(0.5 * fov))
    self.clip_near = clip_near
    self.resolution = resolution
    self.fisheye = fisheye
def __init__(self,
             position,
             look_at,
             up,
             fov,
             clip_near,
             resolution,
             cam_to_ndc = None,
             fisheye = False):
    assert(position.dtype == torch.float32)
    assert(len(position.shape) == 1 and position.shape[0] == 3)
    assert(look_at.dtype == torch.float32)
    assert(len(look_at.shape) == 1 and look_at.shape[0] == 3)
    assert(up.dtype == torch.float32)
    assert(len(up.shape) == 1 and up.shape[0] == 3)
    if fov is not None:
        assert(fov.dtype == torch.float32)
        assert(len(fov.shape) == 1 and fov.shape[0] == 1)
    assert(isinstance(clip_near, float))

    self.position = position
    self.look_at = look_at
    self.up = up
    self._fov = fov
    # self.cam_to_world = transform.gen_look_at_matrix(position, look_at, up)
    # self.world_to_cam = torch.inverse(self.cam_to_world).contiguous()
    if cam_to_ndc is None:
        fov_factor = 1.0 / torch.tan(transform.radians(0.5 * fov))
        o = torch.ones([1], dtype=torch.float32)
        diag = torch.cat([fov_factor, fov_factor, o], 0)
        self._cam_to_ndc = torch.diag(diag)
    else:
        self._cam_to_ndc = cam_to_ndc
    self.ndc_to_cam = torch.inverse(self.cam_to_ndc)
    self.clip_near = clip_near
    self.resolution = resolution
    self.fisheye = fisheye
def __init__(self,
             position,
             look_at,
             up,
             fov,
             clip_near,
             resolution,
             cam_to_ndc = None,
             fisheye = False):
    assert(position.dtype == tf.float32)
    assert(len(position.shape) == 1 and position.shape[0] == 3)
    assert(look_at.dtype == tf.float32)
    assert(len(look_at.shape) == 1 and look_at.shape[0] == 3)
    assert(up.dtype == tf.float32)
    assert(len(up.shape) == 1 and up.shape[0] == 3)
    if fov is not None:
        assert(fov.dtype == tf.float32)
        assert(len(fov.shape) == 1 and fov.shape[0] == 1)
    assert(isinstance(clip_near, float))

    self._position = position
    self._look_at = look_at
    self._up = up
    self._fov = fov
    self.cam_to_world = transform.gen_look_at_matrix(position, look_at, up)
    self.world_to_cam = tf.linalg.inv(self.cam_to_world)
    if cam_to_ndc is None:
        fov_factor = 1.0 / tf.tan(transform.radians(0.5 * fov))
        o = tf.ones([1], dtype=tf.float32)
        diag = tf.concat([fov_factor, fov_factor, o], 0)
        self._cam_to_ndc = tf.diag(diag)
    else:
        self._cam_to_ndc = cam_to_ndc
    self.ndc_to_cam = tf.linalg.inv(self.cam_to_ndc)
    self.clip_near = clip_near
    self.resolution = resolution
    self.fisheye = fisheye
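# A minimal usage sketch for the TensorFlow constructor above, assuming it belongs
# to the Camera class of pyredner_tensorflow; the numbers are placeholders.
import tensorflow as tf
import pyredner_tensorflow as pyredner

cam = pyredner.Camera(
    position = tf.Variable([0.0, 0.0, -5.0], dtype=tf.float32),
    look_at = tf.Variable([0.0, 0.0, 0.0], dtype=tf.float32),
    up = tf.Variable([0.0, 1.0, 0.0], dtype=tf.float32),
    fov = tf.Variable([45.0], dtype=tf.float32),   # degrees, shape [1]
    clip_near = 1e-2,
    resolution = (256, 256))
# Wrapping the pose and field of view in tf.Variable keeps them trainable, which
# is the usual setup when optimizing camera parameters by inverse rendering.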