Example #1
 def fov(self, value):
     # Property setter: copy the new field of view (in degrees) to the CPU
     # and rebuild the diagonal camera-to-NDC matrix and its inverse.
     with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
         self._fov = tf.identity(value).cpu()
         fov_factor = 1.0 / tf.tan(transform.radians(0.5 * self._fov))
         o = tf.convert_to_tensor(np.ones([1], dtype=np.float32),
                                  dtype=tf.float32)
         diag = tf.concat([fov_factor, fov_factor, o], 0)
         self._cam_to_ndc = tf.linalg.tensor_diag(diag)
         self.ndc_to_cam = tf.linalg.inv(self._cam_to_ndc)
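A minimal usage sketch for this setter, assuming a pyredner camera object named cam that exposes the fov property above (cam and the printed attributes are illustrative, not part of the excerpt):

import tensorflow as tf

# fov is expected to be a length-1 float32 tensor, in degrees.
cam.fov = tf.constant([45.0], dtype=tf.float32)
# The setter rebuilds diag(1/tan(fov/2), 1/tan(fov/2), 1) and its inverse:
ndc = cam._cam_to_ndc        # 3x3 camera-to-NDC matrix
ndc_inv = cam.ndc_to_cam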
Example #2
def parse_transform(node):
    # Compose a Mitsuba-style <transform> node (matrix / translate / scale /
    # rotate children) into a single 4x4 transform matrix.
    ret = tf.eye(4)
    for child in node:
        if child.tag == 'matrix':
            value = tf.convert_to_tensor(
                np.reshape(
                    np.fromstring(child.attrib['value'],
                                  dtype=np.float32,
                                  sep=' '), (4, 4)))
            ret = value @ ret
        elif child.tag == 'translate':
            x = float(child.attrib['x'])
            y = float(child.attrib['y'])
            z = float(child.attrib['z'])
            value = transform.gen_translate_matrix(tf.constant([x, y, z]))
            ret = value @ ret
        elif child.tag == 'scale':
            x = float(child.attrib['x'])
            y = float(child.attrib['y'])
            z = float(child.attrib['z'])
            value = transform.gen_scale_matrix(tf.constant([x, y, z]))
            ret = value @ ret
        elif child.tag == 'rotate':
            x = float(child.attrib['x']) if 'x' in child.attrib else 0.0
            y = float(child.attrib['y']) if 'y' in child.attrib else 0.0
            z = float(child.attrib['z']) if 'z' in child.attrib else 0.0
            angle = transform.radians(float(child.attrib['angle']))
            axis = np.array([x, y, z])
            axis = axis / np.linalg.norm(axis)
            cos_theta = math.cos(angle)
            sin_theta = math.sin(angle)
            mat = np.zeros([4, 4], dtype=np.float32)
            # Rodrigues' rotation formula, written out as a 4x4 matrix.
            mat[0, 0] = axis[0] * axis[0] + (1.0 - axis[0] * axis[0]) * cos_theta
            mat[0, 1] = axis[0] * axis[1] * (1.0 - cos_theta) - axis[2] * sin_theta
            mat[0, 2] = axis[0] * axis[2] * (1.0 - cos_theta) + axis[1] * sin_theta

            mat[1, 0] = axis[0] * axis[1] * (1.0 - cos_theta) + axis[2] * sin_theta
            mat[1, 1] = axis[1] * axis[1] + (1.0 - axis[1] * axis[1]) * cos_theta
            mat[1, 2] = axis[1] * axis[2] * (1.0 - cos_theta) - axis[0] * sin_theta

            mat[2, 0] = axis[0] * axis[2] * (1.0 - cos_theta) - axis[1] * sin_theta
            mat[2, 1] = axis[1] * axis[2] * (1.0 - cos_theta) + axis[0] * sin_theta
            mat[2, 2] = axis[2] * axis[2] + (1.0 - axis[2] * axis[2]) * cos_theta

            mat[3, 3] = 1.0

            ret = tf.convert_to_tensor(mat) @ ret
    return ret
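A short sketch of driving this parser with a Mitsuba-style XML fragment; the XML content below is made up for illustration and assumes parse_transform and its transform helpers are defined as above:

import xml.etree.ElementTree as etree

node = etree.fromstring(
    '<transform name="toWorld">'
    '  <scale x="2.0" y="2.0" z="2.0"/>'
    '  <rotate x="0.0" y="1.0" z="0.0" angle="90"/>'
    '  <translate x="0.0" y="0.0" z="5.0"/>'
    '</transform>')

# Each child matrix is premultiplied onto the running result, so the children
# apply in document order: scale first, then rotate, then translate.
to_world = parse_transform(node)   # 4x4 float32 tf.Tensor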
Example #3
 def fov(self, value):
     if value is not None:
         self._fov = value
         with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
             fov_factor = 1.0 / tf.tan(transform.radians(0.5 * self._fov))
             o = tf.ones([1], dtype=tf.float32)
             diag = tf.concat([fov_factor, fov_factor, o], 0)
             self._intrinsic_mat = tf.linalg.tensor_diag(diag)
             self.intrinsic_mat_inv = tf.linalg.inv(self._intrinsic_mat)
     else:
         self._fov = None
Example #4
 def __init__(self,
              position: Optional[tf.Tensor] = None,
              look_at: Optional[tf.Tensor] = None,
              up: Optional[tf.Tensor] = None,
              fov: Optional[tf.Tensor] = None,
              clip_near: float = 1e-4,
              resolution: Tuple[int] = (256, 256),
              cam_to_world: Optional[tf.Tensor] = None,
              intrinsic_mat: Optional[tf.Tensor] = None,
              camera_type = pyredner.camera_type.perspective,
              fisheye: bool = False):
     assert(tf.executing_eagerly())
     if position is not None:
         assert(position.dtype == tf.float32)
         assert(len(position.shape) == 1 and position.shape[0] == 3)
     if look_at is not None:
         assert(look_at.dtype == tf.float32)
         assert(len(look_at.shape) == 1 and look_at.shape[0] == 3)
     if up is not None:
         assert(up.dtype == tf.float32)
         assert(len(up.shape) == 1 and up.shape[0] == 3)
     if fov is not None:
         assert(fov.dtype == tf.float32)
         assert(len(fov.shape) == 1 and fov.shape[0] == 1)
     assert(isinstance(clip_near, float))
     if position is None and look_at is None and up is None:
         assert(cam_to_world is not None)
     
     self.position = position
     self.look_at = look_at
     self.up = up
     self.fov = fov
     with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
         if cam_to_world is not None:
             self.cam_to_world = cam_to_world
         else:
             self.cam_to_world = None
         if intrinsic_mat is None:
             if camera_type == redner.CameraType.perspective:
                 fov_factor = 1.0 / tf.tan(transform.radians(0.5 * fov))
                 o = tf.ones([1], dtype=tf.float32)
                 diag = tf.concat([fov_factor, fov_factor, o], 0)
                 self._intrinsic_mat = tf.linalg.tensor_diag(diag)
             else:
                 self._intrinsic_mat = tf.eye(3, dtype=tf.float32)   
         else:
             self._intrinsic_mat = intrinsic_mat
         self.intrinsic_mat_inv = tf.linalg.inv(self._intrinsic_mat)
     self.clip_near = clip_near
     self.resolution = resolution
     self.camera_type = camera_type
     if fisheye:
         self.camera_type = redner.CameraType.fisheye
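A hedged construction sketch for this Camera __init__, assuming the TensorFlow build of redner is importable as pyredner_tensorflow (the argument values are placeholders):

import tensorflow as tf
import pyredner_tensorflow as pyredner

cam = pyredner.Camera(
    position=tf.constant([0.0, 0.0, -5.0]),   # float32, shape [3]
    look_at=tf.constant([0.0, 0.0, 0.0]),
    up=tf.constant([0.0, 1.0, 0.0]),
    fov=tf.constant([45.0]),                  # degrees, shape [1]
    clip_near=1e-2,
    resolution=(256, 256))

Passing intrinsic_mat skips the fov-based perspective branch, and cam_to_world can stand in for position/look_at/up entirely, as the asserts above allow.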
Example #5
    def __init__(self,
                 position: tf.Tensor,
                 look_at: tf.Tensor,
                 up: tf.Tensor,
                 fov: tf.Tensor,
                 clip_near: float,
                 resolution: Tuple[int],
                 cam_to_ndc: tf.Tensor = None,
                 camera_type=redner.CameraType.perspective,
                 fisheye: bool = False):
        assert (tf.executing_eagerly())
        assert (position.dtype == tf.float32)
        assert (len(position.shape) == 1 and position.shape[0] == 3)
        assert (look_at.dtype == tf.float32)
        assert (len(look_at.shape) == 1 and look_at.shape[0] == 3)
        assert (up.dtype == tf.float32)
        assert (len(up.shape) == 1 and up.shape[0] == 3)
        if fov is not None:
            assert (fov.dtype == tf.float32)
            assert (len(fov.shape) == 1 and fov.shape[0] == 1)
        assert (isinstance(clip_near, float))

        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            self.position = tf.identity(position).cpu()
            self.look_at = tf.identity(look_at).cpu()
            self.up = tf.identity(up).cpu()
            self.fov = tf.identity(fov).cpu()
            if cam_to_ndc is None:
                if camera_type == redner.CameraType.perspective:
                    fov_factor = 1.0 / tf.tan(transform.radians(0.5 * fov))
                    o = tf.convert_to_tensor(np.ones([1], dtype=np.float32),
                                             dtype=tf.float32)
                    diag = tf.concat([fov_factor, fov_factor, o], 0)
                    self._cam_to_ndc = tf.linalg.tensor_diag(diag)
                else:
                    self._cam_to_ndc = tf.eye(3, dtype=tf.float32)
            else:
                self._cam_to_ndc = tf.identity(cam_to_ndc).cpu()
            self.ndc_to_cam = tf.linalg.inv(self.cam_to_ndc)
            self.clip_near = clip_near
            self.resolution = resolution
            self.camera_type = camera_type
            if fisheye:
                self.camera_type = redner.CameraType.fisheye
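When a camera-to-NDC matrix is passed explicitly, the fov-based branch above is bypassed; a minimal sketch, assuming this __init__ belongs to a class named Camera in the same module:

import tensorflow as tf

# diag(1/tan(fov/2), 1/tan(fov/2), 1) for a 45-degree fov, supplied by hand.
cam_to_ndc = tf.linalg.tensor_diag(tf.constant([2.4142, 2.4142, 1.0]))
cam = Camera(position=tf.constant([0.0, 0.0, -5.0]),
             look_at=tf.constant([0.0, 0.0, 0.0]),
             up=tf.constant([0.0, 1.0, 0.0]),
             fov=tf.constant([45.0]),       # still required by this signature
             clip_near=1e-2,
             resolution=(256, 256),
             cam_to_ndc=cam_to_ndc)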