Example No. 1
    def test_assert_all_above_exception_raised(self, dtype):
        """Checks that assert_all_above raises exceptions for invalid input."""
        vector = _pick_random_vector()
        vector = tf.convert_to_tensor(value=vector, dtype=dtype)

        vector = vector * vector
        vector /= -tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True)
        eps = asserts.select_eps_for_addition(dtype)
        outside_vector = vector - eps
        ones_vector = -tf.ones_like(vector)

        with self.subTest(name="outside_and_open_bounds"):
            with self.assertRaises(tf.errors.InvalidArgumentError):
                self.evaluate(
                    asserts.assert_all_above(outside_vector,
                                             -1.0,
                                             open_bound=True))

        with self.subTest(name="outside_and_close_bounds"):
            with self.assertRaises(tf.errors.InvalidArgumentError):
                self.evaluate(
                    asserts.assert_all_above(outside_vector,
                                             -1.0,
                                             open_bound=False))

        with self.subTest(name="exact_and_open_bounds"):
            with self.assertRaises(tf.errors.InvalidArgumentError):
                self.evaluate(
                    asserts.assert_all_above(ones_vector,
                                             -1.0,
                                             open_bound=True))
Example No. 2
def _random_categorical_sample(
        num_samples: int,
        weights: type_alias.TensorLike,
        seed: Optional[type_alias.TensorLike] = None,
        stateless: bool = False,
        name: str = "random_categorical_sample",
        sample_dtype: tf.DType = tf.int32) -> type_alias.TensorLike:
    """Samples from a categorical distribution with arbitrary batch dimensions.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    num_samples: An `int32` scalar denoting the number of samples to generate
      per mesh.
    weights: A `float` tensor of shape `[A1, ..., An, F]` where F is number of
      faces.
      All weights must be > 0.
    seed: Optional random seed, value depends on `stateless`.
    stateless: Optional flag to use stateless random sampler. If stateless=True,
      then `seed` must be provided as shape `[2]` int tensor. Stateless random
      sampling is useful for testing to generate the same reproducible sequence
      across calls. If stateless=False, then a stateful random number generator
      is used (default behavior).
    name: Name for op. Defaults to "random_categorical_sample".
    sample_dtype: Type of output samples.

  Returns:
    A `sample_dtype` tensor of shape `[A1, ..., An, num_samples]`.
  """
    with tf.name_scope(name):
        asserts.assert_all_above(weights, 0)
        logits = tf.math.log(weights)
        num_faces = tf.shape(input=logits)[-1]
        batch_shape = tf.shape(input=logits)[:-1]
        logits_2d = tf.reshape(logits, [-1, num_faces])
        if stateless:
            seed = tf.convert_to_tensor(value=seed)
            shape.check_static(tensor=seed,
                               tensor_name="seed",
                               has_dim_equals=(-1, 2))
            sample_fn = tf.random.stateless_categorical
        else:
            sample_fn = tf.random.categorical
        draws = sample_fn(logits=logits_2d,
                          num_samples=num_samples,
                          dtype=sample_dtype,
                          seed=seed)
        samples = tf.reshape(draws,
                             shape=tf.concat((batch_shape, (num_samples, )),
                                             axis=0))
        return samples
Example No. 3
    def test_assert_all_above_passthrough(self):
        """Checks that the assert is a passthrough when the flag is False."""
        vector_input = _pick_random_vector()

        vector_output = asserts.assert_all_above(vector_input, 1.0)

        self.assertIs(vector_input, vector_output)
Example No. 4
def spherical_to_cartesian_coordinates(point_spherical, name=None):
    """Function to transform Cartesian coordinates to spherical coordinates.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    point_spherical: A tensor of shape `[A1, ..., An, 3]`. The last dimension
      contains r, theta, and phi that respectively correspond to the radius,
      polar angle and azimuthal angle; r must be non-negative.
    name: A name for this op. Defaults to 'spherical_to_cartesian_coordinates'.

  Raises:
    tf.errors.InvalidArgumentError: If r, theta or phi contains out of range
    data.

  Returns:
    A tensor of shape `[A1, ..., An, 3]`, where the last dimension contains the
    cartesian coordinates in x,y,z order.
  """
    with tf.compat.v1.name_scope(name, "spherical_to_cartesian_coordinates",
                                 [point_spherical]):
        point_spherical = tf.convert_to_tensor(value=point_spherical)

        shape.check_static(tensor=point_spherical,
                           tensor_name="point_spherical",
                           has_dim_equals=(-1, 3))

        r, theta, phi = tf.unstack(point_spherical, axis=-1)
        r = asserts.assert_all_above(r, 0)
        tmp = r * tf.sin(theta)
        x = tmp * tf.cos(phi)
        y = tmp * tf.sin(phi)
        z = r * tf.cos(theta)
        return tf.stack((x, y, z), axis=-1)
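
A minimal usage sketch, not part of the original example: it assumes the
`spherical_to_cartesian_coordinates` function above is in scope (i.e. TensorFlow
Graphics and its helper modules are available), and the input values are purely
illustrative.

import numpy as np
import tensorflow as tf

# r = 1, theta = pi/2 (polar angle), phi = 0 (azimuthal angle).
point_spherical = tf.constant([1.0, np.pi / 2.0, 0.0])
point_cartesian = spherical_to_cartesian_coordinates(point_spherical)
# Expected output: approximately (1.0, 0.0, 0.0).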
Example No. 5
def generate_random_face_indices(
        num_samples: int,
        face_weights: type_alias.TensorLike,
        seed: Optional[type_alias.TensorLike] = None,
        stateless: bool = False,
        name: str = "generate_random_face_indices") -> type_alias.TensorLike:
    """Generate a sample of face ids given per face probability.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    num_samples: An `int32` scalar denoting the number of samples to generate
      per mesh.
    face_weights: A `float` tensor of shape `[A1, ..., An, F]` where F is
      number of faces. All weights must be > 0.
    seed: Optional seed for the random number generator.
    stateless: Optional flag to use stateless random sampler. If stateless=True,
      then `seed` must be provided as shape `[2]` int tensor. Stateless random
      sampling is useful for testing to generate the same reproducible sequence
      across calls. If stateless=False, then a stateful random number generator
      is used (default behavior).
    name: Name for op. Defaults to "generate_random_face_indices".

  Returns:
    An `int32` tensor of shape `[A1, ..., An, num_samples]` denoting sampled
      face indices.
  """
    with tf.name_scope(name):
        num_samples = tf.convert_to_tensor(value=num_samples)
        face_weights = tf.convert_to_tensor(value=face_weights)
        shape.check_static(tensor=face_weights,
                           tensor_name="face_weights",
                           has_rank_greater_than=0)
        shape.check_static(tensor=num_samples,
                           tensor_name="num_samples",
                           has_rank=0)

        face_weights = asserts.assert_all_above(face_weights, minval=0.0)
        eps = asserts.select_eps_for_division(face_weights.dtype)
        face_weights = face_weights + eps
        sampled_face_indices = _random_categorical_sample(
            num_samples=num_samples,
            weights=face_weights,
            seed=seed,
            stateless=stateless)
        return sampled_face_indices
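
A short usage sketch, assuming `generate_random_face_indices` as defined above
is in scope; the face weights and seed below are illustrative only.

import tensorflow as tf

# Three faces with unnormalized sampling weights; face 2 is the most likely.
face_weights = tf.constant([0.1, 0.2, 0.7])
# Stateless sampling expects a shape-[2] integer seed for reproducibility.
face_indices = generate_random_face_indices(
    num_samples=5, face_weights=face_weights, seed=(42, 7), stateless=True)
# `face_indices` has shape [5], with values in {0, 1, 2}.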
Example No. 6
def evaluate_legendre_polynomial(degree_l, order_m, x):
  """Evaluates the Legendre polynomial of degree l and order m at x.

  Note:
    This function implements the algorithm described on p. 10 of `Spherical
    Harmonic Lighting: The Gritty Details`.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    degree_l: An integer tensor of shape `[A1, ..., An]` corresponding to the
      degree of the associated Legendre polynomial. Note that `degree_l` must be
      non-negative.
    order_m: An integer tensor of shape `[A1, ..., An]` corresponding to the
      order of the associated Legendre polynomial. Note that `order_m` must
      satisfy `0 <= order_m <= l`.
    x: A tensor of shape `[A1, ..., An]` with values in [-1,1].

  Returns:
    A tensor of shape `[A1, ..., An]` containing the evaluation of the Legendre
    polynomial.
  """
  degree_l = tf.convert_to_tensor(value=degree_l)
  order_m = tf.convert_to_tensor(value=order_m)
  x = tf.convert_to_tensor(value=x)

  if not degree_l.dtype.is_integer:
    raise ValueError("`degree_l` must be of an integer type.")
  if not order_m.dtype.is_integer:
    raise ValueError("`order_m` must be of an integer type.")
  shape.compare_batch_dimensions(
      tensors=(degree_l, order_m, x),
      last_axes=-1,
      tensor_names=("degree_l", "order_m", "x"),
      broadcast_compatible=True)
  degree_l = asserts.assert_all_above(degree_l, 0)
  order_m = asserts.assert_all_in_range(order_m, 0, degree_l)
  x = asserts.assert_all_in_range(x, -1.0, 1.0)

  pmm = _evaluate_legendre_polynomial_pmm_eval(order_m, x)
  return tf.compat.v1.where(
      tf.equal(degree_l, order_m), pmm,
      _evaluate_legendre_polynomial_branch(degree_l, order_m, x, pmm))
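
A hedged usage sketch, assuming `evaluate_legendre_polynomial` and the private
helpers it relies on are in scope; the inputs are illustrative.

import tensorflow as tf

degree_l = tf.constant([1], dtype=tf.int32)
order_m = tf.constant([0], dtype=tf.int32)
x = tf.constant([0.5])
value = evaluate_legendre_polynomial(degree_l, order_m, x)
# Since P_1^0(x) = x, `value` should be approximately [0.5].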
Example No. 7
def weighted_random_sample_triangle_mesh(vertex_attributes,
                                         faces,
                                         num_samples,
                                         face_weights,
                                         seed=None,
                                         stateless=False,
                                         name=None):
  """Performs a face probability weighted random sampling of a tri mesh.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    vertex_attributes: A `float` tensor of shape `[A1, ..., An, V, D]`, where V
      is the number of vertices, and D is dimensionality of each vertex.
    faces: An `int` tensor of shape `[A1, ..., An, F, 3]`, where F is the number
      of faces.
    num_samples: An `int` 0-D tensor denoting the number of samples to be drawn
      from each mesh.
    face_weights: A `float` tensor of shape `[A1, ..., An, F]`, denoting the
      unnormalized sampling probability of each face, where F is the number of
      faces.
    seed: Optional random seed.
    stateless: Optional flag to use stateless random sampler. If stateless=True,
      then seed must be provided as shape `[2]` int tensor. Stateless random
      sampling is useful for testing to generate same sequence across calls.
    name: Name for op. Defaults to "weighted_random_sample_triangle_mesh".

  Returns:
    sample_points: A `float` tensor of shape `[A1, ..., An, num_samples, D]`,
      where D is dimensionality of each sampled point.
    sample_face_indices: An `int` tensor of shape `[A1, ..., An, num_samples]`.
  """
  with tf.compat.v1.name_scope(name, "weighted_random_sample_triangle_mesh"):
    faces = tf.convert_to_tensor(value=faces)
    vertex_attributes = tf.convert_to_tensor(value=vertex_attributes)
    face_weights = tf.convert_to_tensor(value=face_weights)
    num_samples = tf.convert_to_tensor(value=num_samples)

    shape.check_static(
        tensor=vertex_attributes,
        tensor_name="vertex_attributes",
        has_rank_greater_than=1)
    shape.check_static(
        tensor=faces, tensor_name="faces", has_rank_greater_than=1)
    shape.check_static(
        tensor=face_weights,
        tensor_name="face_weights",
        has_rank_greater_than=0)
    shape.compare_batch_dimensions(
        tensors=(faces, face_weights),
        last_axes=(-2, -1),
        tensor_names=("faces", "face_weights"),
        broadcast_compatible=False)
    shape.compare_batch_dimensions(
        tensors=(vertex_attributes, faces, face_weights),
        last_axes=(-3, -3, -2),
        tensor_names=("vertex_attributes", "faces", "face_weights"),
        broadcast_compatible=False)

    asserts.assert_all_above(face_weights, 0)

    batch_dims = faces.shape.ndims - 2
    batch_shape = faces.shape.as_list()[:-2]
    sample_shape = tf.concat(
        (batch_shape, tf.convert_to_tensor(
            value=(num_samples,), dtype=tf.int32)),
        axis=0)

    sample_face_indices = generate_random_face_indices(
        num_samples, face_weights, seed=seed, stateless=stateless)
    sample_vertex_indices = tf.gather(
        faces, sample_face_indices, batch_dims=batch_dims)
    sample_vertices = tf.gather(
        vertex_attributes, sample_vertex_indices, batch_dims=batch_dims)
    barycentric = generate_random_barycentric_coordinates(
        sample_shape,
        dtype=vertex_attributes.dtype,
        seed=seed,
        stateless=stateless)
    barycentric = tf.expand_dims(barycentric, axis=-1)
    sample_points = tf.math.multiply(sample_vertices, barycentric)
    sample_points = tf.reduce_sum(input_tensor=sample_points, axis=-2)
    return sample_points, sample_face_indices
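
A minimal usage sketch, assuming `weighted_random_sample_triangle_mesh` and the
helper functions it calls are in scope; the single-triangle mesh is illustrative.

import tensorflow as tf

# A batch of one mesh with a single triangle in the z = 0 plane.
vertex_attributes = tf.constant([[[0.0, 0.0, 0.0],
                                  [1.0, 0.0, 0.0],
                                  [0.0, 1.0, 0.0]]])        # shape [1, 3, 3]
faces = tf.constant([[[0, 1, 2]]], dtype=tf.int32)          # shape [1, 1, 3]
face_weights = tf.constant([[1.0]])                         # shape [1, 1]
points, face_ids = weighted_random_sample_triangle_mesh(
    vertex_attributes, faces, num_samples=4, face_weights=face_weights)
# `points` has shape [1, 4, 3] and `face_ids` has shape [1, 4].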
Example No. 8
def perspective_right_handed(vertical_field_of_view,
                             aspect_ratio,
                             near,
                             far,
                             name=None):
  """Generates the matrix for a right handed perspective projection.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    vertical_field_of_view: A tensor of shape `[A1, ..., An]`, where the last
      dimension represents the vertical field of view of the frustum expressed
      in radians. Note that values for `vertical_field_of_view` must be in the
      range (0,pi).
    aspect_ratio: A tensor of shape `[A1, ..., An, C]`, where the last dimension
      stores the width over height ratio of the frustum. Note that values for
      `aspect_ratio` must be non-negative.
    near:  A tensor of shape `[A1, ..., An, C]`, where the last dimension
      captures the distance between the viewer and the near clipping plane. Note
      that values for `near` must be non-negative.
    far:  A tensor of shape `[A1, ..., An, C]`, where the last dimension
      captures the distance between the viewer and the far clipping plane. Note
      that values for `far` must be greater than those of `near`.
    name: A name for this op. Defaults to 'perspective_rh'.

  Raises:
    InvalidArgumentError: if any input contains data not in the specified range
      of valid values.
    ValueError: if all the inputs are not of the same shape.

  Returns:
    A tensor of shape `[A1, ..., An, 4, 4]`, containing matrices of right
    handed perspective-view frustum.
  """
  with tf.compat.v1.name_scope(
      name, "perspective_rh",
      [vertical_field_of_view, aspect_ratio, near, far]):
    vertical_field_of_view = tf.convert_to_tensor(value=vertical_field_of_view)
    aspect_ratio = tf.convert_to_tensor(value=aspect_ratio)
    near = tf.convert_to_tensor(value=near)
    far = tf.convert_to_tensor(value=far)

    shape.compare_batch_dimensions(
        tensors=(vertical_field_of_view, aspect_ratio, near, far),
        last_axes=-1,
        tensor_names=("vertical_field_of_view", "aspect_ratio", "near", "far"),
        broadcast_compatible=False)

    vertical_field_of_view = asserts.assert_all_in_range(
        vertical_field_of_view, 0.0, math.pi, open_bounds=True)
    aspect_ratio = asserts.assert_all_above(aspect_ratio, 0.0, open_bound=True)
    near = asserts.assert_all_above(near, 0.0, open_bound=True)
    far = asserts.assert_all_above(far, near, open_bound=True)

    inverse_tan_half_vertical_field_of_view = 1.0 / tf.tan(
        vertical_field_of_view * 0.5)
    zero = tf.zeros_like(inverse_tan_half_vertical_field_of_view)
    one = tf.ones_like(inverse_tan_half_vertical_field_of_view)

    x = tf.stack((inverse_tan_half_vertical_field_of_view / aspect_ratio, zero,
                  zero, zero),
                 axis=-1)
    y = tf.stack((zero, inverse_tan_half_vertical_field_of_view, zero, zero),
                 axis=-1)
    near_minus_far = near - far
    z = tf.stack(
        (zero, zero,
         (far + near) / near_minus_far, 2.0 * far * near / near_minus_far),
        axis=-1)
    w = tf.stack((zero, zero, -one, zero), axis=-1)

    return tf.stack((x, y, z, w), axis=-2)
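
A hedged usage sketch, assuming `perspective_right_handed` as defined above is
in scope; the frustum parameters are illustrative.

import numpy as np
import tensorflow as tf

vertical_field_of_view = tf.constant((np.pi / 3.0,))  # 60 degrees
aspect_ratio = tf.constant((16.0 / 9.0,))
near = tf.constant((0.1,))
far = tf.constant((100.0,))
projection = perspective_right_handed(vertical_field_of_view, aspect_ratio,
                                      near, far)
# `projection` has shape [1, 4, 4].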
Example No. 9
def brdf(direction_incoming_light: type_alias.TensorLike,
         direction_outgoing_light: type_alias.TensorLike,
         surface_normal: type_alias.TensorLike,
         shininess: type_alias.TensorLike,
         albedo: type_alias.TensorLike,
         brdf_normalization: bool = True,
         name: str = "phong_brdf") -> tf.Tensor:
    """Evaluates the specular brdf of the Phong model.

  Note:
    In the following, A1 to An are optional batch dimensions, which must be
    broadcast compatible.

  Note:
    The gradient of this function is not smooth when the dot product of the
    normal with any light is 0.0.

  Args:
    direction_incoming_light: A tensor of shape `[A1, ..., An, 3]`, where the
      last dimension represents a normalized incoming light vector.
    direction_outgoing_light: A tensor of shape `[A1, ..., An, 3]`, where the
      last dimension represents a normalized outgoing light vector.
    surface_normal: A tensor of shape `[A1, ..., An, 3]`, where the last
      dimension represents a normalized surface normal.
    shininess: A tensor of shape `[A1, ..., An, 1]`, where the last dimension
      represents a non-negative shininess coefficient.
    albedo: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
      represents albedo with values in [0,1].
    brdf_normalization: A `bool` indicating whether normalization should be
      applied to enforce the energy conservation property of BRDFs. Note that
      `brdf_normalization` must be set to False in order to use the original
      Blinn specular model.
    name: A name for this op. Defaults to "phong_brdf".

  Returns:
    A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents
      the amount of light reflected in the outgoing light direction.

  Raises:
    ValueError: if the shape of `direction_incoming_light`,
    `direction_outgoing_light`, `surface_normal`, `shininess` or `albedo` is not
    supported.
    InvalidArgumentError: if not all shininess values are non-negative, or if
    at least one element of `albedo` is outside of [0,1].
  """
    with tf.name_scope(name):
        direction_incoming_light = tf.convert_to_tensor(
            value=direction_incoming_light)
        direction_outgoing_light = tf.convert_to_tensor(
            value=direction_outgoing_light)
        surface_normal = tf.convert_to_tensor(value=surface_normal)
        shininess = tf.convert_to_tensor(value=shininess)
        albedo = tf.convert_to_tensor(value=albedo)

        shape.check_static(tensor=direction_incoming_light,
                           tensor_name="direction_incoming_light",
                           has_dim_equals=(-1, 3))
        shape.check_static(tensor=direction_outgoing_light,
                           tensor_name="direction_outgoing_light",
                           has_dim_equals=(-1, 3))
        shape.check_static(tensor=surface_normal,
                           tensor_name="surface_normal",
                           has_dim_equals=(-1, 3))
        shape.check_static(tensor=shininess,
                           tensor_name="shininess",
                           has_dim_equals=(-1, 1))
        shape.check_static(tensor=albedo,
                           tensor_name="albedo",
                           has_dim_equals=(-1, 3))
        shape.compare_batch_dimensions(
            tensors=(direction_incoming_light, direction_outgoing_light,
                     surface_normal, shininess, albedo),
            tensor_names=("direction_incoming_light",
                          "direction_outgoing_light", "surface_normal",
                          "shininess", "albedo"),
            last_axes=-2,
            broadcast_compatible=True)
        direction_incoming_light = asserts.assert_normalized(
            direction_incoming_light)
        direction_outgoing_light = asserts.assert_normalized(
            direction_outgoing_light)
        surface_normal = asserts.assert_normalized(surface_normal)
        albedo = asserts.assert_all_in_range(albedo,
                                             0.0,
                                             1.0,
                                             open_bounds=False)
        shininess = asserts.assert_all_above(shininess, 0.0, open_bound=False)

        # Checks whether the incoming or outgoing light points behind the surface.
        dot_incoming_light_surface_normal = vector.dot(
            -direction_incoming_light, surface_normal)
        dot_outgoing_light_surface_normal = vector.dot(
            direction_outgoing_light, surface_normal)
        min_dot = tf.minimum(dot_incoming_light_surface_normal,
                             dot_outgoing_light_surface_normal)
        perfect_reflection_direction = vector.reflect(direction_incoming_light,
                                                      surface_normal)
        perfect_reflection_direction = tf.math.l2_normalize(
            perfect_reflection_direction, axis=-1)
        cos_alpha = vector.dot(perfect_reflection_direction,
                               direction_outgoing_light,
                               axis=-1)
        cos_alpha = tf.maximum(cos_alpha, tf.zeros_like(cos_alpha))
        phong_model = albedo * tf.pow(cos_alpha, shininess)
        if brdf_normalization:
            phong_model *= _brdf_normalization_factor(shininess)
        common_shape = shape.get_broadcasted_shape(min_dot.shape,
                                                   phong_model.shape)
        d_val = lambda dim: 1 if dim is None else tf.compat.dimension_value(dim)
        common_shape = [d_val(dim) for dim in common_shape]
        condition = tf.broadcast_to(tf.greater_equal(min_dot, 0.0),
                                    common_shape)
        phong_model = tf.broadcast_to(phong_model, common_shape)
        return tf.where(condition, phong_model, tf.zeros_like(phong_model))
Example No. 10
def evaluate_spherical_harmonics(degree_l, order_m, theta, phi, name=None):
    """Evaluates a point sample of a Spherical Harmonic basis function.

  Note:
    This function implements the algorithm and uses the variable names described
    on p. 12 of 'Spherical Harmonic Lighting: The Gritty Details'.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    degree_l: An integer tensor of shape `[A1, ..., An, C]`, where the last
      dimension represents the band of the spherical harmonics. Note that
      `degree_l` must be non-negative.
    order_m: An integer tensor of shape `[A1, ..., An, C]`, where the last
      dimension represents the index of the spherical harmonics in the band
      `degree_l`. Note that `order_m` must satisfy `0 <= order_m <= l`.
    theta: A tensor of shape `[A1, ..., An, 1]`. This variable stores the polar
      angle of the sample. Values of theta must be in [0, pi].
    phi: A tensor of shape `[A1, ..., An, 1]`. This variable stores the
      azimuthal angle of the sample. Values of phi must be in [0, 2pi].
    name: A name for this op. Defaults to
      'spherical_harmonics_evaluate_spherical_harmonics'.

  Returns:
    A tensor of shape `[A1, ..., An, C]` containing the evaluation of each basis
    of the spherical harmonics.

  Raises:
    ValueError: if the shape of `theta` or `phi` is not supported.
    InvalidArgumentError: if at least one element of `degree_l`, `order_m`, `theta` or `phi`
    is outside the expected range.
  """
    with tf.compat.v1.name_scope(
            name, "spherical_harmonics_evaluate_spherical_harmonics",
        [degree_l, order_m, theta, phi]):
        degree_l = tf.convert_to_tensor(value=degree_l)
        order_m = tf.convert_to_tensor(value=order_m)
        theta = tf.convert_to_tensor(value=theta)
        phi = tf.convert_to_tensor(value=phi)

        if not degree_l.dtype.is_integer:
            raise ValueError("`degree_l` must be of an integer type.")
        if not order_m.dtype.is_integer:
            raise ValueError("`order_m` must be of an integer type.")

        shape.compare_dimensions(tensors=(degree_l, order_m),
                                 axes=-1,
                                 tensor_names=("degree_l", "order_m"))
        shape.check_static(tensor=phi,
                           tensor_name="phi",
                           has_dim_equals=(-1, 1))
        shape.check_static(tensor=theta,
                           tensor_name="theta",
                           has_dim_equals=(-1, 1))
        shape.compare_batch_dimensions(tensors=(degree_l, order_m, theta, phi),
                                       last_axes=-2,
                                       tensor_names=("degree_l", "order_m",
                                                     "theta", "phi"),
                                       broadcast_compatible=False)
        # Checks that tensors contain appropriate data.
        degree_l = asserts.assert_all_above(degree_l, 0)
        order_m = asserts.assert_all_in_range(order_m, -degree_l, degree_l)
        theta = asserts.assert_all_in_range(theta, 0.0, np.pi)
        phi = asserts.assert_all_in_range(phi, 0.0, 2.0 * np.pi)

        var_type = theta.dtype
        sign_m = tf.math.sign(order_m)
        order_m = tf.abs(order_m)
        zeros = tf.zeros_like(order_m)
        result_m_zero = _spherical_harmonics_normalization(
            degree_l, zeros, var_type) * evaluate_legendre_polynomial(
                degree_l, zeros, tf.cos(theta))
        result_branch = _evaluate_spherical_harmonics_branch(
            degree_l, order_m, theta, phi, sign_m, var_type)
        return tf.where(tf.equal(order_m, zeros), result_m_zero, result_branch)
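
A minimal usage sketch, assuming `evaluate_spherical_harmonics` and the private
helpers it calls are in scope; the sample point is illustrative.

import numpy as np
import tensorflow as tf

# Band l = 1, order m = 0, evaluated at theta = pi/4, phi = 0.
degree_l = tf.constant([[1]], dtype=tf.int32)
order_m = tf.constant([[0]], dtype=tf.int32)
theta = tf.constant([[np.pi / 4.0]], dtype=tf.float32)
phi = tf.constant([[0.0]], dtype=tf.float32)
sh_value = evaluate_spherical_harmonics(degree_l, order_m, theta, phi)
# `sh_value` has shape [1, 1].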
Example No. 11
def ndc_to_screen(point_ndc_space,
                  lower_left_corner,
                  screen_dimensions,
                  near,
                  far,
                  name=None):
    """Transforms points from normalized device coordinates to screen coordinates.

  Note:
    In the following, A1 to An are optional batch dimensions which must be
    broadcast compatible between `point_ndc_space` and the other variables.

  Args:
    point_ndc_space: A tensor of shape `[A1, ..., An, 3]`, where the last
      dimension represents points in normalized device coordinates.
    lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last
      dimension captures the position (in pixels) of the lower left corner of
      the screen.
    screen_dimensions: A tensor of shape `[A1, ..., An, 2]`, where the last
      dimension is expressed in pixels and captures the width and the height (in
      pixels) of the screen.
    near:  A tensor of shape `[A1, ..., An, 1]`, where the last dimension
      captures the distance between the viewer and the near clipping plane. Note
      that values for `near` must be non-negative.
    far:  A tensor of shape `[A1, ..., An, 1]`, where the last dimension
      captures the distance between the viewer and the far clipping plane. Note
      that values for `far` must be greater than those of `near`.
    name: A name for this op. Defaults to 'ndc_to_screen'.

  Raises:
    InvalidArgumentError: if any input contains data not in the specified range
      of valid values.
    ValueError: If any input is of an unsupported shape.

  Returns:
    A tensor of shape `[A1, ..., An, 3]`, containing `point_ndc_space` in
    screen coordinates.
  """
    with tf.compat.v1.name_scope(
            name, "ndc_to_screen",
        [point_ndc_space, lower_left_corner, screen_dimensions, near, far]):
        point_ndc_space = tf.convert_to_tensor(value=point_ndc_space)
        lower_left_corner = tf.convert_to_tensor(value=lower_left_corner)
        screen_dimensions = tf.convert_to_tensor(value=screen_dimensions)
        near = tf.convert_to_tensor(value=near)
        far = tf.convert_to_tensor(value=far)

        shape.check_static(tensor=point_ndc_space,
                           tensor_name="point_ndc_space",
                           has_dim_equals=(-1, 3))
        shape.check_static(tensor=lower_left_corner,
                           tensor_name="lower_left_corner",
                           has_dim_equals=(-1, 2))
        shape.check_static(tensor=screen_dimensions,
                           tensor_name="screen_dimensions",
                           has_dim_equals=(-1, 2))
        shape.check_static(tensor=near,
                           tensor_name="near",
                           has_dim_equals=(-1, 1))
        shape.check_static(tensor=far,
                           tensor_name="far",
                           has_dim_equals=(-1, 1))

        shape.compare_batch_dimensions(tensors=(lower_left_corner,
                                                screen_dimensions, near, far),
                                       last_axes=-2,
                                       tensor_names=("lower_left_corner",
                                                     "screen_dimensions",
                                                     "near", "far"),
                                       broadcast_compatible=False)
        shape.compare_batch_dimensions(tensors=(point_ndc_space, near),
                                       last_axes=-2,
                                       tensor_names=("point_ndc_space",
                                                     "near"),
                                       broadcast_compatible=True)

        screen_dimensions = asserts.assert_all_above(screen_dimensions,
                                                     0.0,
                                                     open_bound=True)
        near = asserts.assert_all_above(near, 0.0, open_bound=True)
        far = asserts.assert_all_above(far, near, open_bound=True)

        ndc_to_screen_factor = tf.concat(
            (screen_dimensions, far - near), axis=-1) / 2.0
        screen_center = tf.concat(
            (lower_left_corner + screen_dimensions / 2.0, (near + far) / 2.0),
            axis=-1)
        return ndc_to_screen_factor * point_ndc_space + screen_center
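
A hedged usage sketch, assuming `ndc_to_screen` as defined above is in scope;
the screen and clipping values are illustrative.

import tensorflow as tf

point_ndc_space = tf.constant([[0.0, 0.0, 0.0]])      # center of the NDC cube
lower_left_corner = tf.constant([[0.0, 0.0]])
screen_dimensions = tf.constant([[640.0, 480.0]])
near = tf.constant([[0.1]])
far = tf.constant([[100.0]])
point_screen = ndc_to_screen(point_ndc_space, lower_left_corner,
                             screen_dimensions, near, far)
# The NDC origin maps to the screen center, approximately [320.0, 240.0, 50.05].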
Example No. 12
def triangulate(startpoints, endpoints, weights, name=None):
  """Triangulates 3d points by miminizing the sum of squared distances to rays.

  The rays are defined by their start points and endpoints. At least two rays
  are required to triangulate any given point. Contrary to the standard
  reprojection-error metric, the sum of squared distances to rays can be
  minimized in a closed form.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    startpoints: A tensor of ray start points with shape `[A1, ..., An, V, 3]`,
      the number of rays V around which the solution points live should be
      greater or equal to 2, otherwise triangulation is impossible.
    endpoints: A tensor of ray endpoints with shape `[A1, ..., An, V, 3]`, the
      number of rays V around which the solution points live should be greater
      or equal to 2, otherwise triangulation is impossible. The `endpoints`
      tensor should have the same shape as the `startpoints` tensor.
    weights: A tensor of ray weights (certainties) with shape `[A1, ..., An,
      V]`. Weights should have all positive entries. Weights should have at least
      two non-zero entries for each point (at least two rays should have
      certainties > 0).
    name: A name for this op. The default value of None means "ray_triangulate".

  Returns:
    A tensor of triangulated points with shape `[A1, ..., An, 3]`.

  Raises:
    ValueError: If the shape of the arguments is not supported.
  """
  with tf.compat.v1.name_scope(name, "ray_triangulate",
                               [startpoints, endpoints, weights]):
    startpoints = tf.convert_to_tensor(value=startpoints)
    endpoints = tf.convert_to_tensor(value=endpoints)
    weights = tf.convert_to_tensor(value=weights)

    shape.check_static(
        tensor=startpoints,
        tensor_name="startpoints",
        has_rank_greater_than=1,
        has_dim_equals=(-1, 3),
        has_dim_greater_than=(-2, 1))
    shape.check_static(
        tensor=endpoints,
        tensor_name="endpoints",
        has_rank_greater_than=1,
        has_dim_equals=(-1, 3),
        has_dim_greater_than=(-2, 1))
    shape.compare_batch_dimensions(
        tensors=(startpoints, endpoints, weights),
        last_axes=(-2, -2, -1),
        broadcast_compatible=False)
    weights = asserts.assert_all_above(weights, 0.0, open_bound=False)
    weights = asserts.assert_at_least_k_non_zero_entries(weights, k=2)

    left_hand_side_list = []
    right_hand_side_list = []
    # TODO: Replace the inefficient for loop and add comments here.
    for ray_id in range(weights.shape[-1]):
      weights_single_ray = weights[..., ray_id]
      startpoints_single_ray = startpoints[..., ray_id, :]
      endpoints_singleview = endpoints[..., ray_id, :]
      ray = endpoints_singleview - startpoints_single_ray
      ray = tf.nn.l2_normalize(ray, axis=-1)
      ray_x, ray_y, ray_z = tf.unstack(ray, axis=-1)
      zeros = tf.zeros_like(ray_x)
      cross_product_matrix = tf.stack(
          (zeros, -ray_z, ray_y, ray_z, zeros, -ray_x, -ray_y, ray_x, zeros),
          axis=-1)
      cross_product_matrix_shape = tf.concat(
          (tf.shape(input=cross_product_matrix)[:-1], (3, 3)), axis=-1)
      cross_product_matrix = tf.reshape(
          cross_product_matrix, shape=cross_product_matrix_shape)
      weights_single_ray = tf.expand_dims(weights_single_ray, axis=-1)
      weights_single_ray = tf.expand_dims(weights_single_ray, axis=-1)
      left_hand_side = weights_single_ray * cross_product_matrix
      left_hand_side_list.append(left_hand_side)
      dot_product = tf.matmul(cross_product_matrix,
                              tf.expand_dims(startpoints_single_ray, axis=-1))
      right_hand_side = weights_single_ray * dot_product
      right_hand_side_list.append(right_hand_side)
    left_hand_side_multi_rays = tf.concat(left_hand_side_list, axis=-2)
    right_hand_side_multi_rays = tf.concat(right_hand_side_list, axis=-2)
    points = tf.linalg.lstsq(left_hand_side_multi_rays,
                             right_hand_side_multi_rays)
    points = tf.squeeze(points, axis=-1)

    return points
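
A minimal usage sketch, assuming `triangulate` as defined above is in scope; the
two rays are illustrative and their supporting lines intersect at (1, 1, 0).

import tensorflow as tf

startpoints = tf.constant([[[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]]])  # [1, 2, 3]
endpoints = tf.constant([[[2.0, 2.0, 0.0], [0.0, 2.0, 0.0]]])    # [1, 2, 3]
weights = tf.constant([[1.0, 1.0]])                               # [1, 2]
point = triangulate(startpoints, endpoints, weights)
# `point` is approximately [[1.0, 1.0, 0.0]].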
Example No. 13
def intersection_ray_sphere(sphere_center,
                            sphere_radius,
                            ray,
                            point_on_ray,
                            name=None):
  """Finds positions and surface normals where the sphere and the ray intersect.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    sphere_center: A tensor of shape `[3]` representing the 3d sphere center.
    sphere_radius: A tensor of shape `[1]` containing a strictly positive value
      defining the radius of the sphere.
    ray: A tensor of shape `[A1, ..., An, 3]` containing normalized 3D vectors.
    point_on_ray: A tensor of shape `[A1, ..., An, 3]`.
    name: A name for this op. The default value of None means
      "ray_intersection_ray_sphere".

  Returns:
    A tensor of shape `[2, A1, ..., An, 3]` containing the position of the
    intersections, and a tensor of shape `[2, A1, ..., An, 3]` the associated
    surface normals at that point. Both tensors contain NaNs when there are no
    intersections. The first dimension of the returned tensor provides access to
    the first and second intersections of the ray with the sphere.

  Raises:
    ValueError: if the shape of `sphere_center`, `sphere_radius`, `ray` or
      `point_on_ray` is not supported.
    tf.errors.InvalidArgumentError: If `ray` is not normalized.
  """
  with tf.compat.v1.name_scope(
      name, "ray_intersection_ray_sphere",
      [sphere_center, sphere_radius, ray, point_on_ray]):
    sphere_center = tf.convert_to_tensor(value=sphere_center)
    sphere_radius = tf.convert_to_tensor(value=sphere_radius)
    ray = tf.convert_to_tensor(value=ray)
    point_on_ray = tf.convert_to_tensor(value=point_on_ray)

    shape.check_static(
        tensor=sphere_center,
        tensor_name="sphere_center",
        has_rank=1,
        has_dim_equals=(0, 3))
    shape.check_static(
        tensor=sphere_radius,
        tensor_name="sphere_radius",
        has_rank=1,
        has_dim_equals=(0, 1))
    shape.check_static(tensor=ray, tensor_name="ray", has_dim_equals=(-1, 3))
    shape.check_static(
        tensor=point_on_ray, tensor_name="point_on_ray", has_dim_equals=(-1, 3))
    shape.compare_batch_dimensions(
        tensors=(ray, point_on_ray),
        last_axes=(-2, -2),
        broadcast_compatible=False)
    sphere_radius = asserts.assert_all_above(
        sphere_radius, 0.0, open_bound=True)
    ray = asserts.assert_normalized(ray)

    vector_sphere_center_to_point_on_ray = sphere_center - point_on_ray
    distance_sphere_center_to_point_on_ray = tf.norm(
        tensor=vector_sphere_center_to_point_on_ray, axis=-1, keepdims=True)
    distance_projection_sphere_center_on_ray = vector.dot(
        vector_sphere_center_to_point_on_ray, ray)
    closest_distance_sphere_center_to_ray = tf.sqrt(
        tf.square(distance_sphere_center_to_point_on_ray) -
        tf.pow(distance_projection_sphere_center_on_ray, 2))
    half_secant_length = tf.sqrt(
        tf.square(sphere_radius) -
        tf.square(closest_distance_sphere_center_to_ray))
    distances = tf.stack(
        (distance_projection_sphere_center_on_ray - half_secant_length,
         distance_projection_sphere_center_on_ray + half_secant_length),
        axis=0)
    intersections_points = distances * ray + point_on_ray
    normals = tf.math.l2_normalize(
        intersections_points - sphere_center, axis=-1)
    return intersections_points, normals
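
A hedged usage sketch, assuming `intersection_ray_sphere` as defined above is in
scope; the sphere and ray are illustrative.

import tensorflow as tf

# Unit sphere at the origin, ray travelling along +x from (-2, 0, 0).
sphere_center = tf.constant([0.0, 0.0, 0.0])
sphere_radius = tf.constant([1.0])
ray = tf.constant([[1.0, 0.0, 0.0]])
point_on_ray = tf.constant([[-2.0, 0.0, 0.0]])
points, normals = intersection_ray_sphere(sphere_center, sphere_radius, ray,
                                          point_on_ray)
# `points` is approximately [[[-1., 0., 0.]], [[1., 0., 0.]]], shape [2, 1, 3].

Example No. 14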
def distortion_factor(
    squared_radius: type_alias.TensorLike,
    distortion_coefficient: type_alias.TensorLike,
    name: str = "quadratic_radial_distortion_distortion_factor"
) -> Tuple[tf.Tensor, tf.Tensor]:
    """Calculates a quadratic distortion factor given squared radii.

  Given a vector describing a location in camera space in homogeneous
  coordinates, `(x/z, y/z, 1)`, squared_radius is `r^2 = (x/z)^2 + (y/z)^2`.
  distortion_factor multiplies `x/z` and `y/z` to obtain the distorted
  coordinates. In this function, `distortion_factor` is given by
  `1.0 + distortion_coefficient * squared_radius`.

  Note:
    In the following, A1 to An are optional batch dimensions, which must be
    broadcast compatible.

  Args:
    squared_radius: A tensor of shape `[A1, ..., An, H, W]`, containing the
      squared radii of the image pixels, computed as `(x/z)^2 + (y/z)^2`. We use squared
      radius rather than the radius itself to avoid an unnecessary `sqrt`, which
      may introduce gradient singularities. The non-negativity of squared radius
      is only enforced in debug mode.
    distortion_coefficient: A `scalar` or a tensor of shape `[A1, ..., An]`,
      which contains the distortion coefficients of each image.
    name: A name for this op. Defaults to
      "quadratic_radial_distortion_distortion_factor".

  Returns:
    distortion_factor: A tensor of shape `[A1, ..., An, H, W]`, the correction
      factor that should multiply the projective coordinates `(x/z)` and `(y/z)`
      to apply the distortion.
    overflow_mask: A boolean tensor of shape `[A1, ..., An, H, W]`, `True` where
      `squared_radius` is beyond the range where the distortion function is
      monotonically increasing. Wherever `overflow_mask` is True,
      `distortion_factor`'s value is meaningless.
  """
    with tf.name_scope(name):
        squared_radius = tf.convert_to_tensor(value=squared_radius)
        distortion_coefficient = tf.convert_to_tensor(
            value=distortion_coefficient)

        if distortion_coefficient.shape.ndims == 0:
            distortion_coefficient = tf.expand_dims(distortion_coefficient,
                                                    axis=0)
        shape.check_static(tensor=squared_radius,
                           tensor_name="squared_radius",
                           has_rank_greater_than=1)
        shape.compare_batch_dimensions(tensors=(squared_radius,
                                                distortion_coefficient),
                                       tensor_names=("squared_radius",
                                                     "distortion_coefficient"),
                                       last_axes=(-3, -1),
                                       broadcast_compatible=True)
        squared_radius = asserts.assert_all_above(squared_radius,
                                                  0.0,
                                                  open_bound=False)
        distortion_coefficient = tf.expand_dims(distortion_coefficient,
                                                axis=-1)
        distortion_coefficient = tf.expand_dims(distortion_coefficient,
                                                axis=-1)
        distortion_coefficient_times_squared_radius = (distortion_coefficient *
                                                       squared_radius)
        distortion_factor_ = 1.0 + distortion_coefficient_times_squared_radius
        # This condition needs to hold for the distortion to be monotonically
        # increasing, as can be derived by differentiating it.
        overflow_mask = tf.less(
            1.0 + 3.0 * distortion_coefficient_times_squared_radius, 0.0)
        return distortion_factor_, overflow_mask
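Example No. 15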
def undistortion_factor(
    distorted_squared_radius: type_alias.TensorLike,
    distortion_coefficient: type_alias.TensorLike,
    num_iterations: int = 5,
    name: str = "quadratic_radial_distortion_undistortion_factor"
) -> Tuple[tf.Tensor, tf.Tensor]:
    """Calculates the inverse quadratic distortion function given squared radii.

  Given a vector describing a location in camera space in homogeneous
  coordinates `(x/z, y/z, 1)`, after distortion has been applied, these become
  `(x'/z, y'/z, 1)`. `distorted_squared_radius` is `(x'/z)^2 + (y'/z)^2`.
  `undistortion_factor` multiplies `x'/z` and `y'/z` to obtain the undistorted
  projective coordinates `x/z` and `y/z`.
  The undistortion factor in this function is derived from a quadratic
  distortion function, where the distortion factor equals
  `1.0 + distortion_coefficient * squared_radius`.

  Note:
    In the following, A1 to An are optional batch dimensions, which must be
    broadcast compatible.

  Args:
    distorted_squared_radius: A tensor of shape `[A1, ..., An, H, W]` containing
      the value of the distorted projective coordinates `(x'/z)^2 + (y'/z)^2`. For each pixel
      it contains the squared distance of that pixel to the center of the image
      plane. We use `distorted_squared_radius` rather than the distorted radius
      itself to avoid an unnecessary `sqrt`, which may introduce gradient
      singularities. The non-negativity of `distorted_squared_radius` is only
      enforced in debug mode.
    distortion_coefficient: A `scalar` or a tensor of shape `[A1, ..., An]`,
      which contains the distortion coefficients of each image.
    num_iterations: Number of Newton-Raphson iterations to calculate the inverse
      distortion function. Defaults to 5, which is on the high-accuracy side.
    name: A name for this op. Defaults to
      "quadratic_radial_distortion_undistortion_factor".

  Returns:
    undistortion: A tensor of shape `[A1, ..., An, H, W]` containing the
      correction factor that should multiply the distorted projective
      coordinates `(x'/z)` and `(y'/z)` to obtain the undistorted ones.
    overflow_mask: A `bool` tensor of shape `[A1, ..., An, H, W]`, `True` where
      `distorted_squared_radius` is beyond the range where the distortion
      function is monotonically increasing. Wherever `overflow_mask` is `True`,
      `undistortion_factor`'s value is meaningless.

  """
    with tf.name_scope(name):
        distorted_squared_radius = tf.convert_to_tensor(
            value=distorted_squared_radius)
        distortion_coefficient = tf.convert_to_tensor(
            value=distortion_coefficient)

        if distortion_coefficient.shape.ndims == 0:
            distortion_coefficient = tf.expand_dims(distortion_coefficient,
                                                    axis=0)
        shape.check_static(tensor=distorted_squared_radius,
                           tensor_name="distorted_squared_radius",
                           has_rank_greater_than=1)
        shape.compare_batch_dimensions(
            tensors=(distorted_squared_radius, distortion_coefficient),
            tensor_names=("distorted_squared_radius",
                          "distortion_coefficient"),
            last_axes=(-3, -1),
            broadcast_compatible=True)
        distorted_squared_radius = asserts.assert_all_above(
            distorted_squared_radius, 0.0, open_bound=False)
        distortion_coefficient = tf.expand_dims(distortion_coefficient,
                                                axis=-1)
        distortion_coefficient = tf.expand_dims(distortion_coefficient,
                                                axis=-1)
        # For a distortion function of r' = (1 + ar^2)r, with a negative a, the
        # maximum r until which r'(r) is monotonically increasing is r^2 = -1/(3a).
        # At that value, r'^2 = -4 / (27a). Therefore the overflow condition for r'
        # is ar'^2 +(4/27.0) < 0. For a positive a it never holds, as it should,
        # because then r' is monotonic in r everywhere and thus never overflows.
        distortion_coefficient_times_distorted_squared_radius = (
            distortion_coefficient * distorted_squared_radius)
        overflow_mask = tf.less(
            4.0 / 27.0 + distortion_coefficient_times_distorted_squared_radius,
            0.0)

        # Newton-Raphson iterations. The expression below is obtained from
        # algebraically simplifying the Newton-Raphson formula
        # (https://en.wikipedia.org/wiki/Newtons_method).
        # We initialize with the approximate formula for the undistortion function
        # given here https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4934233/.
        undistortion = (1.0 -
                        distortion_coefficient_times_distorted_squared_radius)
        for _ in range(num_iterations):
            two_thirds_undistortion = 2.0 * undistortion / 3.0
            undistortion = (1.0 - two_thirds_undistortion) / (
                1.0 +
                3.0 * distortion_coefficient_times_distorted_squared_radius *
                undistortion * undistortion) + two_thirds_undistortion
        return undistortion, overflow_mask
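
A hedged round-trip sketch, assuming both `distortion_factor` and
`undistortion_factor` above are in scope; the radii and coefficient are
illustrative.

import tensorflow as tf

squared_radius = tf.constant([[[0.0, 0.25], [1.0, 2.25]]])  # shape [1, 2, 2]
distortion_coefficient = tf.constant([0.1])                  # one value per image
factor, overflow = distortion_factor(squared_radius, distortion_coefficient)
distorted_squared_radius = squared_radius * factor * factor
undistortion, _ = undistortion_factor(distorted_squared_radius,
                                      distortion_coefficient)
# Wherever `overflow` is False, factor * undistortion is approximately 1.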
Example No. 16
def interpolate(points,
                weights,
                indices,
                normalize=True,
                allow_negative_weights=False,
                name=None):
    """Weighted interpolation for M-D point sets.

  Given an M-D point set, this function can be used to generate a new point set
  that is formed by interpolating a subset of points in the set.

  Note:
    In the following, A1 to An, and B1 to Bk are optional batch dimensions.

  Args:
    points: A tensor with shape `[B1, ..., Bk, M]` and rank R > 1, where M is the
      dimensionality of the points.
    weights: A tensor with shape `[A1, ..., An, P]`, where P is the number of
      points to interpolate for each output point.
    indices: A tensor of dtype tf.int32 and shape `[A1, ..., An, P, R-1]`, which
      contains the point indices to be used for each output point. The R-1
      dimensional axis gives the slice index of a single point in `points`. The
      first n+1 dimensions of weights and indices must match, or be broadcast
      compatible.
    normalize: A `bool` describing whether or not to normalize the weights on
      the last axis.
    allow_negative_weights: A `bool` describing whether or not negative weights
      are allowed.
    name: A name for this op. Defaults to "weighted_interpolate".

  Returns:
    A tensor of shape `[A1, ..., An, M]` storing the interpolated M-D
    points. The first n dimensions will be the same as weights and indices.
  """
    with tf.compat.v1.name_scope(name, "weighted_interpolate",
                                 [points, weights, indices]):
        points = tf.convert_to_tensor(value=points)
        weights = tf.convert_to_tensor(value=weights)
        indices = tf.convert_to_tensor(value=indices)

        shape.check_static(tensor=points,
                           tensor_name="points",
                           has_rank_greater_than=1)
        shape.check_static(tensor=indices,
                           tensor_name="indices",
                           has_rank_greater_than=1,
                           has_dim_equals=(-1, points.shape.ndims - 1))
        shape.compare_dimensions(tensors=(weights, indices),
                                 axes=(-1, -2),
                                 tensor_names=("weights", "indices"))
        shape.compare_batch_dimensions(tensors=(weights, indices),
                                       last_axes=(-2, -3),
                                       tensor_names=("weights", "indices"),
                                       broadcast_compatible=True)
        if not allow_negative_weights:
            weights = asserts.assert_all_above(weights, 0.0, open_bound=False)

        if normalize:
            sums = tf.reduce_sum(input_tensor=weights, axis=-1, keepdims=True)
            sums = asserts.assert_nonzero_norm(sums)
            weights = safe_ops.safe_signed_div(weights, sums)
        point_lists = tf.gather_nd(points, indices)
        return vector.dot(point_lists,
                          tf.expand_dims(weights, axis=-1),
                          axis=-2,
                          keepdims=False)
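
A minimal usage sketch, assuming `interpolate` as defined above is in scope; the
point set, weights, and indices are illustrative.

import tensorflow as tf

# One output point as the equal-weight blend of the first two 2-D points.
points = tf.constant([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])   # [3, 2], M = 2
weights = tf.constant([[0.5, 0.5]])                           # [1, 2], P = 2
indices = tf.constant([[[0], [1]]], dtype=tf.int32)           # [1, 2, 1]
interpolated = interpolate(points, weights, indices)
# `interpolated` is approximately [[0.5, 0.0]].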