Example 1
 def sample_at(self, x):
     # Broadcast the per-batch wave parameters against the sample points.
     phase_offset = math.batch_align_scalar(self.phase_offset, 0, x)
     k = math.batch_align(self.k, 1, x)
     data = math.batch_align_scalar(self.data, 0, x)
     # Plane wave: sin(k . x + phase), scaled by the amplitude stored in `data`.
     spatial_phase = math.sum(k * x, -1, keepdims=True)
     wave = math.sin(spatial_phase + phase_offset) * data
     return math.cast(wave, self.dtype)
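
For reference, the same plane-wave evaluation can be reproduced outside the field class; the following is a minimal NumPy sketch with hypothetical stand-ins (k as a plain wave vector, phase_offset and amplitude as scalars), not the phi.math implementation:

import numpy as np

def plane_wave_sample(points, k, phase_offset, amplitude, dtype=np.float32):
    # points: (..., d) sample locations, k: (d,) wave vector
    spatial_phase = np.sum(k * points, axis=-1, keepdims=True)
    wave = np.sin(spatial_phase + phase_offset) * amplitude
    return wave.astype(dtype)  # plays the role of math.cast(wave, self.dtype)

For example, plane_wave_sample(np.zeros((4, 2)), np.array([1.0, 0.0]), 0.0, 1.0) returns a zero array of shape (4, 1).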
Example 2
 def centered_grid(self,
                   data,
                   components=1,
                   dtype=None,
                   name=None,
                   batch_size=None,
                   extrapolation=None):
     warnings.warn(
         "Domain.centered_shape and Domain.centered_grid are deprecated. Use CenteredGrid.sample() instead.",
         DeprecationWarning)
     from phi.physics.field import CenteredGrid
     if callable(data):  # data is an initializer
         shape = self.centered_shape(components,
                                     batch_size=batch_size,
                                     name=name,
                                     extrapolation=extrapolation,
                                     age=())
         try:
             grid = data(shape, dtype=dtype)
         except TypeError:
             grid = data(shape)
         if grid.age == ():
             grid._age = 0.0
     else:
         grid = CenteredGrid.sample(data, self, batch_size=batch_size)
     assert grid.component_count == components, "Field has %d components but %d are required for '%s'" % (
         grid.component_count, components, name)
     if dtype is not None and math.dtype(grid.data) != dtype:
         grid = grid.copied_with(data=math.cast(grid.data, dtype))
     if name is not None:
     grid = grid.copied_with(name=name, tags=(name,) + grid.tags)
     if extrapolation is not None:
         grid = grid.copied_with(extrapolation=extrapolation)
     return grid
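
The branch on callable(data) above is the interesting part: initializers that accept a dtype keyword are preferred, with a fall-back for those that only take a shape. Below is a standalone sketch of that dispatch pattern with hypothetical names (make_grid is not part of the Domain API):

def make_grid(data, shape, dtype=None):
    if callable(data):  # data is an initializer
        try:
            return data(shape, dtype=dtype)  # initializer that accepts a dtype
        except TypeError:
            return data(shape)               # shape-only initializer
    return data  # concrete data; the method above hands this case to CenteredGrid.sample

For example, make_grid(lambda s: [0.0] * s, 3) hits the TypeError fall-back and returns [0.0, 0.0, 0.0].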
Example 3
 def test_cast(self):
     for backend in BACKENDS:
         with backend:
             x = math.random_uniform(dtype=DType(float, 64))
             self.assertEqual(DType(float, 32), math.to_float(x).dtype, msg=backend.name)
             self.assertEqual(DType(complex, 64), math.to_complex(x).dtype, msg=backend.name)
             with math.precision(64):
                 self.assertEqual(DType(float, 64), math.to_float(x).dtype, msg=backend.name)
                 self.assertEqual(DType(complex, 128), math.to_complex(x).dtype, msg=backend.name)
             self.assertEqual(DType(int, 64), math.to_int64(x).dtype, msg=backend.name)
             self.assertEqual(DType(int, 32), math.to_int32(x).dtype, msg=backend.name)
             self.assertEqual(DType(float, 16), math.cast(x, DType(float, 16)).dtype, msg=backend.name)
             self.assertEqual(DType(complex, 128), math.cast(x, DType(complex, 128)).dtype, msg=backend.name)
             try:
                 math.cast(x, DType(float, 3))  # casting to an unsupported bit width must fail
                 self.fail(msg=backend.name)
             except KeyError:
                 pass
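
The default-precision behaviour this test relies on can be imitated with a small context manager; a sketch under the assumption that to_float should follow a global precision setting (hypothetical names, NumPy dtypes instead of the phi backends):

import contextlib
import numpy as np

_PRECISION = [32]  # current float precision in bits, innermost setting last

@contextlib.contextmanager
def precision(bits):
    _PRECISION.append(bits)
    try:
        yield
    finally:
        _PRECISION.pop()

def to_float(x):
    return np.asarray(x, dtype=np.float64 if _PRECISION[-1] == 64 else np.float32)

x = np.random.uniform(size=4)                # float64 by default
assert to_float(x).dtype == np.float32       # default precision is 32 bit
with precision(64):
    assert to_float(x).dtype == np.float64   # temporarily widened to 64 bit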
Example 4
def gradients(y, xs, grad_y=None):
    """
    Compute the analytic gradients using TensorFlow's automatic differentiation.

    :param y: tensor or struct of tensors. The contributions of all tensors in `y` are added up.
    :param xs: struct of input tensors
    :return: struct compatible with `xs` holding dy/dx
    """
    ys = struct.flatten(y)
    if grad_y is not None:
        grad_y = struct.flatten(grad_y)
        for i in range(len(grad_y)):
            grad_y[i] = math.cast(grad_y[i], math.dtype(ys[i]))
    xs_ = struct.flatten(xs)
    grad = tf.gradients(ys, xs_, grad_ys=grad_y)
    return struct.unflatten(grad, xs)
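
The wrapper ultimately delegates to tf.gradients on the flattened structs. In TensorFlow 2, tf.gradients is only valid in a graph context, so a standalone call looks roughly like this (a sketch, not part of the module above):

import tensorflow as tf

@tf.function
def grad_of_square(x):
    y = x ** 2
    return tf.gradients(y, [x])[0]  # same call, here on plain tensors instead of structs

print(grad_of_square(tf.constant(3.0)))  # dy/dx of x**2 at x=3 -> 6.0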
Example 5
 def sample_at(self, points):
     # Quadratic potential: |(points - center) / unit_distance|^2 scaled by `data`,
     # optionally clipped at `maximum_value`.
     x = (points - self.center) / self.unit_distance
     pot = math.sum(x**2, -1, keepdims=True) * self.data
     if self.maximum_value is not None:
         pot = math.minimum(pot, self.maximum_value)
     return math.cast(pot, np.float32)
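
The same potential can be evaluated with plain NumPy; a minimal sketch with the field's attributes passed in explicitly (all parameter names hypothetical):

import numpy as np

def quadratic_potential(points, center, unit_distance, strength, maximum_value=None):
    # pot(p) = strength * |(p - center) / unit_distance|^2, optionally clipped
    x = (np.asarray(points) - center) / unit_distance
    pot = np.sum(x ** 2, axis=-1, keepdims=True) * strength
    if maximum_value is not None:
        pot = np.minimum(pot, maximum_value)
    return pot.astype(np.float32)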