def centered_grid(self, data, components=1, dtype=None, name=None, batch_size=None, extrapolation=None):
    """Deprecated: sample `data` as a CenteredGrid on this domain.

    :param data: constant, tensor or callable initializer. If callable, it is
      invoked with the centered shape (and `dtype`, when accepted) to build the grid.
    :param components: required number of channel components of the result
    :param dtype: if given, the grid data is cast to this dtype
    :param name: if given, set as the grid's name and prepended to its tags
    :param batch_size: batch dimension forwarded to shape construction / sampling
    :param extrapolation: if given, set as the grid's extrapolation
    :return: a CenteredGrid matching the requested components/dtype/name/extrapolation
    """
    # stacklevel=2 attributes the warning to the caller of this deprecated API,
    # not to this wrapper itself (the original emitted it at stacklevel 1).
    warnings.warn("Domain.centered_shape and Domain.centered_grid are deprecated. Use CenteredGrid.sample() instead.",
                  DeprecationWarning, stacklevel=2)
    from phi.physics.field import CenteredGrid
    if callable(data):  # data is an initializer
        shape = self.centered_shape(components, batch_size=batch_size, name=name, extrapolation=extrapolation, age=())
        try:
            grid = data(shape, dtype=dtype)
        except TypeError:  # initializer does not accept a dtype keyword
            grid = data(shape)
        if grid.age == ():
            grid._age = 0.0
    else:
        grid = CenteredGrid.sample(data, self, batch_size=batch_size)
    assert grid.component_count == components, "Field has %d components but %d are required for '%s'" % (grid.component_count, components, name)
    if dtype is not None and math.dtype(grid.data) != dtype:
        grid = grid.copied_with(data=math.cast(grid.data, dtype))
    if name is not None:
        grid = grid.copied_with(name=name, tags=(name,) + grid.tags)
    if extrapolation is not None:
        grid = grid.copied_with(extrapolation=extrapolation)
    return grid
def from_scalar(scalar_field, axis_forces, name=None):
    """Build a StaggeredGrid from a scalar CenteredGrid scaled by per-axis forces.

    Each staggered component is the face-interpolated scalar (mean of the two
    adjacent cell paddings) multiplied by that axis' force. A force that is the
    number zero short-circuits to an all-zero component of the staggered shape.

    :param scalar_field: CenteredGrid with exactly one channel component
    :param axis_forces: list/tuple indexed by axis, or a tensor indexed along the last dimension
    :param name: optional name for the resulting StaggeredGrid
    :return: StaggeredGrid on `scalar_field.box`
    """
    assert isinstance(scalar_field, CenteredGrid)
    assert scalar_field.component_count == 1, 'channel must be scalar but has %d components' % scalar_field.component_count
    components = []
    for dim in range(scalar_field.rank):
        if isinstance(axis_forces, (list, tuple)):
            force = axis_forces[dim]
        else:
            force = axis_forces[..., dim]
        if isinstance(force, Number) and force == 0:
            # Zero force: staggered component along `dim` is one cell longer than the centered data.
            zero_shape = list(math.staticshape(scalar_field.data))
            zero_shape[dim + 1] += 1
            components.append(math.zeros(zero_shape, math.dtype(scalar_field.data)))
        else:
            upper = scalar_field.axis_padded(dim, 0, 1).data
            lower = scalar_field.axis_padded(dim, 1, 0).data
            components.append(math.mul((upper + lower) / 2, force))
    return StaggeredGrid(components, scalar_field.box, name=name, batch_size=scalar_field._batch_size)
def gradients(y, xs, grad_y=None):
    """Compute analytic gradients using TensorFlow's automatic differentiation.

    :param y: tensor or struct of tensors; the contributions of all tensors in `y` are added up
    :param xs: struct of input tensors
    :param grad_y: optional upstream gradients, a struct matching `y`; each entry is
      cast to the dtype of the corresponding tensor in `y`
    :return: struct compatible with `xs` holding dy/dx
    """
    ys = struct.flatten(y)
    if grad_y is not None:
        grad_y = struct.flatten(grad_y)
        # Align each upstream gradient's dtype with its corresponding output tensor.
        for i, g in enumerate(grad_y):
            grad_y[i] = math.cast(g, math.dtype(ys[i]))
    flat_xs = struct.flatten(xs)
    grad = tf.gradients(ys, flat_xs, grad_ys=grad_y)
    return struct.unflatten(grad, xs)
def dtype(self):
    """Data type of the underlying `data` tensor, as reported by phi's math backend."""
    raw = self.data
    return math.dtype(raw)