def _shift_resample(self, resolution: Shape, bounds: Box, threshold=1e-5, max_padding=20):
    assert math.all_available(bounds.lower, bounds.upper), "Shift resampling requires 'bounds' to be available."
    lower = math.to_int32(math.ceil(math.maximum(0, self.box.lower - bounds.lower) / self.dx - threshold))
    upper = math.to_int32(math.ceil(math.maximum(0, bounds.upper - self.box.upper) / self.dx - threshold))
    total_padding = (math.sum(lower) + math.sum(upper)).numpy()
    if total_padding > max_padding:
        return NotImplemented
    elif total_padding > 0:
        from phi.field import pad
        padded = pad(self, {dim: (int(lower[i]), int(upper[i])) for i, dim in enumerate(self.shape.spatial.names)})
        grid_box, grid_resolution, grid_values = padded.box, padded.resolution, padded.values
    else:
        grid_box, grid_resolution, grid_values = self.box, self.resolution, self.values
    origin_in_local = grid_box.global_to_local(bounds.lower) * grid_resolution
    data = math.sample_subgrid(grid_values, origin_in_local, resolution)
    return data
def test_np_speed_sum(self):
    print()
    np1, np2 = rnpv(64), rnpv(256)
    t1 = math.tensor(np1, batch('batch'), spatial('x, y'), channel('vector'))
    t2 = math.tensor(np2, batch('batch'), spatial('x, y'), channel('vector'))
    _assert_equally_fast(lambda: np.sum(np1), lambda: math.sum(t1, dim=t1.shape), n=10000)
    _assert_equally_fast(lambda: np.sum(np2), lambda: math.sum(t2, dim=t2.shape), n=10000)
def at(self, other_field):
    if self.compatible(other_field):
        return self
    if isinstance(other_field, CenteredGrid) and np.allclose(self.dx, other_field.dx):
        paddings = _required_paddings_transposed(self.box, self.dx, other_field.box)
        if math.sum(paddings) == 0:
            origin_in_local = self.box.global_to_local(other_field.box.lower) * self.resolution
            data = _crop_for_interpolation(self.data, origin_in_local, other_field.resolution)
            dimensions = self.resolution != other_field.resolution
            dimensions = [d for d in math.spatial_dimensions(data) if dimensions[d - 1]]
            data = math.interpolate_linear(data, origin_in_local % 1.0, dimensions)
            return CenteredGrid(data, other_field.box, name=self.name, batch_size=self._batch_size)
        elif math.sum(paddings) < 16:
            padded = self.padded(np.transpose(paddings).tolist())
            return padded.at(other_field)
    return Field.at(self, other_field)
def _shift_resample(self, resolution, box, threshold=1e-5, max_padding=20):
    lower = math.to_int(math.ceil(math.maximum(0, self.box.lower - box.lower) / self.dx - threshold))
    upper = math.to_int(math.ceil(math.maximum(0, box.upper - self.box.upper) / self.dx - threshold))
    total_padding = math.sum(lower) + math.sum(upper)
    if total_padding > max_padding:
        return NotImplemented
    elif total_padding > 0:
        from phi.field import pad
        padded = pad(self, {dim: (int(lower[i]), int(upper[i])) for i, dim in enumerate(self.shape.spatial.names)})
        grid_box, grid_resolution, grid_values = padded.box, padded.resolution, padded.values
    else:
        grid_box, grid_resolution, grid_values = self.box, self.resolution, self.values
    origin_in_local = grid_box.global_to_local(box.lower) * grid_resolution
    data = math.sample_subgrid(grid_values, origin_in_local, resolution)
    return data
def normalize_to(target, source=1):
    """
    Multiplies the target so that its total content matches the source.

    :param target: a tensor
    :param source: a tensor or number
    :return: normalized tensor of the same shape as target
    """
    return target * (math.sum(source) / math.sum(target))
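# Illustrative NumPy sketch (not part of the library code above; names are hypothetical) showing
# the effect normalize_to computes: the target is rescaled so that its total matches the source total.
import numpy as np

target = np.array([1.0, 2.0, 3.0])            # sums to 6
normalized = target * (1.0 / target.sum())    # source=1 (the default)
assert np.isclose(normalized.sum(), 1.0)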
def test_np_speed_sum(self):
    np1, np2 = rnpv(64), rnpv(256)
    t1, t2 = math.tensors(np1, np2, names='batch,x,y,vector')
    _assert_equally_fast(lambda: np.sum(np1), lambda: math.sum(t1), n=10000)
    _assert_equally_fast(lambda: np.sum(np2), lambda: math.sum(t2), n=10000)
def l1_loss(tensor, batch_norm=True, reduce_batches=True):
    if isinstance(tensor, StaggeredGrid):
        tensor = tensor.staggered
    if reduce_batches:
        total_loss = math.sum(math.abs(tensor))
    else:
        total_loss = math.sum(math.abs(tensor), axis=list(range(1, len(tensor.shape))))
    if batch_norm and reduce_batches:
        batch_size = math.shape(tensor)[0]
        return total_loss / math.to_float(batch_size)
    else:
        return total_loss
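# Illustrative NumPy sketch (hypothetical names, assumed (batch, x, y, channels) layout) of the two
# reduction modes in l1_loss above: either reduce over everything and normalize by the batch size,
# or keep one value per batch entry.
import numpy as np

x = np.random.randn(4, 8, 8, 1)                    # (batch, x, y, channels)
per_example = np.sum(np.abs(x), axis=(1, 2, 3))    # reduce_batches=False: one value per batch entry
batch_mean = np.sum(np.abs(x)) / x.shape[0]        # reduce_batches=True, batch_norm=True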
def l_n_loss(tensor, n, batch_norm=True, reduce_batches=True):
    if isinstance(tensor, StaggeredGrid):
        tensor = tensor.staggered
    if reduce_batches:
        total_loss = math.sum(tensor ** n) / n
    else:
        total_loss = math.sum(tensor ** n, axis=list(range(1, len(tensor.shape)))) / n
    if batch_norm:
        batch_size = math.shape(tensor)[0]
        return total_loss / math.to_float(batch_size)
    else:
        return total_loss
from contextlib import contextmanager


@contextmanager
def plot_solves():
    """
    Records all solves within the context via `math.SolveTape` and plots their residual trajectories on exit.

    While `plot_solves()` is active, certain performance optimizations and algorithm implementations may be disabled.
    """
    from . import math
    import pylab
    cycle = pylab.rcParams['axes.prop_cycle'].by_key()['color']
    with math.SolveTape(record_trajectories=True) as solves:
        try:
            yield solves
        finally:
            for i, result in enumerate(solves):
                assert isinstance(result, math.SolveInfo)
                from phi.math._tensors import disassemble_tree
                _, (residual,) = disassemble_tree(result.residual)
                residual_mse = math.mean(math.sqrt(math.sum(residual ** 2)), residual.shape.without('trajectory'))
                residual_mse_max = math.max(math.sqrt(math.sum(residual ** 2)), residual.shape.without('trajectory'))
                # residual_mean = math.mean(math.abs(residual), residual.shape.without('trajectory'))
                residual_max = math.max(math.abs(residual), residual.shape.without('trajectory'))
                pylab.plot(residual_mse.numpy(), label=f"{i}: {result.method}", color=cycle[i % len(cycle)])
                pylab.plot(residual_max.numpy(), '--', alpha=0.2, color=cycle[i % len(cycle)])
                pylab.plot(residual_mse_max.numpy(), alpha=0.2, color=cycle[i % len(cycle)])
                print(f"Solve {i}: {result.method} ({1000 * result.solve_time:.1f} ms)\n"
                      f"\t{result.solve}\n"
                      f"\t{result.msg}\n"
                      f"\tConverged: {result.converged}\n"
                      f"\tDiverged: {result.diverged}\n"
                      f"\tIterations: {result.iterations}\n"
                      f"\tFunction evaluations: {result.function_evaluations.trajectory[-1]}")
            pylab.yscale('log')
            pylab.ylabel("Residual: MSE / max / individual max")
            pylab.xlabel("Iteration")
            pylab.title("Solve Convergence")
            pylab.legend(loc='upper right')
            pylab.savefig("pressure-solvers-FP32.png")
            pylab.show()
def test_integrate_all(self):
    grid = CenteredGrid(field.Noise(vector=2), extrapolation.ZERO, x=10, y=10, bounds=Box[0:10, 0:10])
    math.assert_close(field.integrate(grid, grid.bounds), math.sum(grid.values, 'x,y'))
    grid = CenteredGrid(field.Noise(vector=2), extrapolation.ZERO, x=10, y=10, bounds=Box[0:1, 0:1])
    math.assert_close(field.integrate(grid, grid.bounds), math.sum(grid.values, 'x,y') / 100)
def divergence(field: Grid) -> CenteredGrid:
    """
    Computes the divergence of a grid using finite differences.

    This function can operate in two modes depending on the type of `field`:

    * `CenteredGrid` approximates the divergence at cell centers using central differences
    * `StaggeredGrid` exactly computes the divergence at cell centers

    Args:
        field: vector field as `CenteredGrid` or `StaggeredGrid`

    Returns:
        Divergence field as `CenteredGrid`
    """
    if isinstance(field, StaggeredGrid):
        components = []
        for i, dim in enumerate(field.shape.spatial.names):
            div_dim = math.gradient(field.values.vector[i], dx=field.dx[i], difference='forward', padding=None, dims=[dim]).gradient[0]
            components.append(div_dim)
        data = math.sum(components, 0)
        return CenteredGrid(data, field.box, field.extrapolation.gradient())
    elif isinstance(field, CenteredGrid):
        left, right = shift(field, (-1, 1), stack_dim='div_')
        grad = (right - left) / (field.dx * 2)
        components = [grad.vector[i].div_[i] for i in range(grad.div_.size)]
        result = sum(components)
        return result
    else:
        raise NotImplementedError(f"{type(field)} not supported. Only CenteredGrid and StaggeredGrid are supported.")
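# Illustrative NumPy sketch (hypothetical names, periodic boundaries via np.roll) of the CenteredGrid
# branch above: central differences per component, summed over components. The library version uses
# shift() with the grid's extrapolation instead of a periodic roll.
import numpy as np

def divergence_centered(u, dx=1.0):
    """u: velocity field of shape (nx, ny, 2); returns the divergence with shape (nx, ny)."""
    dudx = (np.roll(u[..., 0], -1, axis=0) - np.roll(u[..., 0], 1, axis=0)) / (2 * dx)
    dvdy = (np.roll(u[..., 1], -1, axis=1) - np.roll(u[..., 1], 1, axis=1)) / (2 * dx)
    return dudx + dvdy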
def sample_at(self, points):
    if self.mode == 'EXP':
        envelope = math.exp(-0.5 * math.sum((points - self.center) ** 2, axis=-1, keepdims=True) / self.size ** 2)
        envelope = math.to_float(envelope)
        return envelope * self.factor
    elif self.mode == 'RECT':
        conf = np.zeros(points.shape)
        conf[:, self.center[0] - self.size:self.center[0] + self.size,
             self.center[1] - self.size:self.center[1] + self.size, :] = np.ones(
            conf[:, self.center[0] - self.size:self.center[0] + self.size,
                 self.center[1] - self.size:self.center[1] + self.size, :].shape)
        return conf[:, :, :, :-1] * self.factor
    elif self.mode == 'RANDOM':
        conf = np.random.random_sample(points.shape)
        conf[:, 0, :, :] *= 0
        conf[:, -1, :, :] *= 0
        conf[:, :, 0, :] *= 0
        conf[:, :, -1, :] *= 0
        return conf[:, :, :, :-1] * self.factor
def test_grid_sample(self):
    for backend in BACKENDS:
        with backend:
            grid = math.sum(math.meshgrid(x=[1, 2, 3], y=[0, 3]), 'vector')  # 1 2 3 | 4 5 6
            coords = math.tensor([(0, 0), (0.5, 0), (0, 0.5), (-2, -1)], instance('list'), channel('vector'))
            interp = math.grid_sample(grid, coords, extrapolation.ZERO)
            math.assert_close(interp, [1, 1.5, 2.5, 0], msg=backend.name)
def sample_at(self, points):
    envelope = math.exp(-0.5 * math.sum((points - self.center) ** 2, axis=-1, keepdims=True) / self.size ** 2)
    envelope = math.to_float(envelope)
    wave = math.exp(1j * math.to_float(math.expand_dims(np.dot(points, self.wave_vector), -1))) * envelope
    return wave * self.data
def test_domain_grid_from_function(self):
    grid = Domain(x=4, y=3).scalar_grid(lambda x: math.sum(x ** 2, 'vector'))
    math.assert_close(grid.values.x[0].y[0], 0.5)
    self.assertEqual(grid.shape.volume, 12)
    grid = Domain(x=4, y=3).scalar_grid(lambda x: math.ones(x.shape.non_channel))
    math.assert_close(grid.values, 1)
def grid_sample(self, resolution, size, batch_size=1, channels=None):
    channels = channels or self.channels or len(size)
    shape = (batch_size,) + tuple(resolution) + (channels,)
    rndj = math.to_complex(self.math.random_normal(shape)) + 1j * math.to_complex(self.math.random_normal(shape))  # Note: there is no complex32
    k = math.fftfreq(resolution) * resolution / size * self.scale  # in physical units
    k = math.sum(k ** 2, axis=-1, keepdims=True)
    lowest_frequency = 0.1
    weight_mask = 1 / (1 + math.exp((lowest_frequency - k) * 1e3))  # High pass filter
    # --- Compute 1/k ---
    k[(0,) * len(k.shape)] = np.inf
    inv_k = 1 / k
    inv_k[(0,) * len(k.shape)] = 0
    # --- Compute result ---
    fft = rndj * inv_k ** self.smoothness * weight_mask
    array = math.real(math.ifft(fft))
    array /= math.std(array, axis=tuple(range(1, math.ndims(array))), keepdims=True)
    array -= math.mean(array, axis=tuple(range(1, math.ndims(array))), keepdims=True)
    array = math.to_float(array)
    return array
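# Illustrative 2D NumPy sketch (hypothetical names; assumes a unit domain and omits the batch,
# channel and scale handling of the original) of the Fourier noise synthesis above: a random
# complex spectrum is scaled by 1/k**smoothness, high-pass filtered, transformed back to real
# space and normalized to zero mean and unit standard deviation.
import numpy as np

def fourier_noise(nx, ny, smoothness=1.0):
    rnd = np.random.normal(size=(nx, ny)) + 1j * np.random.normal(size=(nx, ny))
    kx, ky = np.meshgrid(np.fft.fftfreq(nx) * nx, np.fft.fftfreq(ny) * ny, indexing='ij')
    k = kx ** 2 + ky ** 2
    weight_mask = 1 / (1 + np.exp((0.1 - k) * 1e3))   # suppress the lowest frequencies
    with np.errstate(divide='ignore'):
        inv_k = np.where(k == 0, 0.0, 1.0 / k)
    array = np.real(np.fft.ifft2(rnd * inv_k ** smoothness * weight_mask))
    return (array - array.mean()) / array.std()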
def lies_inside(self, location):
    center = math.batch_align(self.center, 1, location)
    radius = math.batch_align(self.radius, 0, location)
    distance_squared = math.sum((location - center) ** 2, axis=-1, keepdims=True)
    return distance_squared <= radius ** 2
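# Illustrative NumPy sketch (hypothetical values) of the point-in-sphere test above: a point lies
# inside when its squared distance to the center does not exceed the squared radius.
import numpy as np

center = np.array([0.0, 0.0])
radius = 1.0
points = np.array([[0.5, 0.5], [2.0, 0.0]])
inside = np.sum((points - center) ** 2, axis=-1, keepdims=True) <= radius ** 2  # [[True], [False]]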
def spatial_sum(tensor):
    if isinstance(tensor, StaggeredGrid):
        tensor = tensor.staggered
    summed = math.sum(tensor, axis=math.dimrange(tensor))
    for i in math.dimrange(tensor):
        summed = math.expand_dims(summed, i)
    return summed
def sample_at(self, x):
    phase_offset = math.batch_align_scalar(self.phase_offset, 0, x)
    k = math.batch_align(self.k, 1, x)
    data = math.batch_align_scalar(self.data, 0, x)
    spatial_phase = math.sum(k * x, -1, keepdims=True)
    wave = math.sin(spatial_phase + phase_offset) * data
    return math.cast(wave, self.dtype)
def total(self):
    v_length = math.sqrt(math.add([self.staggered[..., i] ** 2 for i in range(self.shape[-1])]))
    total = math.sum(v_length, axis=range(1, self.spatial_rank + 1))
    for i in range(self.spatial_rank + 1):
        total = math.expand_dims(total, -1)
    return total
def sample_at(self, x):
    phase_offset = math.batch_align_scalar(self.phase_offset, 0, x)
    k = math.batch_align(self.k, 1, x)
    data = math.batch_align(self.data, 1, x)
    spatial_phase = math.sum(k * x, -1, keepdims=True)
    result = math.sin(math.to_float(spatial_phase + phase_offset)) * math.to_float(data)
    return result
def value_at(self, location):
    center = math.batch_align(self.center, 1, location)
    radius = math.batch_align(self.radius, 0, location)
    distance_squared = math.sum((location - center) ** 2, axis=-1, keepdims=True)
    bool_inside = distance_squared <= radius ** 2
    return math.to_float(bool_inside)
def divergence(self, physical_units=True):
    components = []
    for dim, field in enumerate(self.data):
        grad = math.axis_gradient(field.data, dim)
        if physical_units:
            grad /= self.dx[dim]
        components.append(grad)
    data = math.sum(components, 0)
    return CenteredGrid(data, self.box, name='div(%s)' % self.name, batch_size=self._batch_size)
def _sample(self, geometry: Geometry) -> math.Tensor:
    points = geometry.center
    distances = points - self.location
    strength = self.strength if self.falloff is None else self.strength * self.falloff(distances)
    velocity = math.cross_product(strength, distances)
    velocity = math.sum(velocity, self.location.shape.batch.without(points.shape))
    if self.component:
        velocity = velocity.vector[self.component]
    return velocity
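# Illustrative 2D NumPy sketch (hypothetical names; the rotation sign convention is assumed and may
# differ from math.cross_product) of the sampling above: each point receives a velocity perpendicular
# to its offset from the vortex location, scaled by the strength.
import numpy as np

location = np.array([0.0, 0.0])
strength = 1.0
points = np.array([[1.0, 0.0], [0.0, 2.0]])
distances = points - location
velocity = strength * np.stack([-distances[..., 1], distances[..., 0]], axis=-1)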
def test_grid_sample(self):
    for backend in (math.NUMPY_BACKEND, tf.TF_BACKEND, torch.TORCH_BACKEND):
        with backend:
            grid = math.sum(math.meshgrid(x=[1, 2, 3], y=[0, 3]), 'vector')  # 1 2 3 | 4 5 6
            coords = math.tensor([(0, 0), (0.5, 0), (0, 0.5), (-2, -1)], names=('list', 'vector'))
            interp = math.grid_sample(grid, coords, extrapolation.ZERO)
            math.assert_close(interp, [1, 1.5, 2.5, 0])
def linear_function(val):
    val = -val
    val *= 2
    val = math.pad(val, {'x': (2, 0), 'y': (0, 1)}, extrapolation.PERIODIC)
    val = val.x[:-2].y[1:] + val.x[2:].y[:-1]
    val = math.pad(val, {'x': (0, 0), 'y': (0, 1)}, extrapolation.ZERO)
    val = math.pad(val, {'x': (2, 2), 'y': (0, 1)}, extrapolation.BOUNDARY)
    # sl = sl.vector[0]
    return val
    # The lines below are unreachable (they follow the return) and reference `sl`, which is never defined here.
    val = val.x[1:4].y[:2]
    return math.sum([val, sl], axis=0) - sl
def at(self, other_field, collapse_dimensions=True, force_optimization=False, return_self_if_compatible=False):
    if self.compatible(other_field):  # and return_self_if_compatible: not applicable for fields with Points
        return self
    if isinstance(other_field, CenteredGrid) and np.allclose(self.dx, other_field.dx):
        paddings = _required_paddings_transposed(self.box, self.dx, other_field.box)
        if math.sum(paddings) == 0:
            origin_in_local = self.box.global_to_local(other_field.box.lower) * self.resolution
            data = _crop_for_interpolation(self.data, origin_in_local, other_field.resolution)
            dimensions = self.resolution != other_field.resolution
            dimensions = [d for d in math.spatial_dimensions(data) if dimensions[d - 1]]
            data = math.interpolate_linear(data, origin_in_local % 1.0, dimensions)
            return CenteredGrid(data, other_field.box, name=self.name, batch_size=self._batch_size)
        elif math.sum(paddings) < 16:
            padded = self.padded(np.transpose(paddings).tolist())
            return padded.at(other_field, collapse_dimensions, force_optimization)
    return Field.at(self, other_field, force_optimization=force_optimization)
def diffuse(field, amount, substeps=1):
    assert isinstance(field, CenteredGrid)
    if field.extrapolation == 'periodic':
        frequencies = math.fft(math.to_complex(field.data))
        k = math.fftfreq(field.resolution) / field.dx
        k = math.sum(k ** 2, axis=-1, keepdims=True)
        fft_laplace = -(2 * pi) ** 2 * k
        diffuse_kernel = math.to_complex(math.exp(fft_laplace * amount))
        data = math.ifft(frequencies * diffuse_kernel)
        data = math.real(data)
    else:
        data = field.data
        for i in range(substeps):
            data += amount / substeps * field.laplace()
    return field.with_data(data)
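# Illustrative 2D NumPy sketch (hypothetical names; assumes a periodic scalar field of shape (nx, ny))
# of the spectral branch above: diffusion is applied by multiplying the field's FFT with the kernel
# exp(-(2*pi*k)^2 * amount).
import numpy as np

def diffuse_periodic(data, amount, dx=1.0):
    kx = np.fft.fftfreq(data.shape[0], d=dx)
    ky = np.fft.fftfreq(data.shape[1], d=dx)
    k2 = kx[:, None] ** 2 + ky[None, :] ** 2
    diffuse_kernel = np.exp(-(2 * np.pi) ** 2 * k2 * amount)
    return np.real(np.fft.ifft2(np.fft.fft2(data) * diffuse_kernel))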
def test_grid_sample_gradient_2d(self):
    grads_grid = []
    grads_coords = []
    for backend in BACKENDS:
        if backend.supports(Backend.gradients):
            with backend:
                grid = math.tensor([[1., 2, 3], [1, 2, 3]], spatial('x,y'))
                coords = math.tensor([(0.5, 0.5), (1, 1.1), (-0.8, -0.5)], instance('points'), channel('vector'))
                with math.record_gradients(grid, coords):
                    sampled = math.grid_sample(grid, coords, extrapolation.ZERO)
                    loss = math.sum(sampled) / 3
                    grad_grid, grad_coords = math.gradients(loss, grid, coords)
                    grads_grid.append(grad_grid)
                    grads_coords.append(grad_coords)
    math.assert_close(*grads_grid)
    math.assert_close(*grads_coords)
def _weighted_sliced_laplace_nd(tensor, weights):
    if tensor.shape[-1] != 1:
        raise ValueError('Laplace operator requires a scalar channel as input')
    dims = range(math.spatial_rank(tensor))
    components = []
    for dimension in dims:
        lower_weights, center_weights, upper_weights = _dim_shifted(weights, dimension, (-1, 0, 1), diminish_others=(1, 1))
        lower_values, center_values, upper_values = _dim_shifted(tensor, dimension, (-1, 0, 1), diminish_others=(1, 1))
        diff = math.mul(upper_values, upper_weights * center_weights) \
            + math.mul(lower_values, lower_weights * center_weights) \
            + math.mul(center_values, -lower_weights - upper_weights)
        components.append(diff)
    return math.sum(components, 0)