def test_struct_initializers(self):
    obj = ([4], CenteredGrid([1, 4, 1], box[0:1], content_type=struct.shape), ([9], [8, 2]))
    z = math.zeros(obj)
    self.assertIsInstance(z, tuple)
    np.testing.assert_equal(z[0], np.zeros([4]))
    z2 = math.zeros_like(z)
    np.testing.assert_equal(math.shape(z)[0], math.shape(z2)[0])
def test_zeros_nonuniform(self):
    nonuniform = shape_stack('stack', BATCH_DIM, shape(time=1, x=3, y=3), shape(x=3, y=4), shape())
    self.assertEqual(math.zeros(nonuniform).shape, nonuniform)
    self.assertEqual(math.ones(nonuniform).shape, nonuniform)
    self.assertEqual(math.random_normal(nonuniform).shape, nonuniform)
    self.assertEqual(math.random_uniform(nonuniform).shape, nonuniform)
def test_struct_initializers(self):
    bounds = box[0:1]  # outside unsafe
    with struct.unsafe():
        obj = ([4], CenteredGrid([1, 4, 1], bounds), ([9], [8, 2]))
    z = math.zeros(obj)
    self.assertIsInstance(z, tuple)
    np.testing.assert_equal(z[0], np.zeros([4]))
    z2 = math.zeros_like(z)
    np.testing.assert_equal(math.shape(z)[0], math.shape(z2)[0])
def test_tensor_from_tuple_of_tensor_like(self):
    native = ((1, 2, 3), math.zeros(vector=3))
    for backend in (NUMPY_BACKEND, TORCH_BACKEND, TF_BACKEND):
        with backend:
            tens = math.tensor(native, names=['stack', 'vector'], convert=False)
            self.assertEqual(math.NUMPY_BACKEND, math.choose_backend(tens))
            self.assertEqual(shape(stack=2, vector=3), tens.shape)
            tens = math.tensor(native, names=['stack', 'vector'])
            self.assertEqual(backend, math.choose_backend(tens))
            self.assertEqual(shape(stack=2, vector=3), tens.shape)
def test_stack(self):
    stacked = shape_stack('stack', BATCH_DIM, shape(time=1, x=3, y=3), shape(x=3, y=4), shape())
    print(stacked)
    self.assertEqual(('stack', 'time', 'x', 'y'), stacked.names)
    self.assertEqual(3, stacked.stack)
    self.assertEqual(1, stacked.time)
    math.assert_close((3, 3, 1), stacked.x)
    math.assert_close((3, 4, 1), stacked.y)
    print(stacked.shape)
    self.assertEqual(('stack', 'dims'), stacked.shape.names)
    self.assertEqual(12, stacked.shape.volume)
def test_trace_function(self):
    def f(x: math.Tensor, y: math.Tensor):
        return x + y

    for backend in BACKENDS:
        with backend:
            ft = math.jit_compile(f)
            args1 = math.ones(x=2), math.ones(y=2)
            args2 = math.ones(x=3), math.ones(y=3)
            res1 = ft(*args1)
            self.assertEqual(math.shape(x=2, y=2), res1.shape)
            math.assert_close(res1, 2)
            res2 = ft(*args2)
            self.assertEqual(math.shape(x=3, y=3), res2.shape)
            math.assert_close(res2, 2)
def _grid_sample(self, box, resolution):
    """
    Samples this field on a regular grid.

    :param box: physical dimensions of the grid
    :param resolution: grid resolution
    :return: CenteredGrid
    """
    sample_indices_nd = math.to_int(math.round(box.global_to_local(self.sample_points) * resolution))
    sample_indices_nd = math.minimum(math.maximum(0, sample_indices_nd), resolution - 1)  # Snap outside points to edges, otherwise scatter raises an error
    # Correct format for math.scatter
    valid_indices = _batch_indices(sample_indices_nd)
    shape = (math.shape(self.data)[0],) + tuple(resolution) + (self.data.shape[-1],)
    scattered = math.scatter(self.sample_points, valid_indices, self.data, shape, duplicates_handling=self.mode)
    return CenteredGrid(data=scattered, box=box, extrapolation='constant', name=self.name + '_centered')
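# Illustrative sketch (not part of the library): the index computation above amounts to
# rounding world-space sample points to cell indices and clamping them to the grid.
# The helper and names below are hypothetical; plain NumPy is used for clarity.
import numpy as np

def snap_points_to_cells(points, box_lower, box_size, resolution):
    """Map world-space points to integer cell indices, clamped to the grid bounds."""
    local = (points - box_lower) / box_size              # analogous to global_to_local: in [0, 1] inside the box
    idx = np.round(local * resolution).astype(int)       # nearest cell index
    return np.clip(idx, 0, np.asarray(resolution) - 1)   # snap outside points to the edges

# Example: two points relative to a [0, 1]^2 box sampled onto a 4x4 grid
print(snap_points_to_cells(np.array([[0.1, 0.9], [1.5, -0.2]]), 0.0, 1.0, (4, 4)))  # [[0 3], [3 0]]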
def at(self, other_field):
    if isinstance(other_field, CenteredGrid):
        batch_size = other_field._batch_size
        if batch_size is None:
            if other_field.content_type in (struct.shape, struct.staticshape):
                batch_size = other_field.data[0]
            else:
                batch_size = math.shape(other_field.data)[0]
        array = self.grid_sample(other_field.resolution, other_field.box.size, batch_size=batch_size)
        return other_field.with_data(array)
    if isinstance(other_field, StaggeredGrid):
        assert self.channels is None or self.channels == other_field.rank
        return other_field.with_data([
            self.grid_sample(grid.resolution, grid.box.size, grid._batch_size, 1)
            for grid in other_field.unstack()
        ])
    if isinstance(other_field, Domain):
        array = self.grid_sample(other_field.resolution, other_field.box.size)
        return CenteredGrid(array, box=other_field.box, extrapolation='boundary')
def sparse_cg(field, A, max_iterations, guess, accuracy, back_prop=False):
    div_vec = math.reshape(field, [-1, int(np.prod(field.shape[1:]))])
    if guess is not None:
        guess = math.reshape(guess, [-1, int(np.prod(field.shape[1:]))])
    apply_A = lambda pressure: math.matmul(A, pressure)
    result_vec, iterations = conjugate_gradient(div_vec, apply_A, guess, accuracy, max_iterations, back_prop)
    return math.reshape(result_vec, math.shape(field)), iterations
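# Sketch of the reshape used above (assuming `field` has shape (batch, *spatial, channels)):
# flattening all non-batch dimensions lets the conjugate gradient solver operate on plain vectors,
# and the inverse reshape restores the field layout afterwards.
import numpy as np

field = np.random.rand(2, 4, 4, 1)                          # (batch, y, x, channels)
div_vec = field.reshape(-1, int(np.prod(field.shape[1:])))  # -> (2, 16)
assert div_vec.shape == (2, 16)
restored = div_vec.reshape(field.shape)                     # inverse of the flattening
assert np.allclose(restored, field)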
def distribute_points(density, particles_per_cell=1, distribution='uniform'):
    """
    Distribute points according to the distribution specified in density.

    :param density: binary tensor
    :param particles_per_cell: integer
    :param distribution: 'uniform' or 'center'
    :return: tensor of shape (batch_size, point_count, rank)
    """
    assert distribution in ('center', 'uniform')
    index_array = []
    batch_size = math.staticshape(density)[0] if math.staticshape(density)[0] is not None else 1
    for batch in range(batch_size):
        indices = math.where(density[batch, ..., 0] > 0)
        indices = math.to_float(indices)
        temp = []
        for _ in range(particles_per_cell):
            if distribution == 'center':
                temp.append(indices + 0.5)
            elif distribution == 'uniform':
                temp.append(indices + math.random_uniform(math.shape(indices)))
        index_array.append(math.concat(temp, axis=0))
    try:
        index_array = math.stack(index_array)
        return index_array
    except ValueError:
        raise ValueError("all arrays in the batch must have the same number of active cells.")
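# Standalone NumPy analogue of the idea above (illustration only, not the library code):
# place one or more points in every active cell of a binary mask, either at the cell
# center or uniformly at random inside the cell. The helper name is hypothetical.
import numpy as np

def distribute_points_np(mask, particles_per_cell=1, distribution='uniform'):
    assert distribution in ('center', 'uniform')
    indices = np.argwhere(mask > 0).astype(float)         # (active_cells, rank)
    parts = []
    for _ in range(particles_per_cell):
        if distribution == 'center':
            parts.append(indices + 0.5)
        else:
            parts.append(indices + np.random.uniform(size=indices.shape))
    return np.concatenate(parts, axis=0)                  # (active_cells * particles_per_cell, rank)

points = distribute_points_np(np.array([[0, 1], [1, 0]]), particles_per_cell=2)
print(points.shape)  # (4, 2)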
def solve(self, field, domain, guess, enable_backprop):
    assert isinstance(domain, FluidDomain)
    active_mask = domain.active_tensor(extend=1)
    fluid_mask = domain.accessible_tensor(extend=1)
    dimensions = math.staticshape(field)[1:-1]
    N = int(np.prod(dimensions))
    periodic = Material.periodic(domain.domain.boundaries)
    if math.choose_backend([field, active_mask, fluid_mask]).matches_name('SciPy'):
        A = sparse_pressure_matrix(dimensions, active_mask, fluid_mask, periodic)
    else:
        sidx, sorting = sparse_indices(dimensions, periodic)
        sval_data = sparse_values(dimensions, active_mask, fluid_mask, sorting, periodic)
        backend = math.choose_backend(field)
        sval_data = backend.cast(sval_data, field.dtype)
        A = backend.sparse_tensor(indices=sidx, values=sval_data, shape=[N, N])
    div_vec = math.reshape(field, [-1, int(np.prod(field.shape[1:]))])
    if guess is not None:
        guess = math.reshape(guess, [-1, int(np.prod(field.shape[1:]))])

    def apply_A(pressure):
        return math.matmul(A, pressure)

    result_vec, iterations = conjugate_gradient(div_vec, apply_A, guess, self.accuracy, self.max_iterations, enable_backprop)
    return math.reshape(result_vec, math.shape(field)), iterations
def test_trace_function(self):
    def f(x: math.Tensor, y: math.Tensor):
        return x + y

    for backend in [math.NUMPY_BACKEND, tf.TF_BACKEND, torch.TORCH_BACKEND]:
        with backend:
            ft = math.trace_function(f)
            args1 = math.ones(x=2), math.ones(y=2)
            args2 = math.ones(x=3), math.ones(y=3)
            res1 = ft(*args1)
            self.assertEqual(math.shape(x=2, y=2), res1.shape)
            math.assert_close(res1, 2)
            res2 = ft(*args2)
            self.assertEqual(math.shape(x=3, y=3), res2.shape)
            math.assert_close(res2, 2)
def __init__(self, center, unit_distance, maximum_value=1.0, data=1.0, name='harmonic', **kwargs):
    rank = math.shape(center)[-1]
    AnalyticField.__init__(self, **struct.kwargs(locals()))
def l1_loss(tensor, batch_norm=True, reduce_batches=True):
    if isinstance(tensor, StaggeredGrid):
        tensor = tensor.staggered
    if reduce_batches:
        total_loss = math.sum(math.abs(tensor))
    else:
        total_loss = math.sum(math.abs(tensor), axis=list(range(1, len(tensor.shape))))
    if batch_norm and reduce_batches:
        batch_size = math.shape(tensor)[0]
        return total_loss / math.to_float(batch_size)
    else:
        return total_loss
def l_n_loss(tensor, n, batch_norm=True, reduce_batches=True):
    if isinstance(tensor, StaggeredGrid):
        tensor = tensor.staggered
    if reduce_batches:
        total_loss = math.sum(tensor ** n) / n
    else:
        total_loss = math.sum(tensor ** n, axis=list(range(1, len(tensor.shape)))) / n
    if batch_norm:
        batch_size = math.shape(tensor)[0]
        return total_loss / math.to_float(batch_size)
    else:
        return total_loss
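# Quick NumPy check of the two reductions above (illustrative; assumes a dense (batch, ...) array):
# l1_loss with reduce_batches=True sums |x| over all entries and divides by the batch size;
# l_n_loss computes sum(x**n) / n, divided by the batch size when batch_norm=True.
import numpy as np

x = np.arange(6, dtype=float).reshape(2, 3)   # batch of 2 samples
l1 = np.sum(np.abs(x)) / x.shape[0]           # (0+1+2+3+4+5) / 2 = 7.5
l2 = np.sum(x ** 2) / 2 / x.shape[0]          # (0+1+4+9+16+25) / 2 / 2 = 13.75
print(l1, l2)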
def __init__(self, shape=math.EMPTY_SHAPE, scale=10, smoothness=1.0, **dims):
    """
    Generates random noise fluctuations which can be configured in physical size and smoothness.
    Each time values are sampled from a Noise field, a new noise field is generated.

    Noise is typically used as an initializer for CenteredGrids or StaggeredGrids.

    Args:
        dims: Additional dimensions, e.g. the number of independent random scalar fields this Field consists of
        scale: Size of noise fluctuations in physical units
        smoothness: Determines how quickly high frequencies die out
    """
    self.scale = scale
    self.smoothness = smoothness
    self._shape = shape & math.shape(**dims)
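# Rough standalone analogue (not the library's implementation): one common way to get smooth
# random fluctuations is to draw white noise and damp its high frequencies in Fourier space.
# A larger `smoothness` kills high frequencies faster and `scale` sets the dominant fluctuation
# size; the parameter names below only mirror the ones above and the formula is an assumption.
import numpy as np

def smooth_noise(resolution=(64, 64), scale=10.0, smoothness=1.0):
    k = np.meshgrid(*[np.fft.fftfreq(n) for n in resolution], indexing='ij')
    k2 = sum(ki ** 2 for ki in k)
    spectrum = np.fft.fftn(np.random.randn(*resolution))
    damping = np.exp(-0.5 * k2 * scale ** 2) ** smoothness   # low-pass filter
    return np.real(np.fft.ifftn(spectrum * damping))

print(smooth_noise((32, 32)).shape)  # (32, 32)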
def batch_indices(indices):
    """
    Reshapes the indices such that, aside from indices, they also contain the batch number.
    For example the entry (32, 40) as coordinates of batch 2 will become (2, 32, 40).
    Transforms shape (b, p, d) to (b, p, d+1) where batch size is b, number of particles is p and number of dimensions is d.
    """
    batch_size = indices.shape[0]
    out_spatial_rank = len(indices.shape) - 2
    out_spatial_size = math.shape(indices)[1:-1]
    batch_range = math.DYNAMIC_BACKEND.choose_backend(indices).range(batch_size)
    batch_ids = math.reshape(batch_range, [batch_size] + [1] * out_spatial_rank)
    tile_shape = math.pad(out_spatial_size, [[1, 0]], constant_values=1)
    batch_ids = math.expand_dims(math.tile(batch_ids, tile_shape), axis=-1)
    return math.concat((batch_ids, indices), axis=-1)
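# NumPy illustration of the docstring example above (standalone, not the library code):
# prepend each entry's batch number, turning an index array of shape (b, p, d)
# into one of shape (b, p, d + 1).
import numpy as np

indices = np.array([[[32, 40], [5, 7]],
                    [[32, 40], [1, 2]]])                  # shape (b=2, p=2, d=2)
b, p, _ = indices.shape
batch_ids = np.broadcast_to(np.arange(b)[:, None, None], (b, p, 1))
with_batch = np.concatenate([batch_ids, indices], axis=-1)  # shape (2, 2, 3)
print(with_batch[1, 0])  # [ 1 32 40] -> entry (32, 40) of batch 1 becomes (1, 32, 40)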
def create(parent_directory: str, shape: math.Shape = math.EMPTY_SHAPE, copy_calling_script=True, **dimensions) -> 'Scene':
    """
    Creates a new `Scene` or a batch of new scenes inside `parent_directory`.

    See Also:
        `Scene.at()`, `Scene.list()`.

    Args:
        parent_directory: Directory to hold the new `Scene`. If it doesn't exist, it will be created.
        shape: Determines number of scenes to create. Multiple scenes will be represented by a `Scene` with `is_batch=True`.
        copy_calling_script: Whether to copy the Python file that invoked this method into the `src` folder of all created scenes. See `Scene.copy_calling_script()`.
        dimensions: Additional batch dimensions

    Returns:
        Single `Scene` object representing the new scene(s).
    """
    shape = (shape & math.shape(**dimensions)).to_batch()
    parent_directory = expanduser(parent_directory)
    abs_dir = abspath(parent_directory)
    if not isdir(abs_dir):
        os.makedirs(abs_dir)
        next_id = 0
    else:
        indices = [int(name[4:]) for name in os.listdir(abs_dir) if name.startswith("sim_")]
        next_id = max([-1] + indices) + 1
    ids = math.wrap(tuple(range(next_id, next_id + shape.volume))).vector.split(shape)
    paths = math.map(lambda id_: join(parent_directory, f"sim_{id_:06d}"), ids)
    scene = Scene(paths)
    scene.mkdir()
    if copy_calling_script:
        try:
            scene.copy_calling_script()
        except IOError as err:
            warnings.warn(f"Failed to copy calling script to scene during Scene.create(): {err}")
    return scene
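# Usage sketch based on the code and docstring above (paths and dimension names are only examples):
# scene = Scene.create('~/phi/my_simulations')           # creates e.g. .../sim_000000
# batch = Scene.create('~/phi/my_simulations', runs=3)   # batch dimension 'runs' -> 3 scenes, is_batch=True
# Scene directories are numbered consecutively as sim_000000, sim_000001, ... within the parent directory.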
def test_box_batched(self):
    box = Box(math.tensor([(0, 0), (1, 1)], 'boxes,vector'), 1)
    self.assertEqual(math.shape(boxes=2, x=1, y=1), box.shape)
def component_count(self):
    if math.ndims(self.data) == 0:
        return 1
    return math.shape(self.data)[-1]
def test_combine(self):
    self.assertEqual(shape(batch=2, x=3, y=4), shape(batch=2) & shape(x=3, y=4))
    self.assertEqual(shape(x=3, vector=2), shape(vector=2) & shape(x=3))
    self.assertEqual(shape(batch=10, x=3, vector=2), shape(vector=2) & shape(x=3) & shape(batch=10))
def test_downsample_staggered_2d(self):
    grid = Domain(x=32, y=40).staggered_grid(1)
    downsampled = field.downsample2x(grid)
    self.assertEqual(math.shape(x=16, y=20, vector=2).alphabetically(), downsampled.shape.alphabetically())
def wave_vector(self, wave_vector):
    if len(math.shape(wave_vector)) == 0:
        wave_vector = math.expand_dims(wave_vector, 0)
    return wave_vector
def test_sample_at(self):
    DOMAIN = Domain(x=4, y=3)
    field = AngularVelocity([0, 0])
    self.assertEqual(math.shape(vector=2), field.shape.channel)
    field >> DOMAIN.vector_grid()
    field >> DOMAIN.staggered_grid()
def test_box_constructor(self):
    box = Box(0, (1, 1))
    math.assert_close(box.size, 1)
    self.assertEqual(math.shape(x=1, y=1), box.shape)
def test_subshapes(self):
    s = shape(batch=10, x=4, y=3, vector=2)
    self.assertEqual(shape(batch=10), s.batch)
    self.assertEqual(shape(x=4, y=3), s.spatial)
    self.assertEqual(shape(vector=2), s.channel)
def test_indexing(self):
    s = shape(batch=10, x=4, y=3, vector=2)
    self.assertEqual(shape(batch=10), s[0:1])
    self.assertEqual(shape(batch=10), s[[0]])
    self.assertEqual(shape(x=4, y=3), s[1:3])
def test_custom_spatial_dims(self):
    domain = Domain(a=4, b=3)
    grid = domain.scalar_grid(1)
    self.assertEqual(math.shape(a=4, b=3), grid.shape)
    grid = domain.staggered_grid(1)
    self.assertEqual(math.shape(a=4, b=3, vector=2), grid.shape)
def test_after_gather(self):
    self.assertEqual(shape(x=2), shape(x=3).after_gather({'x': slice(None, None, 2)}))
def approximate_fraction_inside(self, location, cell_size):
    return math.tile(math.to_float(0), list(math.shape(location)[:-1]) + [1])