def sparse_cg(field, A, max_iterations, guess, accuracy, back_prop=False):
    """Solves A x = field with the conjugate gradient method, flattening the field to one vector per batch entry."""
    div_vec = math.reshape(field, [-1, int(np.prod(field.shape[1:]))])
    if guess is not None:
        guess = math.reshape(guess, [-1, int(np.prod(field.shape[1:]))])
    apply_A = lambda pressure: math.matmul(A, pressure)
    result_vec, iterations = conjugate_gradient(div_vec, apply_A, guess, accuracy, max_iterations, back_prop)
    return math.reshape(result_vec, math.shape(field)), iterations
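# Illustration only (not part of the library): a minimal NumPy/SciPy sketch of the same
# flatten -> conjugate-gradient -> reshape pattern used by sparse_cg, with
# scipy.sparse.linalg.cg standing in for the library's conjugate_gradient routine and a
# simple SPD tridiagonal matrix standing in for the pressure matrix A.
def _demo_sparse_cg_pattern():
    import numpy as np
    from scipy.sparse import diags
    from scipy.sparse.linalg import cg

    field = np.random.rand(2, 4, 4, 1)                       # (batch, y, x, components)
    n = int(np.prod(field.shape[1:]))                        # flattened length per batch entry
    A = diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n))   # SPD stand-in for the pressure matrix
    div_vec = field.reshape(-1, n)                           # flatten each batch entry to a vector
    result_vec = np.stack([cg(A, rhs, atol=1e-5)[0] for rhs in div_vec])
    return result_vec.reshape(field.shape)                   # restore the original field shape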
def solve(self, field, domain, guess, enable_backprop):
    assert isinstance(domain, FluidDomain)
    active_mask = domain.active_tensor(extend=1)
    fluid_mask = domain.accessible_tensor(extend=1)
    dimensions = math.staticshape(field)[1:-1]
    N = int(np.prod(dimensions))
    periodic = Material.periodic(domain.domain.boundaries)

    if math.choose_backend([field, active_mask, fluid_mask]).matches_name('SciPy'):
        A = sparse_pressure_matrix(dimensions, active_mask, fluid_mask, periodic)
    else:
        # Build the sparse pressure matrix directly in the active (non-SciPy) backend.
        sidx, sorting = sparse_indices(dimensions, periodic)
        sval_data = sparse_values(dimensions, active_mask, fluid_mask, sorting, periodic)
        backend = math.choose_backend(field)
        sval_data = backend.cast(sval_data, field.dtype)
        A = backend.sparse_tensor(indices=sidx, values=sval_data, shape=[N, N])

    div_vec = math.reshape(field, [-1, int(np.prod(field.shape[1:]))])
    if guess is not None:
        guess = math.reshape(guess, [-1, int(np.prod(field.shape[1:]))])

    def apply_A(pressure):
        return math.matmul(A, pressure)

    result_vec, iterations = conjugate_gradient(div_vec, apply_A, guess, self.accuracy, self.max_iterations, enable_backprop)
    return math.reshape(result_vec, math.shape(field)), iterations
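# Illustration only (not part of the library): the matrix A assembled in solve() discretizes the
# pressure Poisson equation. A minimal SciPy sketch of the plain 2D 5-point Laplacian stencil,
# ignoring the active/fluid masks and periodic boundaries that sparse_pressure_matrix accounts for.
def _demo_poisson_matrix(nx=4, ny=4):
    from scipy.sparse import lil_matrix

    N = nx * ny
    A = lil_matrix((N, N))
    for y in range(ny):
        for x in range(nx):
            row = y * nx + x
            A[row, row] = -4.0                                   # center coefficient
            for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                if 0 <= x + dx < nx and 0 <= y + dy < ny:
                    A[row, (y + dy) * nx + (x + dx)] = 1.0       # neighbor coefficient
    return A.tocsr()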
def multi_advect(self, fields, interpolation="LINEAR", dt=1):
    assert isinstance(fields, (list, tuple)), "first parameter must be either a tuple or list"
    inputs_lists = []
    coords_lists = []
    value_generators = []
    for field in fields:
        if isinstance(field, StaggeredGrid):
            i, c, v = self._mac_block_advection(field.staggered, dt)
        else:
            i, c, v = self._centered_block_advection(field, dt)
        inputs_lists.append(i)
        coords_lists.append(c)
        value_generators.append(v)

    # Combine the sample blocks of all fields so a single resample call advects everything at once.
    inputs = math.concat(sum(inputs_lists, []), 0)
    coords = math.concat(sum(coords_lists, []), 0)
    all_advected = math.resample(inputs, coords, interpolation=interpolation, boundary="REPLICATE")
    all_advected = math.reshape(all_advected, [self.spatial_rank, -1] + list(all_advected.shape[1:]))
    all_advected = math.unstack(all_advected)

    # Hand each field its own share of the advected blocks.
    results = []
    abs_i = 0
    for i in range(len(inputs_lists)):
        n = len(inputs_lists[i])  # number of blocks belonging to this field
        assigned_advected = all_advected[abs_i:abs_i + n]
        results.append(value_generators[i](assigned_advected))
        abs_i += n
    return results
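# Illustration only (not part of the library): multi_advect gathers the sample blocks of all
# fields, runs one bulk operation and scatters the results back. A minimal NumPy sketch of that
# gather -> single bulk op -> split pattern (assumes equally shaped blocks; multiplication by 2
# stands in for the resample call).
def _demo_batched_split(blocks_per_field):
    import numpy as np

    all_blocks = [b for blocks in blocks_per_field for b in blocks]   # gather blocks of all fields
    processed = list(np.stack(all_blocks) * 2.0)                      # one bulk op instead of many small ones
    results, offset = [], 0
    for blocks in blocks_per_field:
        n = len(blocks)                                               # this field's own block count
        results.append(processed[offset:offset + n])
        offset += n
    return results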
def upsample2x(tensor, interpolation="LINEAR"):
    if interpolation.lower() != "linear":
        raise ValueError("Only linear interpolation supported")
    dims = range(spatial_rank(tensor))
    vlen = tensor.shape[-1]
    spatial_dims = tensor.shape[1:-1]
    tensor = math.pad(tensor, [[0, 0]] + [[1, 1]] * spatial_rank(tensor) + [[0, 0]], "SYMMETRIC")
    for dim in dims:
        left_slices_1 = [(slice(2, None) if i == dim else slice(None)) for i in dims]
        left_slices_2 = [(slice(1, -1) if i == dim else slice(None)) for i in dims]
        right_slices_1 = [(slice(1, -1) if i == dim else slice(None)) for i in dims]
        right_slices_2 = [(slice(-2) if i == dim else slice(None)) for i in dims]
        # Each input cell yields two output samples, blended 0.75/0.25 with its neighbor along `dim`.
        left = 0.75 * tensor[[slice(None)] + left_slices_2 + [slice(None)]] \
            + 0.25 * tensor[[slice(None)] + left_slices_1 + [slice(None)]]
        right = 0.25 * tensor[[slice(None)] + right_slices_2 + [slice(None)]] \
            + 0.75 * tensor[[slice(None)] + right_slices_1 + [slice(None)]]
        combined = math.stack([right, left], axis=2 + dim)
        tensor = math.reshape(combined, [-1] + [spatial_dims[dim] * 2 if i == dim else tensor.shape[i + 1] for i in dims] + [vlen])
    return tensor
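# Illustration only (not part of the library): a 1D NumPy sketch of the same 2x linear upsampling,
# using the 0.75/0.25 neighbor weights and symmetric edge padding from upsample2x.
def _demo_upsample2x_1d(values):
    import numpy as np

    padded = np.pad(values, 1, mode="symmetric")
    lower = 0.25 * padded[:-2] + 0.75 * padded[1:-1]     # sample left of each cell center
    upper = 0.75 * padded[1:-1] + 0.25 * padded[2:]      # sample right of each cell center
    return np.stack([lower, upper], axis=1).reshape(-1)  # interleave: two output samples per input cell

# Example: _demo_upsample2x_1d(np.array([0.0, 1.0, 2.0])) gives [0., 0.25, 0.75, 1.25, 1.75, 2.].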
def batch_indices(indices):
    """
    Reshapes the indices so that they also contain the batch number.
    For example, the entry (32, 40) as coordinates in batch 2 becomes (2, 32, 40).
    Transforms shape (b, p, d) to (b, p, d+1), where b is the batch size, p the number of particles and d the number of dimensions.
    """
    batch_size = indices.shape[0]
    out_spatial_rank = len(indices.shape) - 2
    out_spatial_size = math.shape(indices)[1:-1]
    batch_range = math.DYNAMIC_BACKEND.choose_backend(indices).range(batch_size)
    batch_ids = math.reshape(batch_range, [batch_size] + [1] * out_spatial_rank)
    tile_shape = math.pad(out_spatial_size, [[1, 0]], constant_values=1)
    batch_ids = math.expand_dims(math.tile(batch_ids, tile_shape), axis=-1)
    return math.concat((batch_ids, indices), axis=-1)
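# Illustration only (not part of the library): a plain NumPy sketch of the same transformation,
# prepending the batch id to every index so (b, p, d) indices become (b, p, d+1).
def _demo_batch_indices(indices):
    import numpy as np

    b = indices.shape[0]
    batch_ids = np.arange(b).reshape((b,) + (1,) * (indices.ndim - 2))     # shape (b, 1, ..., 1)
    batch_ids = np.broadcast_to(batch_ids, indices.shape[:-1])[..., None]  # shape (b, p, ..., 1)
    return np.concatenate([batch_ids, indices], axis=-1)

# Example: _demo_batch_indices(np.array([[[32, 40]], [[32, 40]]])) gives
# [[[0, 32, 40]], [[1, 32, 40]]].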