def solve(self, field, domain, guess):
    """Solve the pressure Poisson system for ``field`` via sparse conjugate gradient.

    Spatial dimensions are taken from ``field``'s static shape with the first and
    last axes stripped — presumably (batch, *spatial, channel); TODO confirm.

    :param field: divergence tensor to solve against
    :param domain: FluidDomain providing active/accessible masks and boundaries
    :param guess: initial pressure guess, or None
    :return: (pressure, iteration count)
    """
    assert isinstance(domain, FluidDomain)
    active = domain.active_tensor(extend=1)
    accessible = domain.accessible_tensor(extend=1)
    dims = math.staticshape(field)[1:-1]
    matrix_size = int(np.prod(dims))
    periodic = Material.periodic(domain.domain.boundaries)
    # Build the Laplacian operator A: a SciPy sparse matrix on the SciPy
    # backend, otherwise a backend-native sparse tensor.
    if math.choose_backend([field, active, accessible]).matches_name('SciPy'):
        A = sparse_pressure_matrix(dims, active, accessible, periodic)
    else:
        indices, sorting = sparse_indices(dims, periodic)
        values = sparse_values(dims, active, accessible, sorting, periodic)
        backend = math.choose_backend(field)
        values = backend.cast(values, field.dtype)
        A = backend.sparse_tensor(indices=indices, values=values, shape=[matrix_size, matrix_size])
    if self.autodiff:
        # Let the framework's autodiff trace through the CG iterations directly.
        return sparse_cg(field, A, self.max_iterations, guess, self.accuracy, back_prop=True)

    def pressure_gradient(op, grad):
        # grad_iteration_limit is late-bound: it is assigned after the forward
        # solve below, and this closure only fires later, during backprop.
        return sparse_cg(grad, A, grad_iteration_limit, None, self.gradient_accuracy)[0]

    pressure, iteration = math.with_custom_gradient(
        sparse_cg,
        [field, A, self.max_iterations, guess, self.accuracy],
        pressure_gradient, input_index=0, output_index=0, name_base='scg_pressure_solve')
    # 'mirror' reuses the forward iteration count as the backward limit.
    grad_iteration_limit = iteration if self.max_gradient_iterations == 'mirror' else self.max_gradient_iterations
    return pressure, iteration
def solve(self, divergence, domain, guess):
    """Geometric pressure solve: delegates to ``solve_pressure_forward``.

    :param divergence: divergence tensor to solve against
    :param domain: PoissonDomain providing the accessible mask
    :param guess: initial pressure guess, or None
    :return: (pressure, iteration count)
    """
    assert isinstance(domain, PoissonDomain)
    accessible = domain.accessible_tensor(extend=1)
    if self.autodiff:
        # Backpropagate through the forward solve directly.
        return solve_pressure_forward(divergence, accessible, self.max_iterations, guess, self.accuracy, domain, back_prop=True)

    def pressure_gradient(op, grad):
        # grad_iteration_limit is late-bound below, after the forward solve;
        # this closure only fires during backprop.
        return solve_pressure_forward(grad, accessible, grad_iteration_limit, None, self.gradient_accuracy, domain)[0]

    pressure, iteration = math.with_custom_gradient(
        solve_pressure_forward,
        [divergence, accessible, self.max_iterations, guess, self.accuracy, domain],
        pressure_gradient, input_index=0, output_index=0, name_base='geom_solve')
    # 'mirror' reuses the forward iteration count as the backward limit.
    grad_iteration_limit = iteration if self.max_gradient_iterations == 'mirror' else self.max_gradient_iterations
    return pressure, iteration
def solve_with_boundaries(self,
                          divergence,
                          active_mask,
                          fluid_mask,
                          accuracy=1e-5,
                          pressure_guess=None,
                          max_iterations=2000,
                          gradient_accuracy=None,
                          return_loop_counter=False):
    """Pressure solve via the CUDA forward kernel with an implicit gradient.

    :param divergence: divergence tensor to solve against
    :param active_mask: active-cell mask (extended by 1 — see callers)
    :param fluid_mask: accessible/fluid-cell mask (extended by 1)
    :param accuracy: convergence threshold for the forward solve
    :param pressure_guess: unused — the kernel automatically takes the last
        pressure value as the initial guess
    :param max_iterations: iteration cap for both forward and backward solves
    :param gradient_accuracy: convergence threshold for the backward solve;
        None falls back to ``accuracy``
    :param return_loop_counter: also return the forward iteration count
    :return: pressure, or (pressure, iterations) if return_loop_counter
    """
    # Bug fix: `gradient_accuracy` was accepted but never used — the backward
    # solve always ran at the forward `accuracy`. Honor it now, keeping the
    # old behavior when it is left at its None default.
    grad_accuracy = accuracy if gradient_accuracy is None else gradient_accuracy

    def pressure_gradient(op, grad):
        # Implicit gradient: re-run the forward solve on the incoming gradient.
        return self.cuda_solve_forward(grad, active_mask, fluid_mask, grad_accuracy, max_iterations)[0]

    pressure_out, iterations = math.with_custom_gradient(
        self.cuda_solve_forward,
        [divergence, active_mask, fluid_mask, accuracy, max_iterations],
        pressure_gradient, input_index=0, output_index=0, name_base="cuda_pressure_solve")
    if return_loop_counter:
        return pressure_out, iterations
    else:
        return pressure_out
def solve(self, divergence, domain, pressure_guess):
    """Sparse-CG pressure solve with a TensorFlow-specific sparse-matrix path.

    :param divergence: divergence tensor, shape (batch, *spatial, channel)
    :param domain: FluidDomain providing active/accessible masks
    :param pressure_guess: initial pressure guess, or None
    :return: (pressure, iteration count)
    """
    assert isinstance(domain, FluidDomain)
    active = domain.active_tensor(extend=1)
    accessible = domain.accessible_tensor(extend=1)
    dims = list(divergence.shape[1:-1])
    matrix_size = int(np.prod(dims))
    if math.choose_backend(divergence).matches_name('TensorFlow'):
        import tensorflow as tf
        if tf.__version__[0] == '2':
            # Run through the TF1 compatibility layer in graph mode.
            print('Adjusting for tensorflow 2.0')
            tf = tf.compat.v1
            tf.disable_eager_execution()
        indices, sorting = sparse_indices(dims)
        values = sparse_values(dims, active, accessible, sorting)
        A = tf.SparseTensor(indices=indices, values=values, dense_shape=[matrix_size, matrix_size])
    else:
        A = sparse_pressure_matrix(dims, active, accessible)
    if self.autodiff:
        return sparse_cg(divergence, A, self.max_iterations, pressure_guess, self.accuracy, back_prop=True)

    def pressure_gradient(op, grad):
        # grad_iteration_limit is late-bound: assigned after the forward
        # solve below, read only when backprop fires.
        return sparse_cg(grad, A, grad_iteration_limit, None, self.gradient_accuracy)[0]

    pressure, iteration = math.with_custom_gradient(
        sparse_cg,
        [divergence, A, self.max_iterations, pressure_guess, self.accuracy],
        pressure_gradient, input_index=0, output_index=0, name_base='scg_pressure_solve')
    # 'mirror' reuses the forward iteration count as the backward limit.
    grad_iteration_limit = iteration if self.max_gradient_iterations == 'mirror' else self.max_gradient_iterations
    return pressure, iteration
def solve(self, divergence, domain, pressure_guess):
    """Multiscale pressure solve: delegates to ``_mg_solve_forward`` with this
    solver's level hierarchy.

    :param divergence: divergence tensor to solve against
    :param domain: FluidDomain instance
    :param pressure_guess: initial pressure guess, or None
    :return: result of ``_mg_solve_forward`` (pressure and iteration info)
    """
    assert isinstance(domain, FluidDomain)
    if self.autodiff:
        # Built-in autodiff: just run the forward solve and let the framework trace it.
        return _mg_solve_forward(divergence, domain, pressure_guess, self.solvers)

    def pressure_gradient(op, grad):
        # Implicit gradient: re-run the forward solve on the incoming gradient (no guess).
        return _mg_solve_forward(grad, domain, None, self.solvers)[0]

    return math.with_custom_gradient(
        _mg_solve_forward,
        [divergence, domain, pressure_guess, self.solvers],
        pressure_gradient, input_index=0, output_index=0, name_base='multiscale_solve')
def poisson_solve(input_field, poisson_domain, solver=None, guess=None, gradient='implicit'):
    """
    Solves the Poisson equation Δp = input_field for p.

    :param input_field: CenteredGrid
    :param poisson_domain: PoissonDomain instance
    :param solver: PoissonSolver to use, None for default
    :param guess: CenteredGrid with same size and resolution as input_field
    :param gradient: one of ('implicit', 'autodiff', 'inverse')
        If 'autodiff', use the built-in autodiff for backpropagation.
        The intermediate results of each loop iteration will be permanently stored if backpropagation is used.
        If 'implicit', computes a forward pressure solve in reverse accumulation backpropagation.
        This requires less memory but is only accurate if the solution is fully converged.
    :return: p as CenteredGrid, iteration count as int or None if not available
    :rtype: CenteredGrid, int
    """
    assert isinstance(input_field, CenteredGrid)
    if guess is not None:
        assert isinstance(guess, CenteredGrid)
        assert guess.compatible(input_field)
        guess = guess.data
    if isinstance(poisson_domain, Domain):
        poisson_domain = PoissonDomain(poisson_domain)
    if solver is None:
        solver = _choose_solver(
            input_field.resolution,
            math.choose_backend([input_field.data, poisson_domain.active.data, poisson_domain.accessible.data]))
    if not struct.any(Material.open(poisson_domain.domain.boundaries)):
        # No open boundary: subtract the mean divergence over the spatial axes
        # (presumably to make the closed-domain system solvable — confirm).
        spatial_axes = tuple(range(1, 1 + input_field.rank))
        input_field = input_field - math.mean(input_field.data, axis=spatial_axes, keepdims=True)
    assert gradient in ('autodiff', 'implicit', 'inverse')
    if gradient == 'autodiff':
        pressure, iteration = solver.solve(input_field.data, poisson_domain, guess, enable_backprop=True)
    else:
        if gradient == 'implicit':
            def poisson_gradient(_op, grad):
                # Implicit backprop: solve the Poisson equation again on the
                # incoming gradient (assumes forward solve fully converged).
                return poisson_solve(CenteredGrid.sample(grad, poisson_domain.domain),
                                     poisson_domain, solver, None, gradient=gradient)[0].data
        else:  # gradient == 'inverse'
            def poisson_gradient(_op, grad):
                # 'inverse' backprop applies the Laplace operator to the gradient.
                return CenteredGrid.sample(grad, poisson_domain.domain).laplace(physical_units=False).data
        pressure, iteration = math.with_custom_gradient(
            solver.solve,
            [input_field.data, poisson_domain, guess, False],
            poisson_gradient, input_index=0, output_index=0, name_base='poisson_solve')
    pressure = CenteredGrid(pressure, input_field.box, extrapolation=input_field.extrapolation, name='pressure')
    return pressure, iteration
def solve(self, divergence, domain, pressure_guess):
    """Sparse-CG pressure solve on any backend (SciPy matrix or sparse tensor).

    :param divergence: divergence tensor, shape (batch, *spatial, channel)
    :param domain: FluidDomain providing active/accessible masks
    :param pressure_guess: initial pressure guess, or None
    :return: (pressure, iteration count)
    """
    assert isinstance(domain, FluidDomain)
    active = domain.active_tensor(extend=1)
    accessible = domain.accessible_tensor(extend=1)
    dims = list(divergence.shape[1:-1])
    matrix_size = int(np.prod(dims))
    backend = math.choose_backend(divergence)
    # Build the Laplacian operator A in the form the backend understands.
    if backend.matches_name('SciPy'):
        A = sparse_pressure_matrix(dims, active, accessible)
    else:
        indices, sorting = sparse_indices(dims)
        values = sparse_values(dims, active, accessible, sorting)
        A = backend.sparse_tensor(indices=indices, values=values, shape=[matrix_size, matrix_size])
    if self.autodiff:
        return sparse_cg(divergence, A, self.max_iterations, pressure_guess, self.accuracy, back_prop=True)

    def pressure_gradient(op, grad):
        # grad_iteration_limit is late-bound: assigned after the forward
        # solve below, read only when backprop fires.
        return sparse_cg(grad, A, grad_iteration_limit, None, self.gradient_accuracy)[0]

    pressure, iteration = math.with_custom_gradient(
        sparse_cg,
        [divergence, A, self.max_iterations, pressure_guess, self.accuracy],
        pressure_gradient, input_index=0, output_index=0, name_base='scg_pressure_solve')
    # 'mirror' reuses the forward iteration count as the backward limit.
    grad_iteration_limit = iteration if self.max_gradient_iterations == 'mirror' else self.max_gradient_iterations
    return pressure, iteration
def solve(self, divergence, domain, pressure_guess):
    """CUDA pressure solve with an implicit gradient.

    :param divergence: divergence tensor to solve against
    :param domain: domain providing active/accessible masks
    :param pressure_guess: unused — the kernel takes the last pressure value
        as its initial guess
    :return: (pressure, iteration count)
    """
    active = domain.active_tensor(extend=1)
    accessible = domain.accessible_tensor(extend=1)

    def pressure_gradient(op, grad):
        # grad_iteration_limit is late-bound: assigned after the forward
        # solve below, read only when backprop fires.
        return cuda_solve_forward(grad, active, accessible, self.gradient_accuracy, grad_iteration_limit)[0]

    pressure, iteration = math.with_custom_gradient(
        cuda_solve_forward,
        [divergence, active, accessible, self.accuracy, self.max_iterations],
        pressure_gradient, input_index=0, output_index=0, name_base='cuda_pressure_solve')
    # 'mirror' reuses the forward iteration count as the backward limit.
    grad_iteration_limit = iteration if self.max_gradient_iterations == 'mirror' else self.max_gradient_iterations
    return pressure, iteration