Example #1
0
 def __add__(self, other):
     """Combine this gravity force with another.

     :param other: another Gravity instance, or the integer 0 (identity
         element, so that ``sum()`` over a list of forces works).
     :return: self if other == 0, otherwise a new Gravity holding the sum.
     """
     # 0 is the additive identity; compare by equality, not identity --
     # `is 0` relies on CPython small-int caching and raises a
     # SyntaxWarning on Python >= 3.8.
     if other == 0:
         return self
     assert isinstance(other, Gravity), type(other)
     if self._batch_size is not None:
         assert self._batch_size == other._batch_size
     # Two scalars add directly; otherwise promote both to rank-matched tensors.
     if math.is_scalar(self.gravity) and math.is_scalar(other.gravity):
         return Gravity(self.gravity + other.gravity)
     # Infer the spatial rank from whichever operand is a vector.
     if math.is_scalar(self.gravity):
         rank = math.staticshape(other.gravity)[-1]
     else:
         rank = math.staticshape(self.gravity)[-1]
     return Gravity(gravity_tensor(self, rank) + gravity_tensor(other, rank))
Example #2
0
def gravity_tensor(gravity, rank):
    """Return *gravity* as a float tensor of spatial rank ``rank``.

    :param gravity: a Gravity instance, a scalar, or a vector whose last
        dimension has size ``rank``.
    :param rank: spatial rank of the simulation.
    :return: float tensor with leading singleton dimensions added so that
        it has ``rank + 2`` dimensions in total.
    """
    # Unwrap a Gravity wrapper to its raw value.
    value = gravity.gravity if isinstance(gravity, Gravity) else gravity
    if math.is_scalar(value):
        # A scalar acts along the global 'up' axis.
        value = value * GLOBAL_AXIS_ORDER.up_vector(rank)
    assert math.staticshape(value)[-1] == rank
    # Pad with leading singleton dims up to batch + spatial + component layout.
    missing_dims = rank + 2 - len(math.staticshape(value))
    return math.to_float(math.expand_dims(value, 0, missing_dims))
Example #3
0
    def __init__(self,
                 accuracy=1e-5,
                 gradient_accuracy='same',
                 max_iterations=2000,
                 max_gradient_iterations='same',
                 autodiff=False):
        """
        Conjugate gradient solver using sparse matrix multiplications.

        :param accuracy: the maximally allowed error for each cell, measured in terms of field values.
        :param gradient_accuracy: accuracy applied during backpropagation, number or 'same' to use forward accuracy
        :param max_iterations: integer specifying maximum conjugate gradient loop iterations or None for no limit
        :param max_gradient_iterations: maximum loop iterations during backpropagation,
            'same' uses the number from max_iterations,
            'mirror' sets the maximum to the number of iterations that were actually performed in the forward pass
        :param autodiff: If autodiff=True, use the built-in autodiff for backpropagation.
            The intermediate results of each loop iteration will be permanently stored if backpropagation is used.
            If False, replaces autodiff by a forward pressure solve in reverse accumulation backpropagation.
            This requires less memory but is only accurate if the solution is fully converged.
        """
        PoissonSolver.__init__(self,
                               'Sparse Conjugate Gradient',
                               supported_devices=('CPU', 'GPU'),
                               supports_guess=True,
                               supports_loop_counter=True,
                               supports_continuous_masks=True)
        # Validate configuration values before storing them.
        assert math.is_scalar(accuracy), 'invalid accuracy: %s' % accuracy
        assert gradient_accuracy == 'same' or math.is_scalar(
            gradient_accuracy
        ), 'invalid gradient_accuracy: %s' % gradient_accuracy
        assert max_gradient_iterations in ['same', 'mirror'] or isinstance(
            max_gradient_iterations, int
        ), 'invalid max_gradient_iterations: %s' % max_gradient_iterations
        self.accuracy = accuracy
        # 'same' mirrors the forward accuracy for the backward pass.
        self.gradient_accuracy = accuracy if gradient_accuracy == 'same' else gradient_accuracy
        self.max_iterations = max_iterations
        if max_gradient_iterations == 'same':
            self.max_gradient_iterations = max_iterations
        elif max_gradient_iterations == 'mirror':
            # 'mirror' is resolved later, once the forward iteration count is known.
            self.max_gradient_iterations = 'mirror'
        else:
            self.max_gradient_iterations = max_gradient_iterations
            # An explicit iteration cap is only honored by the custom backward
            # solve, so it is incompatible with built-in autodiff.
            assert not autodiff, 'Cannot specify max_gradient_iterations when autodiff=True'
        self.autodiff = autodiff
Example #4
0
def gravity_tensor(gravity, rank):
    """Return *gravity* as a float tensor of spatial rank ``rank``.

    :param gravity: a Gravity instance, a scalar, or a vector whose last
        dimension has size ``rank``.
    :param rank: spatial rank of the simulation.
    :return: float tensor with leading singleton dimensions added so that
        it has ``rank + 2`` dimensions in total.
    """
    # Unwrap a Gravity wrapper to its raw value.
    value = gravity.gravity if isinstance(gravity, Gravity) else gravity
    if math.is_scalar(value):
        # A scalar acts along the first axis; remaining components are zero.
        vector = [value] + [0] * (rank - 1)
        return math.to_float(math.expand_dims(vector, 0, rank + 1))
    assert math.staticshape(value)[-1] == rank
    # Pad with leading singleton dims up to batch + spatial + component layout.
    missing_dims = rank + 2 - len(math.staticshape(value))
    return math.to_float(math.expand_dims(value, 0, missing_dims))
Example #5
0
    def __init__(self, accuracy=1e-5, max_iterations=2000):
        """
        Conjugate gradient solver that geometrically calculates laplace pressure in each iteration.
        Unlike most other solvers, this algorithm is TPU compatible but usually performs worse than SparseCG.

        Obstacles are allowed to vary between examples but the same number of iterations is performed for each example in one batch.

        :param accuracy: the maximally allowed error on the divergence channel for each cell
        :param max_iterations: integer specifying maximum conjugate gradient loop iterations or None for no limit
        """
        PoissonSolver.__init__(self,
                               'Single-Phase Conjugate Gradient',
                               supported_devices=('CPU', 'GPU', 'TPU'),
                               supports_guess=True,
                               supports_loop_counter=True,
                               supports_continuous_masks=True)
        # Validate the convergence threshold before storing configuration.
        assert math.is_scalar(accuracy), 'invalid accuracy: %s' % accuracy
        self.accuracy = accuracy
        self.max_iterations = max_iterations