Example #1
  def test_from_edge(self, model_cls, model_kwargs):

    class Equation(equations.Equation):
      METHOD = polynomials.Method.FINITE_DIFFERENCE

      def __init__(self):
        self.key_definitions = {
            'c': StateDef('concentration', (), NO_DERIVATIVES, NO_OFFSET),
            'c_edge_x':
                StateDef('concentration', (), NO_DERIVATIVES, X_PLUS_HALF),
            'c_edge_y':
                StateDef('concentration', (), NO_DERIVATIVES, Y_PLUS_HALF),
            'c_x': StateDef('concentration', (), D_X, NO_OFFSET),
            'c_x_edge_x': StateDef('concentration', (), D_X, X_PLUS_HALF),
            'c_xx': StateDef('concentration', (), D_XX, NO_OFFSET),
        }
        self.evolving_keys = {'c_edge_x'}
        self.constant_keys = set()

    grid = grids.Grid.from_period(10, length=1)
    equation = Equation()
    model = model_cls(equation, grid, **model_kwargs)

    inputs = tf.convert_to_tensor(
        np.random.RandomState(0).random_sample((1,) + grid.shape), tf.float32)

    # create variables, then reset them all to zero
    model.spatial_derivatives({'c_edge_x': inputs})
    for variable in model.variables:
      variable.assign(tf.zeros_like(variable))

    actual_derivatives = model.spatial_derivatives({'c_edge_x': inputs})

    expected_derivatives = {
        'c': (tensor_ops.roll_2d(inputs, (1, 0)) + inputs) / 2,
        'c_edge_x': inputs,
        'c_edge_y': (
            inputs
            + tensor_ops.roll_2d(inputs, (0, -1))
            + tensor_ops.roll_2d(inputs, (1, 0))
            + tensor_ops.roll_2d(inputs, (1, -1))
        ) / 4,
        'c_x': (-tensor_ops.roll_2d(inputs, (1, 0)) + inputs) / grid.step,
        'c_x_edge_x': (
            -tensor_ops.roll_2d(inputs, (1, 0))
            + tensor_ops.roll_2d(inputs, (-1, 0))) / (2 * grid.step),
        'c_xx': (1/2 * tensor_ops.roll_2d(inputs, (2, 0))
                 - 1/2 * tensor_ops.roll_2d(inputs, (1, 0))
                 - 1/2 * inputs
                 + 1/2 * tensor_ops.roll_2d(inputs, (-1, 0))) / grid.step ** 2,
    }

    for key, expected in sorted(expected_derivatives.items()):
      np.testing.assert_allclose(
          actual_derivatives[key], expected,
          atol=1e-5, rtol=1e-5, err_msg=repr(key))
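
The expected values encode standard staggered-grid stencils. The evolving state lives on x-edges (offset +1/2), and `roll_2d(inputs, (1, 0))` fetches the edge one cell to the left, so the reconstructions being tested are (a sketch of the intended stencils, written out from the code above, not taken from the library):

    c_i = (c_{i-1/2} + c_{i+1/2}) / 2
    (c_x)_i = (c_{i+1/2} - c_{i-1/2}) / \Delta x

with \Delta x equal to `grid.step`; `c_edge_y` is likewise the average of the four x-edge values surrounding each y-edge point.
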
Example #2
    def time_derivative(self, grid, concentration, x_velocity, y_velocity):
        """See base class."""
        c = concentration
        c_right = tensor_ops.roll_2d(c, (-1, 0))
        c_top = tensor_ops.roll_2d(c, (0, -1))

        x_flux = x_velocity * tf.where(x_velocity > 0, c, c_right)
        y_flux = y_velocity * tf.where(y_velocity > 0, c, c_top)
        c_t = flux_to_time_derivative(x_flux, y_flux, grid.step)
        return {'concentration': c_t}
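
The `tf.where` calls implement first-order upwinding: the flux through each edge takes the concentration from whichever side the velocity is blowing from. Since `roll_2d(c, (-1, 0))` brings cell i+1 into position i, the x-flux at edge i+1/2 is

    F^x_{i+1/2} = u_{i+1/2} c_i      if u_{i+1/2} > 0
    F^x_{i+1/2} = u_{i+1/2} c_{i+1}  otherwise

and the y-flux is built the same way from `roll_2d(c, (0, -1))`.
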
Example #3
def flux_to_time_derivative(x_flux_edge_x, y_flux_edge_y, grid_step):
    """Use continuity to convert from fluxes to a time derivative."""
    # right - left + top - bottom
    numerator = tf.add_n([
        x_flux_edge_x,
        -tensor_ops.roll_2d(x_flux_edge_x, (1, 0)),
        y_flux_edge_y,
        -tensor_ops.roll_2d(y_flux_edge_y, (0, 1)),
    ])
    return -(1 / grid_step) * numerator
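
Every example here leans on `tensor_ops.roll_2d`, which from its usage is a periodic (circular) shift of the last two axes, so that `roll_2d(x, (1, 0))[i, j] == x[i - 1, j]` under periodic boundaries. A minimal sketch with equivalent semantics, assuming `tf.roll` captures the real implementation's behavior (the actual `tensor_ops` module may differ):

import tensorflow as tf

def roll_2d(tensor, shifts):
  # Periodically shift the trailing two (spatial) axes by (shift_x, shift_y).
  # Counting axes from the end makes this work for both (x, y) tensors and
  # batched (batch, x, y) tensors.
  shift_x, shift_y = shifts
  return tf.roll(tensor, shift=(shift_x, shift_y), axis=(-2, -1))

With that reading, `flux_to_time_derivative` is the discrete continuity equation dc/dt = -div F: each `F - roll_2d(F, ...)` difference is the flux out through a cell's right/top edge minus the flux in through its left/bottom edge, divided by the cell width.
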
Example #4
    def test_roll_consistency(self, shifts):
        batch_input = tf.random.uniform(shape=(2, 50, 50))
        single_input = tf.random.uniform(shape=(25, 25))

        batch_manip = tf_roll_2d(batch_input, shifts)
        batch_concat = tensor_ops.roll_2d(batch_input, shifts)
        np.testing.assert_allclose(batch_manip, batch_concat)

        single_manip = tf_roll_2d(single_input, shifts)
        single_concat = tensor_ops.roll_2d(single_input, shifts)
        np.testing.assert_allclose(single_manip, single_concat)
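
The `_manip`/`_concat` suffixes suggest the test pits a `tf.roll`-style implementation against a concatenation-based one in `tensor_ops.roll_2d`. A hypothetical concat-based roll with the same periodic semantics, assuming statically known shapes (the names below are illustrative, not the library's):

import tensorflow as tf

def _roll_concat(tensor, shift, axis):
  # Periodic roll along one axis: split the tensor at the wrap point and
  # swap the two pieces. Matches tf.roll for statically known shapes.
  size = int(tensor.shape[axis])
  shift %= size
  if shift == 0:
    return tensor
  head, tail = tf.split(tensor, [size - shift, shift], axis=axis)
  return tf.concat([tail, head], axis=axis)

def roll_2d_concat(tensor, shifts):
  # Roll the last two (spatial) axes in turn.
  shift_x, shift_y = shifts
  return _roll_concat(_roll_concat(tensor, shift_x, axis=-2), shift_y, axis=-1)
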
Example #5
    def time_derivative(self, grid, concentration, x_velocity, y_velocity,
                        concentration_x_edge_x, concentration_y_edge_y):
        """See base class."""
        c = concentration
        c_right = tensor_ops.roll_2d(c, (-1, 0))
        c_top = tensor_ops.roll_2d(c, (0, -1))

        D = self.diffusion_coefficient  # pylint: disable=invalid-name
        x_flux = (x_velocity * tf.where(x_velocity > 0, c, c_right) -
                  D * concentration_x_edge_x)
        y_flux = (y_velocity * tf.where(y_velocity > 0, c, c_top) -
                  D * concentration_y_edge_y)
        c_t = flux_to_time_derivative(x_flux, y_flux, grid.step)
        return {'concentration': c_t}
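
This extends the pure-advection flux from Example #2 with a diffusive term: each edge flux subtracts the diffusion coefficient times the concentration gradient already evaluated at that edge,

    F^x_{i+1/2} = u_{i+1/2} c_upwind - D (c_x)_{i+1/2}

so the same `flux_to_time_derivative` helper yields the semi-discrete advection-diffusion equation dc/dt = -div(u c - D grad c).
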
Example #6
  def forward(self, state):
    index_x = slice(None, None, -1 if Dimension.X in self.axes else 1)
    index_y = slice(None, None, -1 if Dimension.Y in self.axes else 1)
    result = {}
    for key, tensor in state.items():
      definition = self.definitions[key]

      # If our grid is staggered along a reflected axis, we need a shift to
      # compensate for the staggered grid. For example, consider a grid of 6
      # values, with periodic boundary conditions where we reflect over the line
      # marked with |:
      #
      # Originals:
      #   Offset = 0:   0   1   2   3   4   5
      #  Offset = +1:     a   b   c   d   e   f
      # Axis of reflection:       |
      # Reflections:
      #   Offset = 0:   5   4   3   2   1   0
      #  Offset = +1: f   e   d   c   b   a
      #
      # To make the reflected values with offset=+1 align with offsets used by
      # the original grid definition, they should be rolled by -1, i.e.,
      #   Offset = 0:   5   4   3   2   1   0
      #  Offset = +1:     e   d   c   b   a   f
      shift = tuple(-offset if _POSITION_TO_DIMENSION[i] in self.axes else 0
                    for i, offset in enumerate(definition.offset))

      # TODO(shoyer): consider adding a notion of how quantities transform
      # (e.g., vector vs. pseudo-vector) explicitly into our data model.
      # https://en.wikipedia.org/wiki/Parity_(physics)#Effect_of_spatial_inversion_on_some_variables_of_classical_physics
      num_sign_flips = (
          # sign flips due to derivatives, e.g., d/dx -> -d/dx.
          sum(order if _POSITION_TO_DIMENSION[i] in self.axes else 0
              for i, order in enumerate(definition.derivative_orders[:2]))
          # sign flips for vector quantities, e.g., x_velocity -> -x_velocity.
          + sum(dim in self.axes for dim in definition.tensor_indices)
      )
      sign = -1 if num_sign_flips % 2 else 1

      result[key] = sign * tensor_ops.roll_2d(
          tensor[..., index_x, index_y], shift)

    return result
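
The staggered-grid shift described in the long comment is easy to check numerically. A standalone NumPy demo of the comment's six-value example (not library code):

import numpy as np

# Six periodic samples at offset +1/2, as in the comment above.
vals = np.array(list('abcdef'))
reflected = vals[::-1]              # f e d c b a
realigned = np.roll(reflected, -1)  # e d c b a f, matching the comment
print(realigned)

This is exactly the `shift = -offset` rule in the code: an offset of +1 along a reflected axis yields a roll of -1.
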
Example #7
  def time_derivative(self, grid, c, c_x, c_y):
    del grid, c  # unused
    c_xx = c_x - tensor_ops.roll_2d(c_x, (1, 0))
    c_yy = c_y - tensor_ops.roll_2d(c_y, (0, 1))
    return {'c': c_xx + c_yy}
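
Given first derivatives `c_x` and `c_y` already evaluated on the staggered edges, the backward differences `c_x - roll_2d(c_x, (1, 0))` and `c_y - roll_2d(c_y, (0, 1))` assemble a discrete Laplacian, so this evolves the heat equation dc/dt = c_xx + c_yy. `grid` is deliberately unused; the 1/\Delta x factors are presumably folded into the supplied derivatives upstream.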