Example #1
  def test_polynomial_accuracy_layer_consistency(
      self, grid, method, derivative_order, accuracy_order=2):
    args = (np.array(grid), method, derivative_order, accuracy_order)
    A, b = polynomials.constraints(*args)  # pylint: disable=invalid-name
    layer = polynomials.PolynomialAccuracyLayer(*args)

    inputs = np.random.RandomState(0).randn(10, layer.input_size)
    outputs = layer.bias + np.einsum('bi,ij->bj', inputs, layer.nullspace)

    # Any output built from the layer's bias and nullspace must satisfy the
    # accuracy constraints A @ out = b exactly.
    residual = np.einsum('ij,bj->bi', A, outputs) - b
    np.testing.assert_allclose(residual, 0, atol=1e-7)
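The consistency check above works because `PolynomialAccuracyLayer` parametrizes the affine solution set of the constraints: any output of the form bias + inputs @ nullspace satisfies A @ out = b. A minimal NumPy sketch of that parametrization, assuming a toy constraint system in place of `polynomials.constraints` (all names below are illustrative):

import numpy as np

# Toy stand-in for the (A, b) returned by polynomials.constraints:
# 2 equations in 5 unknowns.
rng = np.random.RandomState(0)
A = rng.randn(2, 5)
b = rng.randn(2)

# Particular solution (the "bias") via least squares; nullspace basis via SVD.
bias, *_ = np.linalg.lstsq(A, b, rcond=None)
_, s, vh = np.linalg.svd(A)
nullspace = vh[len(s):].T  # columns span {x : A @ x = 0}

# Any inputs now map to outputs satisfying the constraints exactly.
inputs = rng.randn(10, nullspace.shape[1])
outputs = bias + inputs @ nullspace.T
np.testing.assert_allclose(A @ outputs.T - b[:, None], 0, atol=1e-7)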
Example #2
  def test_polynomial_accuracy_layer_bias_zero_padding(self):
    # With bias_zero_padding=(0, 1), the bias is computed on the grid with one
    # point trimmed from the right, then padded with a trailing zero.
    layer = polynomials.PolynomialAccuracyLayer(
        np.array([-1.5, -0.5, 0.5, 1.5]),
        FINITE_DIFF,
        derivative_order=0,
        bias_zero_padding=(0, 1))
    expected_bias = np.concatenate([
        polynomials.coefficients(
            np.array([-1.5, -0.5, 0.5]), FINITE_DIFF, derivative_order=0),
        [0.0],
    ])
    np.testing.assert_allclose(layer.bias, expected_bias)
Example #3
def predict_coefficients(inputs: tf.Tensor,
                         hparams: tf.contrib.training.HParams,
                         reuse: object = tf.AUTO_REUSE) -> tf.Tensor:
    """Predict finite difference coefficients with a neural networks.

  Args:
    inputs: float32 Tensor with dimensions [batch, x].
    hparams: model hyperparameters.
    reuse: whether or not to reuse TensorFlow variables.

  Returns:
    Float32 Tensor with dimensions [batch, x, derivative, coefficient].

  Raises:
    ValueError: if inputs does not have the expected size for the equation.
    ValueError: if polynomial accuracy constraints are infeasible.
  """
    # TODO(shoyer): refactor to use layer classes to hold variables, like
    # tf.keras.layers, instead of relying on reuse.
    _, equation = equations.from_hparams(hparams)
    assert_consistent_solution(equation, inputs)

    with tf.variable_scope('predict_coefficients', reuse=reuse):
        num_derivatives = len(equation.DERIVATIVE_ORDERS)

        grid = polynomials.regular_grid(
            equation.GRID_OFFSET,
            derivative_order=0,
            accuracy_order=hparams.coefficient_grid_min_size,
            dx=equation.grid.solution_dx)

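        # Add a channel dimension and rescale the inputs to unit standard
        # deviation before the convolutional stack.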
        net = inputs[:, :, tf.newaxis]
        net /= equation.standard_deviation

        activation = _NONLINEARITIES[hparams.nonlinearity]

        for _ in range(hparams.num_layers - 1):
            net = layers.conv1d_periodic_layer(net,
                                               filters=hparams.filter_size,
                                               kernel_size=hparams.kernel_size,
                                               activation=activation,
                                               center=True)

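        # Without polynomial accuracy constraints, a final linear convolution
        # predicts all finite difference coefficients directly.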
        if not hparams.polynomial_accuracy_order:
            if hparams.num_layers == 0:
                raise NotImplementedError

            net = layers.conv1d_periodic_layer(
                net,
                filters=num_derivatives * grid.size,
                kernel_size=hparams.kernel_size,
                activation=None,
                center=True)
            new_dims = [num_derivatives, grid.size]
            outputs = tf.reshape(
                net, tf.concat([tf.shape(inputs), new_dims], axis=0))
            outputs.set_shape(inputs.shape[:2].concatenate(new_dims))

            if hparams.ensure_unbiased_coefficients:
                if 0 in equation.DERIVATIVE_ORDERS:
                    raise ValueError(
                        'ensure_unbiased not yet supported for 0th order '
                        'spatial derivatives')
                outputs -= tf.reduce_mean(outputs, axis=-1, keepdims=True)

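        # With polynomial accuracy constraints, the network instead predicts
        # inputs to a PolynomialAccuracyLayer for each derivative order; each
        # layer maps its inputs onto coefficients that satisfy the accuracy
        # constraints by construction.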
        else:
            poly_accuracy_layers = []

            for derivative_order in equation.DERIVATIVE_ORDERS:
                method = FINITE_VOL if equation.CONSERVATIVE else FINITE_DIFF
                poly_accuracy_layers.append(
                    polynomials.PolynomialAccuracyLayer(
                        grid=grid,
                        method=method,
                        derivative_order=derivative_order,
                        accuracy_order=hparams.polynomial_accuracy_order,
                        out_scale=hparams.polynomial_accuracy_scale))
            input_sizes = [layer.input_size for layer in poly_accuracy_layers]

            if hparams.num_layers > 0:
                net = layers.conv1d_periodic_layer(
                    net,
                    filters=sum(input_sizes),
                    kernel_size=hparams.kernel_size,
                    activation=None,
                    center=True)
            else:
                initializer = tf.initializers.zeros()
                coefficients = tf.get_variable('coefficients',
                                               (sum(input_sizes), ),
                                               initializer=initializer)
                net = tf.tile(coefficients[tf.newaxis, tf.newaxis, :],
                              [tf.shape(inputs)[0], inputs.shape[1].value, 1])

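            # Split the network output into one chunk per derivative order and
            # feed each chunk through its polynomial accuracy layer.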
            cum_sizes = np.cumsum(input_sizes)
            starts = [0] + cum_sizes[:-1].tolist()
            stops = cum_sizes.tolist()
            zipped = zip(starts, stops, poly_accuracy_layers)

            outputs = tf.stack(
                [layer.apply(net[..., start:stop])
                 for start, stop, layer in zipped],
                axis=-2)
            assert outputs.shape.as_list()[-1] == grid.size

        return outputs
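A minimal usage sketch for `predict_coefficients`, assuming TF1 graph mode and an `hparams` object built by the project's own hyperparameter helpers (the grid width 32 and batch size 4 below are illustrative, and must match the equation's expected solution size, which `assert_consistent_solution` checks):

import numpy as np
import tensorflow as tf

inputs = tf.placeholder(tf.float32, shape=[None, 32])  # [batch, x]
coefficients = predict_coefficients(inputs, hparams)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # Result has shape [batch, x, derivative, coefficient].
  result = sess.run(coefficients, feed_dict={inputs: np.random.randn(4, 32)})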