Example #1
def predict_result(inputs: tf.Tensor,
                   hparams: tf.contrib.training.HParams) -> tf.Tensor:
    """Infer predictions from inputs with our forward model.

  Args:
    inputs: float32 Tensor with dimensions [batch, x].
    hparams: model hyperparameters.

  Returns:
    Float32 Tensor with dimensions [batch, x] with inferred time derivatives.
  """
    if hparams.model_target in {'flux', 'time_derivative'}:
        if hparams.space_derivatives_weight:
            raise ValueError(
                'space derivatives are not predicted by model {}'.format(
                    hparams.model_target))
        _, equation = equations.from_hparams(hparams)
        num_derivatives = len(equation.DERIVATIVE_ORDERS)
        # this branch does not predict space derivatives, so substitute
        # dummy all-zero values
        space_derivatives = tf.zeros(
            tf.concat([tf.shape(inputs), [num_derivatives]], axis=0))
        time_derivative = predict_time_derivative(inputs, hparams)
    else:
        space_derivatives = predict_space_derivatives(inputs, hparams)
        _, equation = equations.from_hparams(hparams)
        time_derivative = apply_space_derivatives(space_derivatives, inputs,
                                                  equation)

    if hparams.num_time_steps:
        integrated_solution = predict_time_evolution(inputs, hparams)
    else:
        integrated_solution = None

    return result_stack(space_derivatives, time_derivative,
                        integrated_solution)
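
# Hypothetical usage sketch (not from the original source): build the
# prediction op for a batch of coarse solutions. Assumes `hparams` was loaded
# elsewhere, e.g. with training.load_hparams as in the integration examples
# below, and that the grid size (256 here) matches the equation's coarse grid.
inputs = tf.placeholder(tf.float32, shape=[None, 256])  # [batch, x]
predictions = predict_result(inputs, hparams)  # stacked prediction heads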
Example #2
def integrate_model_from_warm_start(
    checkpoint_dir: str,
    y0: np.ndarray,
    hparams: tf.contrib.training.HParams = None,
    random_seed: int = 0,
    times: np.ndarray = _DEFAULT_TIMES,
    warmup: float = 0,
    integrate_method: str = 'RK23') -> xarray.Dataset:
  """Integrate the given PDE with standard and modeled finite differences."""

  if hparams is None:
    hparams = training.load_hparams(checkpoint_dir)

  logging.info('integrating %s with seed=%s', hparams.equation, random_seed)
  _, equation_coarse = equations.from_hparams(hparams, random_seed=random_seed)

  logging.info('solving neural network model at low resolution')
  checkpoint_path = training.checkpoint_dir_to_path(checkpoint_dir)
  differentiator = SavedModelDifferentiator(
      checkpoint_path, equation_coarse, hparams)
  solution_model, num_evals_model = odeint(
      y0, differentiator, warmup+times, method=integrate_method)

  results = xarray.Dataset(
      data_vars={'y': (('time', 'x'), solution_model)},
      coords={'time': warmup+times,
              'x': equation_coarse.grid.solution_x,
              'num_evals': num_evals_model})
  return results
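
# Hypothetical usage (not from the original source): integrate a trained model
# from a warm-started initial condition and persist the result. The checkpoint
# path, grid size and time axis are placeholder values.
y0 = np.zeros(256)  # initial condition on the coarse grid
ds = integrate_model_from_warm_start('/path/to/checkpoint_dir', y0,
                                     times=np.linspace(0, 10, num=101))
ds.to_netcdf('model_solution.nc')  # xarray datasets serialize to netCDF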
Example #3
def predict_flux_directly(inputs, hparams, reuse=tf.AUTO_REUSE):
    """Predict flux directly, without using the equation of motion."""
    _, equation = equations.from_hparams(hparams)
    dx = equation.grid.solution_dx
    output = _multilayer_conv1d(inputs, hparams, num_targets=1, reuse=reuse)
    flux = tf.squeeze(output, axis=-1)
    return equations.staggered_first_derivative(flux, dx)
Example #4
def integrate_exact_baseline_and_model(
    checkpoint_dir: str,
    hparams: tf.contrib.training.HParams = None,
    random_seed: int = 0,
    times: np.ndarray = _DEFAULT_TIMES,
    warmup: float = 0,
    integrate_method: str = 'RK23',
    exact_filter_interval: float = None) -> xarray.Dataset:
  """Integrate the given PDE with standard and modeled finite differences."""

  if hparams is None:
    hparams = training.load_hparams(checkpoint_dir)

  logging.info('integrating %s with seed=%s', hparams.equation, random_seed)
  equation_fine, equation_coarse = equations.from_hparams(
      hparams, random_seed=random_seed)

  logging.info('solving the "exact" model at high resolution')
  ds_solution_exact = integrate_exact(
      equation_fine, times, warmup, integrate_method=integrate_method,
      filter_interval=exact_filter_interval)
  solution_exact = ds_solution_exact['y'].data
  num_evals_exact = ds_solution_exact['num_evals'].item()

  # resample to the coarse grid
  y0 = equation_coarse.grid.resample(solution_exact[0, :])

  if np.isnan(y0).any():
    raise ValueError('solution contains NaNs')

  logging.info('solving baseline finite differences at low resolution')
  differentiator = PolynomialDifferentiator(equation_coarse)
  solution_baseline, num_evals_baseline = odeint(
      y0, differentiator, warmup+times, method=integrate_method)

  logging.info('solving neural network model at low resolution')
  checkpoint_path = training.checkpoint_dir_to_path(checkpoint_dir)
  differentiator = SavedModelDifferentiator(
      checkpoint_path, equation_coarse, hparams)
  solution_model, num_evals_model = odeint(
      y0, differentiator, warmup+times, method=integrate_method)

  results = xarray.Dataset({
      'y_exact': (('time', 'x_high'), solution_exact),
      'y_baseline': (('time', 'x_low'), solution_baseline),
      'y_model': (('time', 'x_low'), solution_model),
  }, coords={
      'time': warmup+times,
      'x_low': equation_coarse.grid.solution_x,
      'x_high': equation_fine.grid.solution_x,
      'num_evals_exact': num_evals_exact,
      'num_evals_baseline': num_evals_baseline,
      'num_evals_model': num_evals_model,
  })
  return results
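
# Hypothetical post-processing sketch (not part of the original source):
# compare the learned model against the baseline on the shared coarse grid,
# using xarray's label-based arithmetic on the dataset returned above.
ds = integrate_exact_baseline_and_model('/path/to/checkpoint_dir')
model_vs_baseline = abs(ds['y_model'] - ds['y_baseline']).mean('x_low')
print(model_vs_baseline.to_series().head())  # mean gap per time step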
Example #5
    def __init__(self,
                 snapshots: np.ndarray,
                 hparams: tf.contrib.training.HParams,
                 training: bool = False):
        """Initialize an object for running inference.

    Args:
      snapshots: np.ndarray with shape [examples, x] with high-resolution
        training data.
      hparams: hyperparameters for training.
      training: whether to evaluate on training or validation datasets.
    """
        if training:
            dataset_type = model.Dataset.TRAINING
        else:
            dataset_type = model.Dataset.VALIDATION
        dataset = model.make_dataset(snapshots,
                                     hparams,
                                     dataset_type=dataset_type,
                                     repeat=False,
                                     evaluation=True)
        iterator = dataset.make_initializable_iterator()
        data = iterator.get_next()

        _, coarse_equation = equations.from_hparams(hparams)

        predictions = model.predict_result(data['inputs'], hparams)
        loss_per_head = model.loss_per_head(predictions,
                                            labels=data['labels'],
                                            baseline=data['baseline'],
                                            hparams=hparams)
        loss = model.weighted_loss(loss_per_head, hparams)

        results = dict(data, predictions=predictions)
        metrics = {
            k: tf.contrib.metrics.streaming_concat(v)
            for k, v in results.items()
        }
        metrics['loss'] = tf.metrics.mean(loss)

        space_loss, time_loss, integrated_loss = model.result_unstack(
            loss_per_head, coarse_equation)
        metrics['loss/space_derivatives'] = tf.metrics.mean(space_loss)
        metrics['loss/time_derivative'] = tf.metrics.mean(time_loss)
        if integrated_loss is not None:
            metrics['loss/integrated_solution'] = tf.metrics.mean(
                integrated_loss)

        initializer = tf.group(iterator.initializer,
                               tf.local_variables_initializer())

        self._initializer = initializer
        self._metrics = metrics
Example #6
def predict_time_derivative(inputs: tf.Tensor,
                            hparams: tf.contrib.training.HParams,
                            reuse: object = tf.AUTO_REUSE) -> tf.Tensor:
    """Infer time evolution from inputs with our forward model.

  Args:
    inputs: float32 Tensor with dimensions [batch, x].
    hparams: model hyperparameters.
    reuse: whether or not to reuse TensorFlow variables.

  Returns:
    Float32 Tensor with dimensions [batch, x] with inferred time derivatives.
  """
    space_derivatives = predict_space_derivatives(inputs, hparams, reuse=reuse)
    _, equation = equations.from_hparams(hparams)
    return apply_space_derivatives(space_derivatives, inputs, equation)
Example #7
def predict_time_evolution(inputs: tf.Tensor,
                           hparams: tf.contrib.training.HParams) -> tf.Tensor:
    """Infer time evolution from inputs with our neural network model.

  Args:
    inputs: float32 Tensor with dimensions [batch, x].
    hparams: model hyperparameters.

  Returns:
    Float32 Tensor with dimensions [batch, x, num_time_steps+1] with the
    integrated solution.
  """
    def func(y, t):
        del t  # unused
        return predict_time_derivative(y, hparams, reuse=True)

    _, equation = equations.from_hparams(hparams)
    return integrate_ode(func, inputs, hparams.num_time_steps,
                         equation.time_step)
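
# integrate_ode is defined elsewhere in the package. A minimal forward-Euler
# sketch with the same signature and output shape (an assumption, not the
# original implementation, which may use a higher-order scheme):
def integrate_ode_sketch(func, y0, num_time_steps, time_step):
    """Integrate dy/dt = func(y, t) from y0: [batch, x, num_time_steps+1]."""
    ys = [y0]
    for step in range(num_time_steps):
        t = step * time_step
        ys.append(ys[-1] + time_step * func(ys[-1], t))
    return tf.stack(ys, axis=-1)  # new trailing time axis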
Example #8
def _multilayer_conv1d(inputs, hparams, num_targets, reuse=tf.AUTO_REUSE):
    """Apply multiple conv1d layers with input normalization."""
    _, equation = equations.from_hparams(hparams)
    assert_consistent_solution(equation, inputs)

    net = inputs[:, :, tf.newaxis]
    net /= equation.standard_deviation

    activation = _NONLINEARITIES[hparams.nonlinearity]
    if hparams.num_layers == 0:
        raise NotImplementedError('num_layers == 0 is not supported')
    for _ in range(hparams.num_layers - 1):
        net = layers.conv1d_periodic_layer(net,
                                           filters=hparams.filter_size,
                                           kernel_size=hparams.kernel_size,
                                           activation=activation,
                                           center=True)
    net = layers.conv1d_periodic_layer(net,
                                       filters=num_targets,
                                       kernel_size=hparams.kernel_size,
                                       activation=None,
                                       center=True)
    return net
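
# conv1d_periodic_layer comes from a helper module that is not shown here. One
# plausible implementation (an assumption, not the original) pads the spatial
# axis with wrap-around values so a 'valid' convolution respects the periodic
# boundary conditions of these equations:
def conv1d_periodic_sketch(inputs, filters, kernel_size, activation=None,
                           center=True):
    """Conv1d over axis 1 of [batch, x, channels] with periodic padding."""
    pad_left = kernel_size // 2 if center else kernel_size - 1
    pad_right = kernel_size - 1 - pad_left
    parts = []
    if pad_left:
        parts.append(inputs[:, -pad_left:])
    parts.append(inputs)
    if pad_right:
        parts.append(inputs[:, :pad_right])
    padded = tf.concat(parts, axis=1)  # length x + kernel_size - 1
    return tf.layers.conv1d(padded, filters, kernel_size, padding='valid',
                            activation=activation)  # length x again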
Example #9
  def run_integrate(
      seed_and_initial_condition,
      checkpoint_dir=FLAGS.checkpoint_dir,
      times=np.arange(0, FLAGS.time_max + FLAGS.time_delta, FLAGS.time_delta),
      warmup=FLAGS.warmup,
      integrate_method=FLAGS.integrate_method,
  ):
    random_seed, y0 = seed_and_initial_condition
    _, equation_coarse = equations.from_hparams(
        hparams, random_seed=random_seed)
    checkpoint_path = training.checkpoint_dir_to_path(checkpoint_dir)
    differentiator = integrate.SavedModelDifferentiator(
        checkpoint_path, equation_coarse, hparams)
    solution_model, num_evals_model = integrate.odeint(
        y0, differentiator, warmup+times, method=integrate_method)

    results = xarray.Dataset(
        data_vars={'y': (('time', 'x'), solution_model)},
        coords={'time': warmup+times,
                'x': equation_coarse.grid.solution_x,
                'num_evals': num_evals_model,
                'sample': random_seed})
    return results
Example #10
def predict_result(inputs: tf.Tensor,
                   hparams: tf.contrib.training.HParams) -> tf.Tensor:
    """Infer predictions from inputs with our forward model.

  Args:
    inputs: float32 Tensor with dimensions [batch, x].
    hparams: model hyperparameters.

  Returns:
    Float32 Tensor with dimensions [batch, x] with inferred time derivatives.
  """
    space_derivatives = predict_space_derivatives(inputs, hparams)

    _, equation = equations.from_hparams(hparams)
    time_derivative = apply_space_derivatives(space_derivatives, inputs,
                                              equation)

    if hparams.num_time_steps:
        integrated_solution = predict_time_evolution(inputs, hparams)
    else:
        integrated_solution = None

    return result_stack(space_derivatives, time_derivative,
                        integrated_solution)
Example #11
def predict_space_derivatives_directly(inputs, hparams, reuse=tf.AUTO_REUSE):
    """Predict finite difference coefficients directly from a neural net."""
    _, equation = equations.from_hparams(hparams)
    num_targets = len(equation.DERIVATIVE_ORDERS)
    return _multilayer_conv1d(inputs, hparams, num_targets, reuse=reuse)
Example #12
def predict_coefficients(inputs: tf.Tensor,
                         hparams: tf.contrib.training.HParams,
                         reuse: object = tf.AUTO_REUSE) -> tf.Tensor:
    """Predict finite difference coefficients with a neural networks.

  Args:
    inputs: float32 Tensor with dimensions [batch, x].
    hparams: model hyperparameters.
    reuse: whether or not to reuse TensorFlow variables.

  Returns:
    Float32 Tensor with dimensions [batch, x, derivative, coefficient].

  Raises:
    ValueError: if inputs does not have the expected size for the equation.
    ValueError: if polynomial accuracy constraints are infeasible.
  """
    # TODO(shoyer): refactor to use layer classes to hold variables, like
    # tf.keras.layers, instead of relying on reuse.
    _, equation = equations.from_hparams(hparams)
    assert_consistent_solution(equation, inputs)

    with tf.variable_scope('predict_coefficients', reuse=reuse):
        num_derivatives = len(equation.DERIVATIVE_ORDERS)

        grid = polynomials.regular_grid(
            equation.GRID_OFFSET,
            derivative_order=0,
            accuracy_order=hparams.coefficient_grid_min_size,
            dx=equation.grid.solution_dx)

        net = inputs[:, :, tf.newaxis]
        net /= equation.standard_deviation

        activation = _NONLINEARITIES[hparams.nonlinearity]

        for _ in range(hparams.num_layers - 1):
            net = layers.conv1d_periodic_layer(net,
                                               filters=hparams.filter_size,
                                               kernel_size=hparams.kernel_size,
                                               activation=activation,
                                               center=True)

        if not hparams.polynomial_accuracy_order:
            if hparams.num_layers == 0:
                raise NotImplementedError(
                    'num_layers == 0 is only supported with polynomial '
                    'accuracy constraints')

            net = layers.conv1d_periodic_layer(
                net,
                filters=num_derivatives * grid.size,
                kernel_size=hparams.kernel_size,
                activation=None,
                center=True)
            new_dims = [num_derivatives, grid.size]
            outputs = tf.reshape(
                net, tf.concat([tf.shape(inputs), new_dims], axis=0))
            outputs.set_shape(inputs.shape[:2].concatenate(new_dims))

            if hparams.ensure_unbiased_coefficients:
                if 0 in equation.DERIVATIVE_ORDERS:
                    raise ValueError(
                        'ensure_unbiased not yet supported for 0th order '
                        'spatial derivatives')
                outputs -= tf.reduce_mean(outputs, axis=-1, keepdims=True)

        else:
            poly_accuracy_layers = []

            for derivative_order in equation.DERIVATIVE_ORDERS:
                method = FINITE_VOL if equation.CONSERVATIVE else FINITE_DIFF
                poly_accuracy_layers.append(
                    polynomials.PolynomialAccuracyLayer(
                        grid=grid,
                        method=method,
                        derivative_order=derivative_order,
                        accuracy_order=hparams.polynomial_accuracy_order,
                        out_scale=hparams.polynomial_accuracy_scale))
            input_sizes = [layer.input_size for layer in poly_accuracy_layers]

            if hparams.num_layers > 0:
                net = layers.conv1d_periodic_layer(
                    net,
                    filters=sum(input_sizes),
                    kernel_size=hparams.kernel_size,
                    activation=None,
                    center=True)
            else:
                initializer = tf.initializers.zeros()
                coefficients = tf.get_variable('coefficients',
                                               (sum(input_sizes), ),
                                               initializer=initializer)
                net = tf.tile(coefficients[tf.newaxis, tf.newaxis, :],
                              [tf.shape(inputs)[0], inputs.shape[1].value, 1])

            cum_sizes = np.cumsum(input_sizes)
            starts = [0] + cum_sizes[:-1].tolist()
            stops = cum_sizes.tolist()
            zipped = zip(starts, stops, poly_accuracy_layers)

            outputs = tf.stack(
                [layer.apply(net[..., start:stop])
                 for start, stop, layer in zipped],
                axis=-2)
            assert outputs.shape.as_list()[-1] == grid.size

        return outputs
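
# Hypothetical sketch (not from the original source) of how coefficients with
# this layout might be contracted against stencil values to estimate
# derivatives: gather each point's periodic neighborhood by rolling the
# solution, then sum over the coefficient axis.
def apply_coefficients_sketch(coefficients, inputs, stencil_offsets):
    """coefficients: [batch, x, derivative, coefficient]; inputs: [batch, x]."""
    # patches: [batch, x, coefficient], one shifted copy per stencil offset
    patches = tf.stack([tf.roll(inputs, shift=-offset, axis=1)
                        for offset in stencil_offsets], axis=-1)
    # sum_c coefficients[b, x, d, c] * patches[b, x, c] -> [batch, x, derivative]
    return tf.einsum('bxdc,bxc->bxd', coefficients, patches)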
Example #13
def model_inputs(fine_inputs: tf.Tensor,
                 hparams: tf.contrib.training.HParams,
                 evaluation: bool = False) -> Dict[str, tf.Tensor]:
    """Create coarse model inputs from high resolution simulations.

  Args:
    fine_inputs: float32 Tensor with shape [batch, x] with results of
      high-resolution simulations.
    hparams: model hyperparameters.
    evaluation: bool indicating whether to create data for evaluation or
      model training.

  Returns:
    Dict of tensors with entries:
    - 'labels': float32 Tensor with shape [batch, x//factor, derivative] with
      finite difference derivatives computed at high resolution.
    - 'baseline': float32 Tensor with shape [batch, x//factor, derivative] with
      finite difference derivatives computed from low resolution inputs.
    - 'inputs': float32 Tensor with shape [batch, x//factor] with low resolution
       inputs.
  """
    fine_equation, coarse_equation = equations.from_hparams(hparams)
    assert fine_equation.grid.resample_factor == 1
    resample_method = 'mean' if coarse_equation.CONSERVATIVE else 'subsample'
    resample = duckarray.RESAMPLE_FUNCS[resample_method]

    if evaluation or hparams.ground_truth_order == -1:
        ground_truth_order = None
    else:
        ground_truth_order = hparams.ground_truth_order

    fine_derivatives = baseline_result(fine_inputs,
                                       fine_equation,
                                       hparams.num_time_steps,
                                       accuracy_order=ground_truth_order)
    labels = resample(fine_derivatives, factor=hparams.resample_factor, axis=1)

    coarse_inputs = resample(fine_inputs,
                             factor=hparams.resample_factor,
                             axis=1)
    baseline = baseline_result(coarse_inputs,
                               coarse_equation,
                               hparams.num_time_steps,
                               accuracy_order=1)

    if not evaluation and hparams.noise_probability:
        if hparams.noise_type == 'white':
            filtered = False
        elif hparams.noise_type == 'filtered':
            filtered = True
        else:
            raise ValueError('invalid noise_type: {}'.format(
                hparams.noise_type))

        coarse_inputs = apply_noise(coarse_inputs,
                                    hparams.noise_probability,
                                    hparams.noise_amplitude,
                                    filtered=filtered)

    return {'labels': labels, 'baseline': baseline, 'inputs': coarse_inputs}
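
# The two resampling modes ('mean' for conservative equations, 'subsample'
# otherwise) have simple NumPy counterparts. An illustration of the semantics,
# not the library's duckarray implementation:
def resample_mean(y, factor, axis=1):
    """Average non-overlapping blocks of size `factor` along `axis`."""
    shape = y.shape[:axis] + (y.shape[axis] // factor, factor) + y.shape[axis + 1:]
    return y.reshape(shape).mean(axis=axis + 1)

def resample_subsample(y, factor, axis=1):
    """Keep every `factor`-th point along `axis`."""
    index = [slice(None)] * y.ndim
    index[axis] = slice(None, None, factor)
    return y[tuple(index)]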