Example #1
0
 def __init__(self,
              equation,
              grid,
              stencil_size=5,
              initial_accuracy_order=1,
              constrained_accuracy_order=1,
              learned_keys=None,
              fixed_keys=None,
              num_time_steps=1,
              target=None,
              geometric_transforms=None,
              predict_permutations=True,
              name='linear_model'):
     """Construct the linear model.

     Builds one constrained output layer per learned key (with fixed, i.e.
     non-spatially-varying, coefficients), plus a baseline finite-difference
     model at the given initial accuracy order.
     """
     super().__init__(equation, grid, num_time_steps, target, name)

     # Partition the equation's keys into learned vs. fixed sets.
     learned, fixed = normalize_learned_and_fixed_keys(
         learned_keys, fixed_keys, equation)
     self.learned_keys = learned
     self.fixed_keys = fixed

     # FixedCoefficientsLayer: coefficients are trainable constants rather
     # than being predicted per grid point.
     self.output_layers = build_output_layers(
         equation,
         grid,
         learned,
         stencil_size,
         initial_accuracy_order,
         constrained_accuracy_order,
         layer_cls=FixedCoefficientsLayer,
         predict_permutations=predict_permutations)

     self.fd_model = FiniteDifferenceModel(equation, grid,
                                           initial_accuracy_order)

     # Any falsy value (None or an empty list) falls back to the identity
     # transform, matching the original `or` semantics.
     if not geometric_transforms:
         geometric_transforms = [geometry.Identity()]
     self.geometric_transforms = geometric_transforms
Example #2
0
    def __init__(self,
                 equation,
                 grid,
                 stencil_size=5,
                 initial_accuracy_order=1,
                 constrained_accuracy_order=1,
                 learned_keys=None,
                 fixed_keys=None,
                 core_model_func=conv2d_stack,
                 num_time_steps=1,
                 geometric_transforms=None,
                 predict_permutations=True,
                 target=None,
                 name='pseudo_linear_model',
                 **kwargs):
        # NOTE(jiaweizhuang): Too many input arguments. Only document important
        # or confusing ones for now.
        # pylint: disable=g-doc-args
        """Initialize class.

        Args:
          core_model_func: callable (function or class object). It should
            return a Keras model (or layer) instance, which contains trainable
            weights. The returned core_model instance should take a dict of
            tensors as input (see the call() method in the base TimeStepModel
            class). Additional kwargs are passed to this callable to specify
            hyperparameters of core_model (such as number of layers and
            convolutional filters).
        """
        # pylint: enable=g-doc-args
        super().__init__(equation, grid, num_time_steps, target, name)

        # Partition the equation's keys into learned vs. fixed sets.
        learned, fixed = normalize_learned_and_fixed_keys(
            learned_keys, fixed_keys, equation)
        self.learned_keys = learned
        self.fixed_keys = fixed

        # VaryingCoefficientsLayer: stencil coefficients are predicted per
        # grid point by the core model below.
        self.output_layers = build_output_layers(
            equation,
            grid,
            learned,
            stencil_size,
            initial_accuracy_order,
            constrained_accuracy_order,
            layer_cls=VaryingCoefficientsLayer,
            predict_permutations=predict_permutations)

        self.fd_model = FiniteDifferenceModel(equation, grid,
                                              initial_accuracy_order)

        # Any falsy value (None or an empty list) falls back to the identity
        # transform, matching the original `or` semantics.
        if not geometric_transforms:
            geometric_transforms = [geometry.Identity()]
        self.geometric_transforms = geometric_transforms

        # The core network emits one output channel per stencil coefficient,
        # summed across all learned output layers.
        total_coefficients = sum(
            layer.kernel_size for layer in self.output_layers.values())
        self.core_model = core_model_func(total_coefficients, **kwargs)
Example #3
0
    def __init__(self,
                 equation,
                 grid,
                 stencil_size=5,
                 initial_accuracy_order=1,
                 constrained_accuracy_order=1,
                 learned_keys=None,
                 fixed_keys=None,
                 core_model_func=conv2d_stack,
                 num_time_steps=1,
                 geometric_transforms=None,
                 predict_permutations=True,
                 name='pseudo_linear_model',
                 **kwargs):
        """Construct the pseudo-linear model.

        Args:
          core_model_func: callable returning the trainable core network;
            extra **kwargs are forwarded to it (e.g. layer/filter counts).
        """
        # NOTE(review): the base __init__ here takes `name` as the fourth
        # positional argument (no `target`) — confirm against the base class
        # signature.
        super().__init__(equation, grid, num_time_steps, name)

        # Partition the equation's keys into learned vs. fixed sets.
        learned, fixed = normalize_learned_and_fixed_keys(
            learned_keys, fixed_keys, equation)
        self.learned_keys = learned
        self.fixed_keys = fixed

        self.output_layers = build_output_layers(
            equation,
            grid,
            learned,
            stencil_size,
            initial_accuracy_order,
            constrained_accuracy_order,
            layer_cls=VaryingCoefficientsLayer)
        self.fd_model = FiniteDifferenceModel(equation, grid,
                                              initial_accuracy_order)

        if not predict_permutations:
            # NOTE(shoyer): this only makes sense if geometric_transforms
            # includes permutations. Otherwise you won't be predicting every
            # needed tensor.
            # sorted() snapshots the keys, so deleting entries mid-loop is
            # safe here.
            seen = set()
            for key in sorted(self.output_layers):
                definition = equation.key_definitions[key]
                if definition.swap_xy() in seen:
                    del self.output_layers[key]
                seen.add(definition)

        # Unlike the falsy check elsewhere, an explicitly empty list is kept
        # as-is here; only None falls back to the identity transform.
        self.geometric_transforms = (
            [geometry.Identity()] if geometric_transforms is None
            else geometric_transforms)

        # The core network emits one output channel per stencil coefficient,
        # summed across all learned output layers.
        total_coefficients = sum(
            layer.kernel_size for layer in self.output_layers.values())
        self.core_model = core_model_func(total_coefficients, **kwargs)