Example No. 1
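All of the snippets below assume the following imports; AutoDifferentiator and CoordinateSystem come from the library under test, whose module path these examples do not show.

import numpy as np
import pytest
import tensorflow as tf
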
def test_spherical_hessian_is_symmetric():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.reshape(tf.linspace(-4.5, 3.5, 9), (3, 3))
        diff.watch(x)

        x_reverse = tf.reverse(x, axis=tf.constant([1]))

        y = tf.reshape(tf.einsum('ij,ij->i', x ** 2, x_reverse ** 2), (3, 1))

        # By Schwarz's theorem, mixed second derivatives commute, so swapping
        # the axis arguments of batch_hessian must not change the result.
        assert np.allclose(
            diff.batch_hessian(
                x, y, 0, 1, CoordinateSystem.SPHERICAL
            ).numpy(),
            diff.batch_hessian(
                x, y, 1, 0, CoordinateSystem.SPHERICAL
            ).numpy())
        assert np.allclose(
            diff.batch_hessian(
                x, y, 0, 2, CoordinateSystem.SPHERICAL
            ).numpy(),
            diff.batch_hessian(
                x, y, 2, 0, CoordinateSystem.SPHERICAL
            ).numpy())
        assert np.allclose(
            diff.batch_hessian(
                x, y, 1, 2, CoordinateSystem.SPHERICAL
            ).numpy(),
            diff.batch_hessian(
                x, y, 2, 1, CoordinateSystem.SPHERICAL
            ).numpy())
Example No. 2
    def _step(self, batch: DataBatch, optimizer: tf.optimizers.Optimizer,
              diff_eq_loss_weight: float, ic_loss_weight: float,
              bc_loss_weight: float) -> Loss:
        """
        Performs a forward pass on the batch, computes the batch loss, and
        updates the model parameters.

        :param batch: the batch to compute the losses over
        :param optimizer: the optimizer to use to update parameters of the
            model
        :param diff_eq_loss_weight: the weight of the differential equation
            part of the total physics-informed loss
        :param ic_loss_weight: the weight of the initial condition part of the
            total physics-informed loss
        :param bc_loss_weight: the weight of the boundary condition part of the
            total physics-informed loss
        :return: the various losses over the batch
        """
        with AutoDifferentiator() as auto_diff:
            loss = self._physics_informed_loss(batch, diff_eq_loss_weight,
                                               ic_loss_weight, bc_loss_weight)

        optimizer.minimize(loss.weighted_total_loss,
                           self.trainable_variables,
                           tape=auto_diff)

        return loss
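
A minimal sketch of how _step might be driven; the model instance, the batch iterable, and the unit loss weights are hypothetical stand-ins rather than the library's documented training entry point.

optimizer = tf.optimizers.Adam(learning_rate=1e-3)
for batch in training_batches:  # hypothetical iterable of DataBatch instances
    loss = model._step(
        batch,
        optimizer,
        diff_eq_loss_weight=1.,
        ic_loss_weight=1.,
        bc_loss_weight=1.)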
Example No. 3
    def _mean_squared_differential_equation_error(
            self, batch: DomainDataBatch) -> tf.Tensor:
        """
        Computes and returns the mean squared differential equation error.

        :param batch: the domain data batch
        :return: the mean squared differential equation error
        """
        with AutoDifferentiator(persistent=True) as auto_diff:
            auto_diff.watch(batch.t)
            if batch.x is not None:
                auto_diff.watch(batch.x)

            y_hat = self.call((batch.u, batch.t, batch.x))

            symbol_map_arg = PIDONSymbolMapArg(auto_diff, batch.t, batch.x,
                                               y_hat)
            rhs = self._symbol_mapper.map(symbol_map_arg)

            diff_eq_residual = tf.concat(
                [
                    self._diff_eq_lhs_functions[i](symbol_map_arg) - rhs[i]
                    for i in range(len(rhs))
                ],
                axis=1)

        squared_diff_eq_error = tf.square(diff_eq_residual)
        return tf.reduce_mean(squared_diff_eq_error, axis=0)
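
The reduction above follows a standard pattern: square a (batch, equation) residual tensor, then average over the batch axis to obtain one mean squared error per equation. A standalone illustration:

residual = tf.constant([[1., 2.], [3., 4.]])
# Means over the batch axis: [(1 + 9) / 2, (4 + 16) / 2] = [5., 10.]
mse_per_equation = tf.reduce_mean(tf.square(residual), axis=0)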
Example No. 4
    def _mean_squared_boundary_condition_errors(
            self, batch: BoundaryDataBatch) -> Tuple[tf.Tensor, tf.Tensor]:
        """
        Computes and returns the mean squared Dirichlet boundary condition
        error and the mean squared Neumann boundary condition error.

        :param batch: the boundary data batch
        :return: the mean squared Dirichlet and Neumann boundary condition
            errors
        """
        with AutoDifferentiator() as auto_diff:
            auto_diff.watch(batch.x)
            y_hat = self.call((batch.u, batch.t, batch.x))

        d_y_over_d_n_hat = auto_diff.batch_gradient(batch.x, y_hat, batch.axes)

        dirichlet_bc_error = y_hat - batch.y
        dirichlet_bc_error = tf.where(tf.math.is_nan(batch.y),
                                      tf.zeros_like(batch.y),
                                      dirichlet_bc_error)
        squared_dirichlet_bc_error = tf.square(dirichlet_bc_error)
        mean_squared_dirichlet_bc_error = \
            tf.reduce_mean(squared_dirichlet_bc_error, axis=0)

        neumann_bc_error = d_y_over_d_n_hat - batch.d_y_over_d_n
        neumann_bc_error = tf.where(tf.math.is_nan(batch.d_y_over_d_n),
                                    tf.zeros_like(batch.d_y_over_d_n),
                                    neumann_bc_error)
        squared_neumann_bc_error = tf.square(neumann_bc_error)
        mean_squared_neumann_bc_error = \
            tf.reduce_mean(squared_neumann_bc_error, axis=0)

        return mean_squared_dirichlet_bc_error, mean_squared_neumann_bc_error
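
The tf.where calls above implement NaN masking: NaN entries in the target mark outputs that the boundary condition leaves unconstrained, so their error contribution is zeroed. A standalone illustration of the pattern:

target = tf.constant([[1.], [float('nan')]])
prediction = tf.constant([[1.5], [2.]])
# The NaN row contributes no error: [[0.5], [0.]]
error = tf.where(
    tf.math.is_nan(target), tf.zeros_like(target), prediction - target)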
Example No. 5
def test_hessian_is_symmetric():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.zeros((3, 2), dtype=tf.float32)
        diff.watch(x)

        y = tf.reduce_sum(x, axis=1, keepdims=True) ** 2

        # The mixed second derivatives of y = (x0 + x1)^2 are constant, so
        # the two argument orders must agree exactly.
        assert np.allclose(
            diff.batch_hessian(x, y, 0, 1).numpy(),
            diff.batch_hessian(x, y, 1, 0).numpy())
Example No. 6
def test_gradient_with_out_of_bounds_x_axis():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.ones((2, 2), dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[1., 2.], [-4., -5.]], dtype=tf.float32)
        y = c * x

        with pytest.raises(ValueError):
            diff.batch_gradient(x, y, 2).numpy()
Example No. 7
def test_1d_curl():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.ones((2, 1), dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[1.], [-4.]], dtype=tf.float32)
        y = c * x

        with pytest.raises(ValueError):
            diff.batch_curl(x, y).numpy()
Example No. 8
def test_gradient_with_insufficient_dimensions():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.ones((2,), dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[2.], [-5.]], dtype=tf.float32)
        y = c * x

        with pytest.raises(ValueError):
            diff.batch_gradient(x, y, 0).numpy()
Example No. 9
def test_vector_laplacian_with_non_matching_vector_field_dimension():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.ones((2, 2), dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[1., 2.], [-4., -5.]], dtype=tf.float32)
        y = tf.concat([c * x, c / x], axis=1)

        with pytest.raises(ValueError):
            diff.batch_vector_laplacian(x, y, 0).numpy()
Example No. 10
def test_divergence():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.ones((2, 2), dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[1., 2.], [-4., -5.]], dtype=tf.float32)
        y = c * x

        # For y = c * x, div(y) = c[:, 0] + c[:, 1] = [[3.], [-9.]].
        expected_divergence = [[3.], [-9.]]
        actual_divergence = diff.batch_divergence(x, y).numpy()
        assert np.allclose(expected_divergence, actual_divergence)
Example No. 11
def test_laplacian():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[-1.], [2.]], dtype=tf.float32)
        y = c * tf.reduce_sum(x * x, axis=1, keepdims=True)

        expected_laplacian = [[-4.], [8.]]
        actual_laplacian = diff.batch_laplacian(x, y).numpy()
        assert np.allclose(expected_laplacian, actual_laplacian)
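
The expected values follow directly from the definition: for y = c * (x_0^2 + x_1^2), the Cartesian Laplacian is

\[
\Delta y = c \left( \frac{\partial^2}{\partial x_0^2} + \frac{\partial^2}{\partial x_1^2} \right) \left( x_0^2 + x_1^2 \right) = 4c,
\]

so c = (-1, 2) yields (-4, 8) independently of x.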
Example No. 12
def test_polar_laplacian_is_hessian_trace():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[-1.], [2.]], dtype=tf.float32)
        y = c * tf.reduce_sum(x * x, axis=1, keepdims=True)

        # The Laplacian should equal the trace of the Hessian, here checked
        # in polar coordinates.
        assert np.allclose(
            diff.batch_laplacian(x, y, CoordinateSystem.POLAR).numpy(),
            (diff.batch_hessian(x, y, 0, 0, CoordinateSystem.POLAR) +
             diff.batch_hessian(x, y, 1, 1, CoordinateSystem.POLAR)).numpy())
Example No. 13
def test_polar_hessian_is_symmetric():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.reshape(tf.linspace(-3., 2., 6), (3, 2))
        diff.watch(x)

        x_reverse = tf.reverse(x, axis=tf.constant([1]))

        y = tf.reshape(tf.einsum('ij,ij->i', x ** 2, x_reverse ** 2), (3, 1))

        assert np.allclose(
            diff.batch_hessian(x, y, 0, 1, CoordinateSystem.POLAR).numpy(),
            diff.batch_hessian(x, y, 1, 0, CoordinateSystem.POLAR).numpy())
Example No. 14
def test_gradient_int_x_axis():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.ones((2, 2), dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[1., 2.], [-4., -5.]], dtype=tf.float32)
        y = c * x

        x_axis = 1
        # Differentiating y = c * x with respect to x[:, 1] leaves c in
        # column 1 and zeros elsewhere.
        expected_gradient = [[0., 2.], [0., -5.]]
        actual_gradient = diff.batch_gradient(x, y, x_axis).numpy()
        assert np.allclose(actual_gradient, expected_gradient)
Example No. 15
def test_mixed_hessian():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.zeros((3, 2), dtype=tf.float32)
        diff.watch(x)

        y = tf.reduce_sum(x, axis=1, keepdims=True) ** 2

        x_axis1 = 1
        x_axis2 = 0
        # y = (x0 + x1)^2, so every mixed second derivative equals 2.
        expected_hessian = [[2.], [2.], [2.]]
        actual_hessian = diff.batch_hessian(x, y, x_axis1, x_axis2).numpy()
        assert np.allclose(actual_hessian, expected_hessian)
Example No. 16
def test_hessian():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[1., 2.], [-4., -5.]], dtype=tf.float32)
        y = c * x ** 2

        x_axis1 = x_axis2 = 0
        # y_j = c_j * x_j ** 2, so only column 0 has a nonzero second
        # derivative with respect to x_0, namely 2 * c[:, 0].
        expected_hessian = [[2., 0.], [-8., 0.]]
        actual_hessian = diff.batch_hessian(x, y, x_axis1, x_axis2).numpy()
        assert np.allclose(actual_hessian, expected_hessian)
Example No. 17
def test_curl_with_out_of_bounds_ind():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.ones((2, 3), dtype=tf.float32)
        diff.watch(x)

        c = tf.constant(
            [[1., 2., 3.], [-4., -3., -2.]],
            dtype=tf.float32)
        y = c * x

        with pytest.raises(ValueError):
            diff.batch_curl(x, y, 4).numpy()
Example No. 18
def test_spherical_laplacian():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.constant([[1., 2., 3.], [6., 5., 4.]], dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[-1.], [2.]], dtype=tf.float32)
        y = c * tf.reduce_sum(x * x, axis=1, keepdims=True)

        expected_laplacian = [[-66.33603], [12.68897]]
        actual_laplacian = diff.batch_laplacian(
            x, y, CoordinateSystem.SPHERICAL).numpy()
        assert np.allclose(expected_laplacian, actual_laplacian)
Example No. 19
def test_gradient_tensor_x_axis():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.ones((3, 2), dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[3., -7.], [2., -1.], [4., 5.]], dtype=tf.float32)
        y = c * x

        # A tensor-valued x_axis selects a differentiation axis per batch
        # element: axis 1 for the first row, axis 0 for the other two.
        x_axis = tf.constant([1, 0, 0], dtype=tf.int32)
        expected_gradient = [[0., -7.], [2., 0.], [4., 0.]]
        actual_gradient = diff.batch_gradient(x, y, x_axis).numpy()
        assert np.allclose(actual_gradient, expected_gradient)
Example No. 20
def test_spherical_divergence():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.ones((2, 3), dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[1., 2., 3.], [-4., -5., -6.]], dtype=tf.float32)
        y = c * x

        expected_divergence = [[10.303068], [-27.79453]]
        actual_divergence = diff.batch_divergence(
            x, y, CoordinateSystem.SPHERICAL).numpy()
        assert np.allclose(expected_divergence, actual_divergence)
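
The expected values are consistent with the spherical divergence formula under an (r, phi, theta) axis ordering, with phi the azimuthal and theta the polar angle (a convention inferred from the expected numbers, not stated in the snippet):

\[
\nabla \cdot \mathbf{y} = \frac{1}{r^2} \frac{\partial \left( r^2 y_r \right)}{\partial r} + \frac{1}{r \sin \theta} \frac{\partial y_\varphi}{\partial \varphi} + \frac{1}{r \sin \theta} \frac{\partial \left( \sin \theta \, y_\theta \right)}{\partial \theta},
\]

evaluated here at r = phi = theta = 1.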
Example No. 21
def test_cylindrical_hessian():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.constant([[1., 2., 3.], [4., 5., 6.]], dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[1., -2., 1.5], [-4., -5., 4.5]], dtype=tf.float32)
        y = c * x ** 2

        x_axis1 = x_axis2 = 1
        # The theta-theta component of the cylindrical Hessian is
        # (1 / r ** 2) * d2y/dtheta2 + (1 / r) * dy/dr, which the expected
        # values below reflect.
        expected_hessian = [[2., -4., 0.], [-8., -.625, 0.]]
        actual_hessian = diff.batch_hessian(
            x, y, x_axis1, x_axis2, CoordinateSystem.CYLINDRICAL).numpy()
        assert np.allclose(actual_hessian, expected_hessian)
Example No. 22
def test_polar_gradient():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.fill((2, 2), 2.)
        diff.watch(x)

        c = tf.constant([[1., 2.], [-4., -5.]], dtype=tf.float32)
        y = c * x

        x_axis = 1
        # The angular component of the polar gradient is (1 / r) * dy/dtheta;
        # with r = 2, this halves c[:, 1].
        expected_gradient = [[0., 1.], [0., -2.5]]
        actual_gradient = diff.batch_gradient(
            x, y, x_axis, CoordinateSystem.POLAR).numpy()
        assert np.allclose(actual_gradient, expected_gradient)
Example No. 23
def test_spherical_gradient():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.fill((2, 3), 2.)
        diff.watch(x)

        c = tf.constant([[1., 2., 3.], [-4., -5., -6.]], dtype=tf.float32)
        y = c * x

        x_axis = 2
        # The axis-2 component of the spherical gradient carries a 1 / r
        # factor; with r = 2, this halves c[:, 2].
        expected_gradient = [[0., 0., 1.5], [0., 0., -3.]]
        actual_gradient = diff.batch_gradient(
            x, y, x_axis, CoordinateSystem.SPHERICAL).numpy()
        assert np.allclose(actual_gradient, expected_gradient)
Example No. 24
def test_mixed_spherical_hessian():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.reshape(tf.linspace(-4.5, 3.5, 9), (3, 3))
        diff.watch(x)

        x_reverse = tf.reverse(x, axis=tf.constant([1]))

        y = tf.reshape(tf.einsum('ij,ij->i', x ** 2, x_reverse ** 2), (3, 1))

        x_axis1 = 1
        x_axis2 = 0
        expected_hessian = [[-14.151262], [.4635177], [79.187874]]
        actual_hessian = diff.batch_hessian(
            x, y, x_axis1, x_axis2, CoordinateSystem.SPHERICAL).numpy()
        assert np.allclose(actual_hessian, expected_hessian)
Example No. 25
        def value_and_gradients_function(
                parameters: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
            set_model_parameters(parameters)
            with AutoDifferentiator() as auto_diff:
                loss = self._physics_informed_loss(full_training_data_batch,
                                                   diff_eq_loss_weight,
                                                   ic_loss_weight,
                                                   bc_loss_weight)
                value = tf.reduce_sum(loss.weighted_total_loss, keepdims=True)

            gradients = auto_diff.gradient(value, self.trainable_variables)
            flattened_gradients = tf.concat(
                [tf.reshape(gradient, (1, -1)) for gradient in gradients],
                axis=1)
            return value, flattened_gradients
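
The closure returns a scalar loss and a single flattened gradient row, which matches the interface of quasi-Newton optimizers such as TensorFlow Probability's lbfgs_minimize; whether this library uses that optimizer is an assumption. A sketch of how such a closure might be consumed:

        import tensorflow_probability as tfp

        # initial_parameters is a hypothetical 1 x n tensor of flattened
        # model parameters; it is not defined in this snippet.
        results = tfp.optimizer.lbfgs_minimize(
            value_and_gradients_function,
            initial_position=initial_parameters,
            max_iterations=500)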
Example No. 26
def test_mixed_polar_hessian():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.reshape(tf.linspace(-3., 2., 6), (3, 2))
        diff.watch(x)

        x_reverse = tf.reverse(x, axis=tf.constant([1]))

        y = tf.reshape(tf.einsum('ij,ij->i', x ** 2, x_reverse ** 2), (3, 1))

        x_axis1 = 1
        x_axis2 = 0
        expected_hessian = [[-8.], [0.], [8.]]
        actual_hessian = diff.batch_hessian(
            x, y, x_axis1, x_axis2, CoordinateSystem.POLAR).numpy()
        assert np.allclose(actual_hessian, expected_hessian)
Example No. 27
def test_mixed_cylindrical_hessian():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.reshape(tf.linspace(-4.5, 3.5, 9), (3, 3))
        diff.watch(x)

        x_reverse = tf.reverse(x, axis=tf.constant([1]))

        y = tf.reshape(tf.einsum('ij,ij->i', x ** 2, x_reverse ** 2), (3, 1))

        x_axis1 = 1
        x_axis2 = 0
        expected_hessian = [[8.469135], [.22222222], [-27.777779]]
        actual_hessian = diff.batch_hessian(
            x, y, x_axis1, x_axis2, CoordinateSystem.CYLINDRICAL).numpy()
        assert np.allclose(actual_hessian, expected_hessian)
Example No. 28
def test_spherical_hessian():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.constant([[1., 2., 3.], [4., 5., 6.]], dtype=tf.float32)
        diff.watch(x)

        c = tf.constant([[1., -2., 1.5], [-4., -5., 4.5]], dtype=tf.float32)
        y = c * x ** 2

        x_axis1 = x_axis2 = 1
        expected_hessian = [
            [2., -200.85509, -63.13727],
            [-8., -8.005327, -11.597692]
        ]
        actual_hessian = diff.batch_hessian(
            x, y, x_axis1, x_axis2, CoordinateSystem.SPHERICAL).numpy()
        assert np.allclose(actual_hessian, expected_hessian)
Example No. 29
def test_polar_vector_laplacian():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.ones((3, 2), dtype=tf.float32)
        diff.watch(x)

        c = tf.constant(
            [[1., 2.], [-4., -5.], [0., -2.]],
            dtype=tf.float32)
        y = tf.concat(
            [
                tf.reduce_sum(c * x ** 3, axis=1, keepdims=True),
                tf.reduce_sum(c * x ** 2, axis=1, keepdims=True)
            ], axis=1)

        expected_vector_laplacian = [[7.], [-25.], [-2.]]
        actual_vector_laplacian = diff.batch_vector_laplacian(
            x, y, 0, CoordinateSystem.POLAR).numpy()
        assert np.allclose(expected_vector_laplacian, actual_vector_laplacian)
Example No. 30
def test_spherical_vector_laplacian():
    with AutoDifferentiator(persistent=True) as diff:
        x = tf.ones((3, 3), dtype=tf.float32)
        diff.watch(x)

        c = tf.constant(
            [[1., 2., 3.], [-4., -5., -6.], [0., -2., 4.]],
            dtype=tf.float32)
        y = tf.concat(
            [
                tf.reduce_sum(c * x ** 3, axis=1, keepdims=True),
                tf.reduce_sum(c * x ** 2, axis=1, keepdims=True),
                tf.reduce_sum(x, axis=1, keepdims=True)
            ], axis=1)

        expected_vector_laplacian = [[-15.359716], [17.915348], [3.6546054]]
        actual_vector_laplacian = diff.batch_vector_laplacian(
            x, y, 1, CoordinateSystem.SPHERICAL).numpy()
        assert np.allclose(expected_vector_laplacian, actual_vector_laplacian)