Example #1
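These snippets come from the py-pde test suite and omit their shared imports. A preamble along the following lines is assumed for every example below; the module paths follow py-pde's public API, with pde.tools.misc.module_available being my best guess for the helper used in Example #3:

import functools

import numpy as np
import pytest

from pde import (
    PDE,
    CartesianGrid,
    FieldCollection,
    ScalarField,
    Tensor2Field,
    UnitGrid,
    VectorField,
    grids,
)
from pde.fields.base import FieldBase
from pde.storage import FileStorage, MemoryStorage
from pde.tools.misc import module_available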
def test_vector_from_scalars():
    """test how to compile vector fields from scalar fields"""
    g = UnitGrid([1, 2])
    s1 = ScalarField(g, [[0, 1]])
    s2 = ScalarField(g, [[2, 3]])
    v = VectorField.from_scalars([s1, s2], label="test")
    assert v.label == "test"
    np.testing.assert_equal(v.data, [[[0, 1]], [[2, 3]]])

    with pytest.raises(ValueError):
        VectorField.from_scalars([s1, s2, s1])
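Note the final check: VectorField.from_scalars builds one vector component per scalar field, so a two-dimensional grid requires exactly two scalars, and passing three raises ValueError.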
Example #2
def test_scalar_arithmetics():
    """test simple arithmetics involving scalar fields"""
    grid = UnitGrid([3, 4])
    s = ScalarField(grid, data=2)
    v = VectorField.random_uniform(grid)

    for f in [v, FieldCollection([v])]:
        f.data = s
        assert f.data.shape == (2, 3, 4)
        np.testing.assert_allclose(f.data, 2)

        f += s
        np.testing.assert_allclose(f.data, 4)
        np.testing.assert_allclose((f + s).data, 6)
        np.testing.assert_allclose((s + f).data, 6)
        f -= s
        np.testing.assert_allclose((f - s).data, 0)
        np.testing.assert_allclose((s - f).data, 0)

        f *= s
        np.testing.assert_allclose(f.data, 4)
        np.testing.assert_allclose((f * s).data, 8)
        np.testing.assert_allclose((s * f).data, 8)
        f /= s
        np.testing.assert_allclose((f / s).data, 1)
        with pytest.raises(TypeError):
            s / f
        with pytest.raises(TypeError):
            s /= f
        with pytest.raises(TypeError):
            s *= f
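The asymmetry in the last three checks is deliberate: operations on the vector field broadcast the scalar across all components, but s / f would have to produce a result of higher rank than the scalar left operand, and s /= f and s *= f would have to change the rank of s in place, so all three raise TypeError.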
Example #3
def test_storing_collection(tmp_path):
    """test methods specific to FieldCollections in storage classes"""
    grid = UnitGrid([2, 2])
    f1 = ScalarField.random_uniform(grid, 0.1, 0.4, label="a")
    f2 = VectorField.random_uniform(grid, 0.1, 0.4, label="b")
    f3 = Tensor2Field.random_uniform(grid, 0.1, 0.4, label="c")
    fc = FieldCollection([f1, f2, f3])

    storage_classes = {"MemoryStorage": MemoryStorage}
    if module_available("h5py"):
        file_path = tmp_path / "test_storage_write.hdf5"
        storage_classes["FileStorage"] = functools.partial(
            FileStorage, file_path)

    for storage_cls in storage_classes.values():
        # store some data
        storage = storage_cls()
        storage.start_writing(fc)
        storage.append(fc, 0)
        storage.append(fc, 1)
        storage.end_writing()

        assert storage.has_collection
        assert storage.extract_field(0)[0] == f1
        assert storage.extract_field(1)[0] == f2
        assert storage.extract_field(2)[0] == f3
        assert storage.extract_field(0)[0].label == "a"
        assert storage.extract_field(0, label="new label")[0].label == "new label"
        assert storage.extract_field(0)[0].label == "a"  # do not alter label
        assert storage.extract_field("a")[0] == f1
        assert storage.extract_field("b")[0] == f2
        assert storage.extract_field("c")[0] == f3
        with pytest.raises(ValueError):
            storage.extract_field("nonsense")
Example #4
def test_field_type_guessing():
    """ test the ability to guess the field type """
    for cls in [ScalarField, VectorField, Tensor2Field]:
        grid = UnitGrid([3])
        field = cls.random_normal(grid)
        s = MemoryStorage()
        s.start_writing(field)
        s.append(field, 0)
        s.append(field, 1)

        # delete information
        s._field = None
        s.info = {}

        assert not s.has_collection
        assert len(s) == 2
        assert s[0] == field

    field = FieldCollection([ScalarField(grid), VectorField(grid)])
    s = MemoryStorage()
    s.start_writing(field)
    s.append(field, 0)

    assert s.has_collection

    # delete information
    s._field = None
    s.info = {}

    with pytest.raises(RuntimeError):
        s[0]
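The contrast between the two halves is the point of this test: for a single field, the storage can evidently re-derive the field class from the shape of the stored data alone, while the layout of a FieldCollection is ambiguous once the stored info is deleted, so reconstructing a state raises RuntimeError.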
Example #5
def test_pde_product_operators():
    """test inner and outer products"""
    eq = PDE(
        {"p": "gradient(dot(p, p) + inner(p, p)) + tensor_divergence(outer(p, p))"}
    )
    assert not eq.explicit_time_dependence
    assert not eq.complex_valued
    field = VectorField(grids.UnitGrid([4]), 1)
    res = eq.solve(field, t_range=1, dt=0.1, backend="numpy", tracker=None)
    np.testing.assert_allclose(res.data, field.data)
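The final assertion relies on a small piece of calculus: for the constant initial field, dot(p, p) + inner(p, p) and outer(p, p) are constant in space, so the gradient and the tensor divergence both vanish. The right-hand side is therefore zero and the evolved field must equal the initial one.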
Example #6
@pytest.mark.parametrize("ndim", [1, 2])
def test_interpolation_to_grid_fields(ndim):
    """test whether data is interpolated correctly for different fields"""
    grid = CartesianGrid([[0, 2 * np.pi]] * ndim, 6)
    grid2 = CartesianGrid([[0, 2 * np.pi]] * ndim, 8)
    if ndim == 1:
        vf = VectorField.from_expression(grid, ["cos(x)"])
    elif ndim == 2:
        vf = VectorField.from_expression(grid, ["sin(y)", "cos(x)"])
    sf = vf[0]  # test extraction of fields
    fc = FieldCollection([sf, vf])

    for f in [sf, vf, fc]:
        # test self-interpolation
        f0 = f.interpolate_to_grid(grid, backend="numba")
        np.testing.assert_allclose(f.data, f0.data, atol=1e-15)

        # test interpolation to finer grid and back
        f2 = f.interpolate_to_grid(grid2, backend="numba")
        f3 = f2.interpolate_to_grid(grid, backend="numba")
        np.testing.assert_allclose(f.data, f3.data, atol=0.2, rtol=0.2)
Example #7
def test_random_uniform_types():
    """test whether random uniform fields behave correctly for different types"""
    grid = UnitGrid([8])
    for dtype in [bool, int, float, complex]:
        field = VectorField.random_uniform(grid, dtype=dtype)
        assert field.dtype == np.dtype(dtype)
        assert isinstance(field.data.flat[0].item(), dtype)

    assert ScalarField.random_uniform(grid, 0, 1).dtype == np.dtype(float)
    assert ScalarField.random_uniform(grid, vmin=0 + 0j).dtype == np.dtype(complex)
    assert ScalarField.random_uniform(grid, vmax=1 + 0j).dtype == np.dtype(complex)
    assert ScalarField.random_uniform(grid, 0 + 0j, 1 + 0j).dtype == np.dtype(complex)
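The last four assertions pin down the dtype promotion rule: default bounds yield a float field, while a complex vmin or vmax promotes the result to complex.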
Example #8
def test_interpolation_to_grid_fields():
    """ test whether data is interpolated correctly for different fields """
    grid = CartesianGrid([[0, 2 * np.pi]] * 2, 6)
    grid2 = CartesianGrid([[0, 2 * np.pi]] * 2, 8)
    vf = VectorField.from_expression(grid, ["sin(y)", "cos(x)"])
    sf = vf[0]  # test extraction of fields
    fc = FieldCollection([sf, vf])

    for f in [sf, vf, fc]:
        f2 = f.interpolate_to_grid(grid2, method="numba")
        f3 = f2.interpolate_to_grid(grid, method="numba")
        np.testing.assert_allclose(f.data, f3.data, atol=0.2, rtol=0.2)
Example #9
def test_pde_vector():
    """test PDE with a single vector field"""
    eq = PDE({"u": "vector_laplace(u) + exp(-t)"})
    assert eq.explicit_time_dependence
    assert not eq.complex_valued
    grid = grids.UnitGrid([8, 8])
    field = VectorField.random_normal(grid)

    res_a = eq.solve(field, t_range=1, dt=0.01, backend="numpy", tracker=None)
    res_b = eq.solve(field, t_range=1, dt=0.01, backend="numba", tracker=None)

    res_a.assert_field_compatible(res_b)
    np.testing.assert_allclose(res_a.data, res_b.data)
Example #10
def test_simple_plotting(example_grid):
    """test simple plotting of various fields on various grids"""
    # `example_grid` is supplied by the surrounding test suite (a fixture or
    # parametrization providing grids of various dimensions)
    vf = VectorField.random_uniform(example_grid)
    tf = Tensor2Field.random_uniform(example_grid)
    sf = tf[0, 0]  # test extraction of fields
    fc = FieldCollection([sf, vf])
    for f in [sf, vf, tf, fc]:
        f.plot(action="close")
        f.plot(kind="line", action="close")
        if example_grid.dim >= 2:
            f.plot(kind="image", action="close")
        if isinstance(f, VectorField) and example_grid.dim == 2:
            f.plot(kind="quiver", action="close")
            f.plot(kind="streamplot", action="close")
Example #11
def test_writing_images(tmp_path):
    """test writing and reading files"""
    from matplotlib.pyplot import imread

    grid = UnitGrid([4, 4])
    s = ScalarField.random_uniform(grid, label="scalar")
    v = VectorField.random_uniform(grid, label="vector")
    t = Tensor2Field.random_uniform(grid, label="tensor")

    path = tmp_path / "test_writing_images.png"
    for f in [s, v, t]:
        f.to_file(path)
        # try reading the file
        with path.open("br") as fp:
            imread(fp)
Example #12
def test_pde_vector_scalar():
    """test PDE with a vector and a scalar field"""
    eq = PDE({"u": "vector_laplace(u) - u + gradient(v)", "v": "- divergence(u)"})
    assert not eq.explicit_time_dependence
    assert not eq.complex_valued
    grid = grids.UnitGrid([8, 8])
    field = FieldCollection(
        [VectorField.random_uniform(grid), ScalarField.random_uniform(grid)]
    )

    res_a = eq.solve(field, t_range=1, dt=0.01, backend="numpy", tracker=None)
    res_b = eq.solve(field, t_range=1, dt=0.01, backend="numba", tracker=None)

    res_a.assert_field_compatible(res_b)
    np.testing.assert_allclose(res_a.data, res_b.data)
Example #13
def test_hdf_input_output(tmp_path):
    """test writing and reading files"""
    grid = UnitGrid([4, 4])
    s = ScalarField.random_uniform(grid, label="scalar")
    v = VectorField.random_uniform(grid, label="vector")
    t = Tensor2Field.random_uniform(grid, label="tensor")
    col = FieldCollection([s, v, t], label="collection")

    path = tmp_path / "test_hdf_input_output.hdf5"
    for f in [s, v, t, col]:
        f.to_file(path)
        f2 = FieldBase.from_file(path)
        assert f == f2
        assert f.label == f2.label
        assert isinstance(str(f), str)
        assert isinstance(repr(f), str)
Example #14
def test_interpolation_to_cartesian(grid):
    """test whether data is interpolated correctly to Cartesian grid"""
    # `grid` is supplied by the surrounding test suite (a parametrization over
    # grids whose dimension sets the size of the Cartesian target grids below)
    dim = grid.dim
    vf = VectorField(grid, 2)
    sf = vf[0]  # test extraction of fields
    fc = FieldCollection([sf, vf])

    # subset
    grid_cart = UnitGrid([4] * dim)
    for f in [sf, fc]:
        res = f.interpolate_to_grid(grid_cart)
        np.testing.assert_allclose(res.data, 2)

    # superset
    grid_cart = UnitGrid([8] * dim)
    for f in [sf, fc]:
        res = f.interpolate_to_grid(grid_cart, fill=0)
        assert res.data.min() == 0
        assert res.data.max() == pytest.approx(2)
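The fill argument governs extrapolation here: points of the larger grid that fall outside the original one receive the fill value, so the result spans the full range from 0 (filled) to 2 (interpolated).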
Example #15
def test_random_normal_types():
    """test whether random normal fields behave correctly for different types"""
    grid = UnitGrid([8])
    for dtype in [bool, int, float, complex]:
        field = VectorField.random_normal(grid, dtype=dtype)
        assert field.dtype == np.dtype(dtype)
        assert isinstance(field.data.flat[0].item(), dtype)

    assert ScalarField.random_normal(grid, 0, 1).dtype == np.dtype(float)
    assert ScalarField.random_normal(grid, mean=0 + 0j).dtype == np.dtype(complex)
    assert ScalarField.random_normal(grid, std=1 + 0j).dtype == np.dtype(complex)
    assert ScalarField.random_normal(grid, 0 + 0j, 1 + 0j).dtype == np.dtype(complex)

    m = complex(np.random.random(), np.random.random())
    s = complex(1 + np.random.random(), 1 + np.random.random())
    grid = UnitGrid([256, 256])
    field = VectorField.random_normal(grid, m, s)  # complex mean and std
    assert np.mean(field.average) == pytest.approx(m, rel=0.1, abs=0.1)
    assert np.std(field.data.real) == pytest.approx(s.real, rel=0.1, abs=0.1)
    assert np.std(field.data.imag) == pytest.approx(s.imag, rel=0.1, abs=0.1)
Example #16
def test_dot_product():
    """test dot products between vectors and tensors"""
    g = UnitGrid([3, 2])
    vf = VectorField.random_normal(g)
    tf = Tensor2Field.random_normal(g)
    v_dot = vf.make_dot_operator()
    t_dot = tf.make_dot_operator()

    expected = np.einsum("i...,i...->...", vf.data, vf.data)
    np.testing.assert_allclose((vf @ vf).data, expected)
    np.testing.assert_allclose(v_dot(vf.data, vf.data), expected)

    expected = np.einsum("i...,i...->...", vf.data, tf.data)
    np.testing.assert_allclose((vf @ tf).data, expected)
    np.testing.assert_allclose(v_dot(vf.data, tf.data), expected)

    expected = np.einsum("ji...,i...->j...", tf.data, vf.data)
    np.testing.assert_allclose((tf @ vf).data, expected)
    np.testing.assert_allclose(t_dot(tf.data, vf.data), expected)

    expected = np.einsum("ij...,jk...->ik...", tf.data, tf.data)
    np.testing.assert_allclose((tf @ tf).data, expected)
    np.testing.assert_allclose(t_dot(tf.data, tf.data), expected)
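Each block checks the overloaded @ operator and the compiled function from make_dot_operator against an explicit einsum contraction; the einsum strings document the index conventions, with vector products contracting the leading axis and tensor products behaving like ordinary matrix multiplication.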
Example #17
    def _prepare(self, state: FieldBase) -> None:
        """ prepare the expression by setting internal variables in the cache

        Note that the expensive calculations in this method are only carried
        out if the state attributes change.

        Args:
            state (:class:`~pde.fields.FieldBase`):
                The field describing the state of the PDE
        """
        # check whether this function actually needs to be called
        if ('state_attributes' in self._cache
                and state.attributes == self._cache['state_attributes']):
            return  # prepare was already called
        self._cache = {}  # clear cache, if there was something

        # check whether the state is compatible with the PDE
        num_fields = len(self.variables)
        self.diagnostics['num_fields'] = num_fields
        if isinstance(state, FieldCollection):
            if num_fields != len(state):
                raise ValueError(f'Expected {num_fields} fields in state, but '
                                 f'got {len(state)}')
        elif isinstance(state, DataFieldBase):
            if num_fields != 1:
                raise ValueError(f'Expected {num_fields} fields in state, but '
                                 'got only one')
        else:
            raise ValueError(f'Unknown state class {state.__class__.__name__}')

        # obtain functions used in the expression
        ops_general = {}

        # create a dot operator if necessary
        if 'dot' in self.diagnostics['operators']:  # type: ignore
            # add dot product between two vector fields. This can for instance
            # appear when two gradients of scalar fields need to be multiplied
            ops_general['dot'] = VectorField(state.grid).get_dot_operator()

        # obtain the python functions for the rhs
        rhs_funcs = []
        for var in self.variables:
            ops = ops_general.copy()

            # obtain the (differential) operators
            for func in self._operators[var]:
                if func in ops:
                    continue
                # determine boundary conditions
                for bc_key, bc in self.bcs.items():
                    bc_var, bc_func = bc_key.split(':')
                    var_match = (bc_var == var or bc_var == '*')
                    func_match = (bc_func == func or bc_func == '*')
                    if var_match and func_match:
                        break  # found a matching boundary condition
                else:
                    raise RuntimeError('Could not find a suitable boundary '
                                       f'condition for operator "{func}" of '
                                       f'variable "{var}"')

                ops[func] = state.grid.get_operator(func, bc=bc)

            rhs_funcs.append(self._rhs_expr[var]._get_function(user_funcs=ops))

        self._cache['rhs_funcs'] = rhs_funcs

        # add extra information for field collection
        if isinstance(state, FieldCollection):
            # a field can be non-scalar even if its slice covers a single entry
            # (e.g., vector fields on 1d grids), so the rank is checked instead
            isscalar = tuple(field.rank == 0 for field in state)
            starts = tuple(slc.start for slc in state._slices)
            stops = tuple(slc.stop for slc in state._slices)

            def get_data_tuple(state_data):
                """ helper for turning state_data into a tuple of field data """
                return tuple(state_data[starts[i]]
                             if isscalar[i] else state_data[starts[i]:stops[i]]
                             for i in range(num_fields))

            self._cache['get_data_tuple'] = get_data_tuple

        # store the attributes in the cache, which allows us to skip the
        # calculations above on the next call. Note that this has to be the
        # last statement of the method, so the cache is only valid if the
        # preparation ran through successfully
        self._cache['state_attributes'] = state.attributes
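The caching contract of this method (recompute only when state.attributes changes, and write the cache key last so that a failed preparation never leaves a valid cache behind) is a reusable pattern. A stand-alone sketch with a hypothetical CachedPreparer class, not part of py-pde:

class CachedPreparer:
    """illustrates attribute-keyed cache invalidation (hypothetical class)"""

    def __init__(self):
        self._cache = {}

    def _compute(self, state):
        ...  # stand-in for the expensive preparation work

    def prepare(self, state):
        # skip the expensive work if the state looks unchanged
        if self._cache.get("state_attributes") == state.attributes:
            return
        self._cache = {}  # invalidate results from the previous state
        self._cache["prepared"] = self._compute(state)
        # set the key last: if _compute raises, the cache remains invalid
        self._cache["state_attributes"] = state.attributes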