Example 1
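The test functions below are shown without their surrounding module. To run them, NumPy, pytest and the ODL discretization helpers must be importable; a plausible import block is sketched here (the exact module paths are an assumption and may vary between ODL versions; ndvolume is a local test helper, and a hypothetical stand-in is sketched after Example 3):

import numpy as np
import pytest

# Assumed ODL imports; the module layout may differ between versions.
from odl import Rn, uniform_discr
from odl.discr.discr_ops import (DiscreteDivergence, DiscreteGradient,
                                 finite_diff)
from odl.util.testutils import all_equal, almost_equal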
def test_discrete_gradient_cuda():
    """Discretized spatial gradient operator using CUDA."""

    # Check result of operator with explicit summation
    # phantom data
    data = np.array([[0., 1., 2., 3., 4.],
                     [1., 2., 3., 4., 5.],
                     [2., 3., 4., 5., 6.]])

    # DiscreteLp Vector
    discr_space = uniform_discr([0, 0], [6, 2.5], data.shape, impl='cuda')
    dom_vec = discr_space.element(data)

    # computation of gradient components with helper function
    dx0, dx1 = discr_space.grid.stride
    df0 = finite_diff(data, axis=0, dx=dx0, zero_padding=True, edge_order=2)
    df1 = finite_diff(data, axis=1, dx=dx1, zero_padding=True, edge_order=2)

    # gradient
    grad = DiscreteGradient(discr_space)
    grad_vec = grad(dom_vec)
    assert len(grad_vec) == data.ndim
    assert all_equal(grad_vec[0].asarray(), df0)
    assert all_equal(grad_vec[1].asarray(), df1)

    # adjoint operator
    ran_vec = grad.range.element([data, data ** 2])
    adj_vec = grad.adjoint(ran_vec)
    lhs = ran_vec.inner(grad_vec)
    rhs = dom_vec.inner(adj_vec)
    assert lhs != 0
    assert rhs != 0
    assert almost_equal(lhs, rhs)
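The last three assertions verify the defining adjoint relation <grad(x), y> = <x, grad^*(y)> on a pair with nonzero pairings. A reusable sketch of this check (the helper name and tolerance are illustrative, not part of the library) could be:

def check_adjoint(op, x, y, rtol=1e-10):
    # Verify <op(x), y> == <x, op.adjoint(y)> up to floating-point error.
    lhs = op(x).inner(y)
    rhs = x.inner(op.adjoint(y))
    assert lhs != 0 and rhs != 0
    assert abs(lhs - rhs) <= rtol * max(abs(lhs), abs(rhs))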
Example 2
def test_discrete_divergence_cuda():
    """Discretized spatial divergence operator using CUDA."""

    # Check result of operator with explicit summation
    # phantom data
    data = np.array([[0., 1., 2., 3., 4.],
                     [1., 2., 3., 4., 5.],
                     [2., 3., 4., 5., 6.]])

    # DiscreteLp
    discr_space = uniform_discr([0, 0], [1.5, 10], data.shape, impl='cuda')

    # operator instance
    div = DiscreteDivergence(discr_space)

    # apply operator
    dom_vec = div.domain.element([data, data])
    div_dom_vec = div(dom_vec)

    # computation of divergence with helper function
    dx0, dx1 = discr_space.grid.stride
    df0 = finite_diff(data, axis=0, dx=dx0, zero_padding=True, edge_order=2)
    df1 = finite_diff(data, axis=1, dx=dx1, zero_padding=True, edge_order=2)

    assert all_equal(df0 + df1, div_dom_vec.asarray())

    # Adjoint operator
    adj_div = div.adjoint
    ran_vec = div.range.element(data ** 2)
    adj_div_ran_vec = adj_div(ran_vec)

    # Adjoint condition
    lhs = ran_vec.inner(div_dom_vec)
    rhs = dom_vec.inner(adj_div_ran_vec)
    assert lhs != 0
    assert rhs != 0
    assert almost_equal(lhs, rhs)
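Why these adjoint identities hold: with zero padding, the second-order central-difference matrix D_j used along each axis is skew-symmetric, so, assuming the operators are built from exactly these differences,

\[
D_j^\top = -D_j, \qquad
\langle \operatorname{grad} x,\, y \rangle
  = \sum_j \langle D_j x,\, y_j \rangle
  = -\sum_j \langle x,\, D_j y_j \rangle
  = \langle x,\, -\operatorname{div} y \rangle ,
\]

i.e. the adjoint of the gradient is minus the divergence, and the inner products above agree up to rounding error (hence almost_equal rather than exact equality).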
Example 3
def test_discrete_gradient():
    """Discretized spatial gradient operator."""

    discr_space = Rn(1)
    with pytest.raises(TypeError):
        DiscreteGradient(discr_space)

    # Check result of operator with explicit summation
    # phantom data
    data = np.array([[0., 1., 2., 3., 4.],
                     [0., 1., 2., 3., 4.],
                     [0., 1., 2., 3., 4.]])

    # DiscreteLp Vector
    discr_space = uniform_discr([0, 0], [6, 2.5], data.shape)
    dom_vec = discr_space.element(data)

    # computation of gradient components with helper function
    dx0, dx1 = discr_space.grid.stride
    df0 = finite_diff(data, axis=0, dx=dx0, zero_padding=True, edge_order=2)
    df1 = finite_diff(data, axis=1, dx=dx1, zero_padding=True, edge_order=2)

    # gradient
    grad = DiscreteGradient(discr_space)
    grad_vec = grad(dom_vec)
    assert len(grad_vec) == data.ndim
    assert all_equal(grad_vec[0].asarray(), df0)
    assert all_equal(grad_vec[1].asarray(), df1)

    # adjoint operator
    ran_vec = grad.range.element([data, data ** 2])
    adj_vec = grad.adjoint(ran_vec)
    lhs = ran_vec.inner(grad_vec)
    rhs = dom_vec.inner(adj_vec)
    assert lhs != 0
    assert rhs != 0
    assert almost_equal(lhs, rhs)

    # higher dimensional arrays
    lin_size = 3
    for ndim in range(1, 6):

        # DiscreteLp Vector
        discr_space = uniform_discr([0.] * ndim, [lin_size] * ndim,
                                    [lin_size] * ndim)
        dom_vec = discr_space.element(ndvolume(lin_size, ndim))

        # gradient
        grad = DiscreteGradient(discr_space)
        grad(dom_vec)
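ndvolume is a test helper that is not shown in these snippets; it only needs to produce an ndim-dimensional array of shape (lin_size,) * ndim. A hypothetical stand-in (not the original helper) would be:

def ndvolume(lin_size, ndim, dtype=float):
    # Hypothetical stand-in: an ndim-dimensional ramp of shape (lin_size,) * ndim.
    return np.arange(lin_size ** ndim, dtype=dtype).reshape([lin_size] * ndim)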
Example 4
def test_discrete_divergence():
    """Discretized spatial divergence operator."""

    # Invalid arguments
    discr_space = Rn(1)
    with pytest.raises(TypeError):
        DiscreteDivergence(discr_space)

    # Check result of operator with explicit summation
    data = np.array([[0., 1., 2., 3., 4.],
                     [1., 2., 3., 4., 5.],
                     [2., 3., 4., 5., 6.]])

    # DiscreteLp
    discr_space = uniform_discr([0, 0], [6, 2.5], data.shape)

    # Operator instance
    div = DiscreteDivergence(discr_space)

    # Apply operator
    dom_vec = div.domain.element([data, data])
    div_dom_vec = div(dom_vec)

    # computation of divergence with helper function
    dx0, dx1 = discr_space.grid.stride
    df0 = finite_diff(data, axis=0, dx=dx0, zero_padding=True, edge_order=2)
    df1 = finite_diff(data, axis=1, dx=dx1, zero_padding=True, edge_order=2)

    assert all_equal(df0 + df1, div_dom_vec.asarray())

    # Adjoint operator
    adj_div = div.adjoint
    ran_vec = div.range.element(data ** 2)
    adj_div_ran_vec = adj_div(ran_vec)

    # Adjoint condition
    lhs = ran_vec.inner(div_dom_vec)
    rhs = dom_vec.inner(adj_div_ran_vec)
    assert lhs != 0
    assert rhs != 0
    assert almost_equal(lhs, rhs)

    # Higher dimensional arrays
    for ndim in range(1, 6):
        # DiscreteLp Vector
        lin_size = 3
        discr_space = uniform_discr([0.] * ndim, [lin_size] * ndim,
                                    [lin_size] * ndim)
        # Divergence
        div = DiscreteDivergence(discr_space)
        dom_vec = div.domain.element([ndvolume(lin_size, ndim)] * ndim)
        div(dom_vec)
Example 5
def test_finite_diff():
    """Finite differences test."""

    # phantom data
    arr = np.array([0.5, 1, 3.5, 2, -.5, 3, -1, -1, 0, 3])

    # invalid parameter values
    # edge order in {1,2}
    with pytest.raises(ValueError):
        finite_diff(arr, edge_order=0)
    with pytest.raises(ValueError):
        finite_diff(arr, edge_order=3)
    # zero padding uses second-order accurate edges
    with pytest.raises(ValueError):
        finite_diff(arr, zero_padding=True, edge_order=1)
    # at least a two-element array is required
    with pytest.raises(ValueError):
        finite_diff(np.array([0.0]))
    # axis
    with pytest.raises(IndexError):
        finite_diff(arr, axis=2)
    # out array with mismatched shape
    out = np.zeros(arr.size + 1)
    with pytest.raises(ValueError):
        finite_diff(arr, out)
    # zero step size
    with pytest.raises(ValueError):
        finite_diff(arr, dx=0)
    # wrong method
    with pytest.raises(ValueError):
        finite_diff(arr, method='non-method')

    # explicitly calculated finite difference
    findiff_ex = np.zeros_like(arr)

    # interior: second-order accurate differences
    findiff_ex[1:-1] = (arr[2:] - arr[:-2]) / 2.0

    # default: out=None, axis=0, dx=1.0, edge_order=2, zero_padding=False
    findiff_op = finite_diff(arr, out=None, axis=0, dx=1.0, edge_order=2,
                             zero_padding=False)
    assert all_equal(findiff_op, finite_diff(arr))

    # boundary: second-order accurate forward/backward difference
    findiff_ex[0] = -(3 * arr[0] - 4 * arr[1] + arr[2]) / 2.0
    findiff_ex[-1] = (3 * arr[-1] - 4 * arr[-2] + arr[-3]) / 2.0
    assert all_equal(findiff_op, findiff_ex)

    # non-unit step length
    dx = 0.5
    findiff_op = finite_diff(arr, dx=dx)
    assert all_equal(findiff_op, findiff_ex / dx)

    # boundary: second-order accurate central differences with zero padding
    findiff_op = finite_diff(arr, zero_padding=True)
    findiff_ex[0] = arr[1] / 2.0
    findiff_ex[-1] = -arr[-2] / 2.0
    assert all_equal(findiff_op, findiff_ex)

    # boundary: one-sided first-order forward/backward difference without zero
    # padding
    findiff_op = finite_diff(arr, zero_padding=False, edge_order=1)
    findiff_ex[0] = arr[1] - arr[0]  # 1st-order accurate forward difference
    findiff_ex[-1] = arr[-1] - arr[-2]  # 1st-order accurate backward diff.
    assert all_equal(findiff_op, findiff_ex)

    # different edge orders give different boundary values
    df1 = finite_diff(arr, edge_order=1)
    df2 = finite_diff(arr, edge_order=2)
    assert all_equal(df1[1:-1], findiff_ex[1:-1])
    assert all_equal(df2[1:-1], findiff_ex[1:-1])
    assert df1[0] != df2[0]
    assert df1[-1] != df2[-1]

    # in-place evaluation
    out = np.zeros_like(arr)
    assert out is finite_diff(arr, out)
    assert all_equal(out, finite_diff(arr))
    assert out is not finite_diff(arr)

    # axis
    arr = np.array([[0., 1., 2., 3., 4.],
                    [1., 2., 3., 4., 5.]])
    df0 = finite_diff(arr, axis=0)
    df1 = finite_diff(arr, axis=1)
    assert all_equal(df0, df1)

    # complex arrays
    arr = np.array([0., 1., 2., 3., 4.]) + 1j * np.array([10., 9., 8., 7., 6.])
    findiff_op = finite_diff(arr)
    assert all(findiff_op.real == 1)
    assert all(findiff_op.imag == -1)
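For the zero-padding branch exercised above, finite_diff behaves like a second-order central difference with the signal extended by zeros on both sides. A minimal NumPy stand-in (the function name is illustrative, not the library API) that reproduces the expected interior and boundary values:

def central_diff_zero_pad(f, dx=1.0):
    # Second-order central differences; samples outside the array count as 0.
    padded = np.concatenate(([0.0], np.asarray(f, dtype=float), [0.0]))
    return (padded[2:] - padded[:-2]) / (2.0 * dx)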
Example 6
def test_backward_diff():
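    """Backward differences with zero padding."""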
    arr = np.array([0., 3., 5., 6.])

    findiff_op = finite_diff(arr, zero_padding=True, method='backward')
    assert all_equal(findiff_op, [0., 3., 2., 1.])
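The expected values follow from backward differences with a zero sample prepended, i.e. f[i] - f[i-1] with f[-1] taken as 0 (assuming unit step length):

arr = np.array([0., 3., 5., 6.])
padded = np.concatenate(([0.0], arr))
print(padded[1:] - padded[:-1])   # -> [0. 3. 2. 1.]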