def test_discrete_gradient_cuda():
    """Discretized spatial gradient operator using CUDA.

    Checks the forward result of ``DiscreteGradient`` against the
    ``finite_diff`` helper component-wise, then verifies the adjoint via the
    inner-product identity ``<grad(v), w> == <v, grad^T(w)>``.
    """
    # phantom data
    data = np.array([[0., 1., 2., 3., 4.],
                     [1., 2., 3., 4., 5.],
                     [2., 3., 4., 5., 6.]])

    # DiscreteLp vector on the CUDA backend
    discr_space = uniform_discr([0, 0], [6, 2.5], data.shape, impl='cuda')
    dom_vec = discr_space.element(data)

    # expected gradient components computed with the helper function
    dx0, dx1 = discr_space.grid.stride
    df0 = finite_diff(data, axis=0, dx=dx0, zero_padding=True, edge_order=2)
    df1 = finite_diff(data, axis=1, dx=dx1, zero_padding=True, edge_order=2)

    # gradient: one component per axis, each matching the reference
    grad = DiscreteGradient(discr_space)
    grad_vec = grad(dom_vec)
    assert len(grad_vec) == data.ndim
    assert all_equal(grad_vec[0].asarray(), df0)
    assert all_equal(grad_vec[1].asarray(), df1)

    # adjoint operator: <grad(v), w> must equal <v, grad^T(w)>
    ran_vec = grad.range.element([data, data ** 2])
    adj_vec = grad.adjoint(ran_vec)
    lhs = ran_vec.inner(grad_vec)
    rhs = dom_vec.inner(adj_vec)
    assert lhs != 0
    assert rhs != 0
    # Exact float equality of two differently-computed inner products is
    # fragile; compare up to floating-point rounding instead.
    assert lhs == pytest.approx(rhs)
def test_discrete_gradient():
    """Discretized spatial gradient operator.

    Verifies that ``DiscreteGradient`` rejects non-``DiscreteLp`` spaces,
    matches the ``finite_diff`` helper component-wise, satisfies the adjoint
    inner-product identity, and evaluates on 1- to 5-dimensional spaces.
    """
    # non-DiscreteLp space must be rejected
    discr_space = Rn(1)
    with pytest.raises(TypeError):
        DiscreteGradient(discr_space)

    # phantom data (constant along axis 0, linear along axis 1)
    data = np.array([[0., 1., 2., 3., 4.],
                     [0., 1., 2., 3., 4.],
                     [0., 1., 2., 3., 4.]])

    # DiscreteLp vector
    discr_space = uniform_discr([0, 0], [6, 2.5], data.shape)
    dom_vec = discr_space.element(data)

    # expected gradient components computed with the helper function
    dx0, dx1 = discr_space.grid.stride
    df0 = finite_diff(data, axis=0, dx=dx0, zero_padding=True, edge_order=2)
    df1 = finite_diff(data, axis=1, dx=dx1, zero_padding=True, edge_order=2)

    # gradient: one component per axis, each matching the reference
    grad = DiscreteGradient(discr_space)
    grad_vec = grad(dom_vec)
    assert len(grad_vec) == data.ndim
    assert all_equal(grad_vec[0].asarray(), df0)
    assert all_equal(grad_vec[1].asarray(), df1)

    # adjoint operator: <grad(v), w> must equal <v, grad^T(w)>
    ran_vec = grad.range.element([data, data ** 2])
    adj_vec = grad.adjoint(ran_vec)
    lhs = ran_vec.inner(grad_vec)
    rhs = dom_vec.inner(adj_vec)
    assert lhs != 0
    assert rhs != 0
    # Exact float equality of two differently-computed inner products is
    # fragile; compare up to floating-point rounding instead.
    assert lhs == pytest.approx(rhs)

    # higher dimensional arrays: smoke-test evaluation in 1 to 5 dimensions
    lin_size = 3
    for ndim in range(1, 6):
        discr_space = uniform_discr([0.] * ndim, [lin_size] * ndim,
                                    [lin_size] * ndim)
        dom_vec = discr_space.element(ndvolume(lin_size, ndim))

        grad = DiscreteGradient(discr_space)
        grad(dom_vec)