Example #1
def test_gradient_cuda():
    """Discretized spatial gradient operator using CUDA."""

    # DiscreteLp Vector
    discr_space = odl.uniform_discr([0, 0], [6, 2.5], DATA_2D.shape,
                                    impl='cuda')
    dom_vec = discr_space.element(DATA_2D)

    # computation of gradient components with helper function
    dx0, dx1 = discr_space.cell_sides
    diff_0 = finite_diff(DATA_2D, axis=0, dx=dx0, padding_method='constant')
    diff_1 = finite_diff(DATA_2D, axis=1, dx=dx1, padding_method='constant')

    # gradient
    grad = Gradient(discr_space)
    grad_vec = grad(dom_vec)
    assert len(grad_vec) == DATA_2D.ndim
    assert all_equal(grad_vec[0].asarray(), diff_0)
    assert all_equal(grad_vec[1].asarray(), diff_1)

    # adjoint operator
    ran_vec = grad.range.element([DATA_2D, DATA_2D ** 2])
    adj_vec = grad.adjoint(ran_vec)
    lhs = ran_vec.inner(grad_vec)
    rhs = dom_vec.inner(adj_vec)
    assert lhs != 0
    assert rhs != 0
    assert lhs == rhs
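The lhs/rhs assertions above check the defining adjoint identity <grad(x), y> = <x, grad.adjoint(y)>. A minimal CPU sketch of the same check, assuming a recent ODL where Gradient and the white_noise phantom are available as used below (shapes, seeds and the tolerance are illustrative):

import odl

space = odl.uniform_discr([0, 0], [6, 2.5], (10, 10))
grad = odl.Gradient(space)

# Non-trivial random data so that neither inner product is zero.
x = odl.phantom.white_noise(space, seed=42)
y = grad.range.element([odl.phantom.white_noise(space, seed=1),
                        odl.phantom.white_noise(space, seed=2)])

lhs = y.inner(grad(x))
rhs = x.inner(grad.adjoint(y))
# Compare with a relative tolerance rather than exact float equality.
assert abs(lhs - rhs) <= 1e-10 * max(abs(lhs), abs(rhs))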
Example #2
def test_gradient(space, method, padding):
    """Discretized spatial gradient operator."""

    places = 2 if space.dtype == np.float32 else 4

    with pytest.raises(TypeError):
        Gradient(odl.rn(1), method=method)

    if isinstance(padding, tuple):
        pad_mode, pad_const = padding
    else:
        pad_mode, pad_const = padding, 0

    # DiscreteLp Vector
    dom_vec = noise_element(space)
    dom_vec_arr = dom_vec.asarray()

    # gradient
    grad = Gradient(space, method=method,
                    pad_mode=pad_mode,
                    pad_const=pad_const)
    grad_vec = grad(dom_vec)
    assert len(grad_vec) == space.ndim

    # computation of gradient components with helper function
    for axis, dx in enumerate(space.cell_sides):
        diff = finite_diff(dom_vec_arr, axis=axis, dx=dx, method=method,
                           pad_mode=pad_mode,
                           pad_const=pad_const)

        assert all_almost_equal(grad_vec[axis].asarray(), diff)

    # Test adjoint operator
    derivative = grad.derivative()
    ran_vec = noise_element(derivative.range)
    deriv_grad_vec = derivative(dom_vec)
    adj_grad_vec = derivative.adjoint(ran_vec)
    lhs = ran_vec.inner(deriv_grad_vec)
    rhs = dom_vec.inner(adj_grad_vec)

    # Check not to use trivial data
    assert lhs != 0
    assert rhs != 0
    assert almost_equal(lhs, rhs, places=places)

    # higher dimensional arrays
    lin_size = 3
    for ndim in [1, 3, 6]:

        # DiscreteLpElement
        space = odl.uniform_discr([0.] * ndim, [1.] * ndim, [lin_size] * ndim)
        dom_vec = odl.phantom.cuboid(space, [0.2] * ndim, [0.8] * ndim)

        # gradient
        grad = Gradient(space, method=method,
                        pad_mode=pad_mode,
                        pad_const=pad_const)
        grad(dom_vec)
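The space, method and padding arguments are pytest fixtures defined elsewhere in the test suite. A purely hypothetical sketch of what such a parametrization could look like (the actual fixture names and values in ODL's conftest may differ):

import odl
import pytest


@pytest.fixture(params=['forward', 'backward', 'central'])
def method(request):
    return request.param


@pytest.fixture(params=['constant', ('constant', 1), 'symmetric',
                        'order0', 'order1'])
def padding(request):
    return request.param


@pytest.fixture(params=['float32', 'float64'])
def space(request):
    return odl.uniform_discr([0, 0], [1, 1], (4, 5), dtype=request.param)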
Example #3
def test_gradient_init():
    """Check initialization of ``Gradient``."""
    space = odl.uniform_discr([0, 0], [1, 1], (4, 5))
    vspace = space**2

    op = Gradient(space)
    assert repr(op) != ''
    op = Gradient(range=vspace)
    assert repr(op) != ''
    op = Gradient(space, range=space.astype('float32')**2)
    assert repr(op) != ''
    op = Gradient(space, method='central')
    assert repr(op) != ''
    op = Gradient(space, pad_const=1)
    assert repr(op) != ''
    op = Gradient(space, pad_mode='order1')
    assert repr(op) != ''

    with pytest.raises(TypeError):
        Gradient(odl.rn(1))

    with pytest.raises(TypeError):
        Gradient(space, range=space)

    with pytest.raises(ValueError):
        Gradient(space, range=space**3)
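Since `Gradient(range=vspace)` above succeeds without an explicit domain, the domain is presumably inferred from the range. A small sketch of that behaviour (the inference rule, domain equal to the first range component, is an assumption based on the test above):

import odl

space = odl.uniform_discr([0, 0], [1, 1], (4, 5))
op = odl.Gradient(range=space ** 2)
# Domain inferred from the components of the given range.
assert op.domain == space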
Example #4
    def _reconstruct(self, observation, out):
        observation = self.observation_space.element(observation)
        out[:] = self.x0
        gradient = Gradient(self.op.domain)
        L = [self.op, gradient]
        f = ZeroFunctional(self.op.domain)
        l2_norm = 0.5 * L2NormSquared(self.op.range).translated(observation)
        l12_norm = self.lam * GroupL1Norm(gradient.range)
        g = [l2_norm, l12_norm]
        op_norm = power_method_opnorm(self.op, maxiter=20)
        gradient_norm = power_method_opnorm(gradient, maxiter=20)
        sigma_ray_trafo = 45.0 / op_norm**2
        sigma_gradient = 45.0 / gradient_norm**2
        sigma = [sigma_ray_trafo, sigma_gradient]
        h = ZeroFunctional(self.op.domain)
        forward_backward_pd(out,
                            f,
                            g,
                            L,
                            h,
                            self.tau,
                            sigma,
                            self.niter,
                            callback=self.callback)

        return out
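With f and h both zero, the splitting above minimizes the TV-regularized least-squares objective 0.5 * ||A(x) - observation||^2 + lam * ||grad(x)||_{2,1}. A sketch of a helper (hypothetical name tv_objective) that evaluates this objective using the same functionals as the example:

from odl import Gradient
from odl.solvers import GroupL1Norm, L2NormSquared


def tv_objective(x, op, observation, lam):
    """Evaluate 0.5 * ||op(x) - observation||^2 + lam * ||grad(x)||_{2,1}."""
    gradient = Gradient(op.domain)
    data_fit = 0.5 * L2NormSquared(op.range).translated(observation)
    regularizer = lam * GroupL1Norm(gradient.range)
    return data_fit(op(x)) + regularizer(gradient(x))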
Example #5
    def _reconstruct(self, observation, out):
        observation = self.observation_space.element(observation)
        out_ = out
        if out not in self.reco_space:
            out_ = self.reco_space.zero()
        out_[:] = self.x0
        gradient = Gradient(self.op.domain)
        L = BroadcastOperator(self.op, gradient)
        f = ZeroFunctional(self.op.domain)
        l2_norm = L2NormSquared(self.op.range).translated(observation)
        l1_norm = self.lam * L1Norm(gradient.range)
        g = SeparableSum(l2_norm, l1_norm)
        op_norm = 1.1 * power_method_opnorm(L, maxiter=20)
        sigma = self.tau * op_norm**2
        admm.admm_linearized(out_,
                             f,
                             g,
                             L,
                             self.tau,
                             sigma,
                             self.niter,
                             callback=self.callback)
        if out not in self.reco_space:
            out[:] = out_
        return out
Example #6
def test_gradient_cpu():
    """Discretized spatial gradient operator."""

    with pytest.raises(TypeError):
        Gradient(odl.Rn(1))

    # DiscreteLp Vector
    discr_space = odl.uniform_discr([0, 0], [6, 2.5], DATA_2D.shape)
    dom_vec = discr_space.element(DATA_2D)

    # computation of gradient components with helper function
    dx0, dx1 = discr_space.cell_sides
    diff_0 = finite_diff(DATA_2D, axis=0, dx=dx0, method='forward',
                         padding_method='constant')
    diff_1 = finite_diff(DATA_2D, axis=1, dx=dx1, method='forward',
                         padding_method='constant')

    # gradient
    grad = Gradient(discr_space)
    grad_vec = grad(dom_vec)
    assert len(grad_vec) == DATA_2D.ndim
    assert all_equal(grad_vec[0].asarray(), diff_0)
    assert all_equal(grad_vec[1].asarray(), diff_1)

    # Test adjoint operator

    ran_vec = grad.range.element([DATA_2D, DATA_2D ** 2])
    adj_vec = grad.adjoint(ran_vec)
    lhs = ran_vec.inner(grad_vec)
    rhs = dom_vec.inner(adj_vec)
    # Check not to use trivial data
    assert lhs != 0
    assert rhs != 0
    assert lhs == rhs

    # higher dimensional arrays
    lin_size = 3
    for ndim in range(1, 6):

        # DiscreteLp Vector
        discr_space = odl.uniform_discr([0.] * ndim, [lin_size] * ndim,
                                        [lin_size] * ndim)
        dom_vec = discr_space.element(ndvolume(lin_size, ndim))

        # gradient
        grad = Gradient(discr_space)
        grad(dom_vec)
Example #7
def test_gradient(method, impl, padding):
    """Discretized spatial gradient operator."""

    with pytest.raises(TypeError):
        Gradient(odl.Rn(1), method=method)

    if isinstance(padding, tuple):
        padding_method, padding_value = padding
    else:
        padding_method, padding_value = padding, None

    # DiscreteLp Vector
    discr_space = odl.uniform_discr([0, 0], [1, 1], DATA_2D.shape, impl=impl)
    dom_vec = discr_space.element(DATA_2D)

    # computation of gradient components with helper function
    dx0, dx1 = discr_space.cell_sides
    diff_0 = finite_diff(DATA_2D, axis=0, dx=dx0, method=method,
                         padding_method=padding_method,
                         padding_value=padding_value)
    diff_1 = finite_diff(DATA_2D, axis=1, dx=dx1, method=method,
                         padding_method=padding_method,
                         padding_value=padding_value)

    # gradient
    grad = Gradient(discr_space, method=method,
                    padding_method=padding_method,
                    padding_value=padding_value)
    grad_vec = grad(dom_vec)
    assert len(grad_vec) == DATA_2D.ndim
    assert all_almost_equal(grad_vec[0].asarray(), diff_0)
    assert all_almost_equal(grad_vec[1].asarray(), diff_1)

    # Test adjoint operator
    derivative = grad.derivative()
    ran_vec = derivative.range.element([DATA_2D, DATA_2D ** 2])
    deriv_grad_vec = derivative(dom_vec)
    adj_grad_vec = derivative.adjoint(ran_vec)
    lhs = ran_vec.inner(deriv_grad_vec)
    rhs = dom_vec.inner(adj_grad_vec)
    # Check not to use trivial data
    assert lhs != 0
    assert rhs != 0
    assert almost_equal(lhs, rhs)

    # higher dimensional arrays
    lin_size = 3
    for ndim in [1, 3, 6]:

        # DiscreteLp Vector
        space = odl.uniform_discr([0.] * ndim, [1.] * ndim, [lin_size] * ndim)
        dom_vec = odl.phantom.cuboid(space, [0.2] * ndim, [0.8] * ndim)

        # gradient
        grad = Gradient(space, method=method,
                        padding_method=padding_method,
                        padding_value=padding_value)
        grad(dom_vec)
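Note that this example and the CUDA/CPU tests above use the older keyword names padding_method/padding_value, while the other examples use pad_mode/pad_const. A sketch of the equivalent call with the newer names, assuming a current ODL release:

import odl

space = odl.uniform_discr([0, 0], [1, 1], (4, 5))
# pad_mode='constant' with pad_const=0 corresponds to the older
# padding_method='constant', padding_value=0.
grad = odl.Gradient(space, method='forward', pad_mode='constant', pad_const=0)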
Example #8
    def __init__(self, domain, diagonal_neighbour=False):
        """Initialize a new instance.

        Parameters
        ----------
        domain : `DiscreteLp`
            Set of elements on which the operator can be applied.
        diagonal_neighbour : bool, optional
            Flag stored as ``self.diagonal_neighbour``.
        """
        self.diagonal_neighbour = diagonal_neighbour

        if not isinstance(domain, DiscreteLp):
            raise NotImplementedError('Only works for `uniform_discr`')

        super().__init__(domain=domain, range=domain)

        self.shape_param = domain.shape
        self.elem_len = np.prod(self.shape_param)

        self.forward_grad = Gradient(domain=domain,
                                     method='forward',
                                     pad_mode='order0')
        self.backward_grad = Gradient(domain=domain,
                                      method='backward',
                                      pad_mode='order0')
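A minimal standalone sketch of the forward/backward gradient pair constructed in this __init__ (the space and test data below are illustrative):

import odl

space = odl.uniform_discr([0, 0], [1, 1], (8, 8))
fwd_grad = odl.Gradient(space, method='forward', pad_mode='order0')
bwd_grad = odl.Gradient(space, method='backward', pad_mode='order0')

x = odl.phantom.shepp_logan(space, modified=True)
# Both operators map into the 2-component product space ``space ** 2``.
assert fwd_grad(x) in space ** 2
assert bwd_grad(x) in space ** 2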
Example #9
    def _reconstruct(self, observation, out):
        observation = self.observation_space.element(observation)
        out[:] = self.x0
        l2_norm = L2NormSquared(self.op.range)
        discrepancy = l2_norm * (self.op - observation)
        gradient = Gradient(self.op.domain)
        l1_norm = GroupL1Norm(gradient.range)
        smoothed_l1 = MoreauEnvelope(l1_norm, sigma=0.03)
        regularizer = smoothed_l1 * gradient
        f = discrepancy + self.lam * regularizer
        opnorm = power_method_opnorm(self.op)
        hessinv_estimate = ScalingOperator(self.op.domain, 1 / opnorm**2)
        newton.bfgs_method(f,
                           out,
                           maxiter=self.niter,
                           hessinv_estimate=hessinv_estimate,
                           callback=self.callback)
        return out
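The MoreauEnvelope replaces the non-smooth group L1 norm with a differentiable approximation, which is what makes the quasi-Newton (BFGS) solver applicable. A sketch of that smoothness with an illustrative space in place of self.op.domain:

import odl

space = odl.uniform_discr([0, 0], [1, 1], (16, 16))
gradient = odl.Gradient(space)
l1_norm = odl.solvers.GroupL1Norm(gradient.range)
smoothed_l1 = odl.solvers.MoreauEnvelope(l1_norm, sigma=0.03)
regularizer = smoothed_l1 * gradient

x = odl.phantom.shepp_logan(space, modified=True)
# Unlike the raw group L1 norm, the smoothed regularizer has a gradient.
grad_at_x = regularizer.gradient(x)
assert grad_at_x in space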
Example #10
    def _reconstruct(self, observation, out):
        observation = self.observation_space.element(observation)
        out[:] = self.x0
        gradient = Gradient(self.op.domain)
        L = BroadcastOperator(self.op, gradient)
        f = ZeroFunctional(self.op.domain)
        l2_norm = L2NormSquared(self.op.range).translated(observation)
        l1_norm = self.lam * L1Norm(gradient.range)
        g = [l2_norm, l1_norm]
        tau, sigma = douglas_rachford.douglas_rachford_pd_stepsize(L)
        douglas_rachford.douglas_rachford_pd(out,
                                             f,
                                             g,
                                             L,
                                             self.niter,
                                             tau,
                                             sigma,
                                             callback=self.callback)
        return out
Example #11
    def _reconstruct(self, observation, out):
        observation = self.observation_space.element(observation)
        out[:] = self.x0
        gradient = Gradient(self.op.domain)
        L = BroadcastOperator(self.op, gradient)
        f = ZeroFunctional(self.op.domain)
        l2_norm = L2NormSquared(self.op.range).translated(observation)
        l1_norm = self.lam * L1Norm(gradient.range)
        g = SeparableSum(l2_norm, l1_norm)
        tau, sigma = primal_dual_hybrid_gradient.pdhg_stepsize(L)
        primal_dual_hybrid_gradient.pdhg(out,
                                         f,
                                         g,
                                         L,
                                         self.niter,
                                         tau,
                                         sigma,
                                         callback=self.callback)
        return out