Example no. 1
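# NOTE: this snippet is the tail of a longer gallery-style example: the next
# few lines finish a previous figure (symmetric vs. asymmetric variants), so
# `axs`, `im`, and `Basym` are defined in the omitted portion above. The
# imports and the impulse model `A` below are assumptions added so that the
# Gradient section further down is self-contained.
import numpy as np
import matplotlib.pyplot as plt
import pylops

nx, ny = 11, 21
A = np.zeros((nx, ny))
A[nx // 2, ny // 2] = 1.0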
axs[1].axis('tight')
axs[1].set_title('y sym')
plt.colorbar(im, ax=axs[1])
im = axs[2].imshow(Basym, interpolation='nearest', cmap='rainbow')
axs[2].axis('tight')
axs[2].set_title('y asym')
plt.colorbar(im, ax=axs[2])
plt.tight_layout()
plt.subplots_adjust(top=0.8)


###############################################################################
# We now consider the gradient operator. Given a 2-dimensional array,
# this operator applies first-order derivatives along both dimensions and
# concatenates the results.
Gop = pylops.Gradient(dims=(nx, ny), dtype='float64')

B = np.reshape(Gop * A.flatten(), (2 * nx, ny))   # stacked first derivatives
C = np.reshape(Gop.H * B.flatten(), (nx, ny))     # adjoint: sum of per-axis adjoints
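
###############################################################################
# As a quick sanity check (an addition, not part of the original snippet),
# PyLops operators can be validated with a dot test, which verifies that the
# forward and adjoint implementations are consistent.
from pylops.utils import dottest

dottest(Gop, 2 * nx * ny, nx * ny, verb=True)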

fig, axs = plt.subplots(1, 3, figsize=(10, 3))
fig.suptitle('Gradient', fontsize=12,
             fontweight='bold', y=0.95)
im = axs[0].imshow(A, interpolation='nearest', cmap='rainbow')
axs[0].axis('tight')
axs[0].set_title('x')
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(B, interpolation='nearest', cmap='rainbow')
axs[1].axis('tight')
axs[1].set_title('y')
plt.colorbar(im, ax=axs[1])
# The figure has a third panel showing the adjoint output C (reconstructed
# completion of the truncated snippet):
im = axs[2].imshow(C, interpolation='nearest', cmap='rainbow')
axs[2].axis('tight')
axs[2].set_title('xadj')
plt.colorbar(im, ax=axs[2])
plt.tight_layout()
Example no. 2
import numpy as np
import pylops
import pyproximal
from skimage.data import camera

# Load image
img = camera()
ny, nx = img.shape

# Add noise
sigman = 20
n = np.random.normal(0, sigman, img.shape)
noise_img = img + n

###############################################################################
# We can now define a :class:`pylops.Gradient` operator as well as the
# different proximal operators to be passed to our solvers

# Gradient operator
sampling = 1.
Gop = pylops.Gradient(dims=(ny, nx), sampling=sampling, edge=False,
                      kind='forward', dtype='float64')
L = 8. / sampling ** 2  # upper bound on maxeig(Gop^H Gop)
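
# Optional numerical check (an added sketch, not in the original example): on
# a small dummy grid, the largest eigenvalue of Gop^H Gop stays below 8 / d^2.
Gop_small = pylops.Gradient(dims=(32, 32), sampling=sampling, edge=False,
                            kind='forward', dtype='float64')
maxeig = np.abs((Gop_small.H * Gop_small).eigs(neigs=1, which='LM'))[0]
assert maxeig <= 8. / sampling ** 2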

# L2 data term
lamda = .04
l2 = pyproximal.L2(b=noise_img.ravel(), sigma=lamda)

# L1 regularization (isotropic TV)
l1iso = pyproximal.L21(ndim=2)

###############################################################################
# To start, we solve our denoising problem with the original Primal-Dual
# algorithm

# Primal-dual
tau = 0.95 / np.sqrt(L)
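
###############################################################################
# The snippet is truncated here in the source. A plausible continuation,
# following pyproximal's documented ``PrimalDual`` signature (an assumption
# about the omitted remainder; step sizes chosen so that tau * mu * L <= 1):
from pyproximal.optimization.primaldual import PrimalDual

mu = 0.95 / (tau * L)
iml12 = PrimalDual(l2, l1iso, Gop, x0=np.zeros_like(noise_img.ravel()),
                   tau=tau, mu=mu, theta=1., niter=100)
iml12 = iml12.reshape(img.shape)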
Example no. 3
# Excerpted from a pycsou-style module: ``Union``/``Tuple`` (typing), ``pylops``,
# and ``PyLopLinearOperator`` are imported at module level in the original source.
def Gradient(shape: tuple, step: Union[tuple, float] = 1., edge: bool = True, dtype: str = 'float64',
             kind: str = 'centered') -> PyLopLinearOperator:
    r"""
    Gradient.

    Computes the gradient of a multi-dimensional array (at least two dimensions are required).


    Parameters
    ----------
    shape: tuple
        Shape of the input array.
    step: Union[float, Tuple[float, ...]]
        Step size in each direction.
    edge: bool
        For ``kind = 'centered'``, use reduced order derivative at edges (``True``) or ignore them (``False``).
    dtype: str
        Type of elements in input vector.
    kind: str
        Derivative kind (``forward``, ``centered``, or ``backward``).

    Returns
    -------
    :py:class:`pycsou.core.linop.LinearOperator`
        Gradient operator.

    Examples
    --------

    .. testsetup::

       import numpy as np
       from pycsou.linop.diff import Gradient, FirstDerivative
       from pycsou.util.misc import peaks

    .. doctest::

       >>> x = np.linspace(-2.5, 2.5, 100)
       >>> X,Y = np.meshgrid(x,x)
       >>> Z = peaks(X, Y)
       >>> Nabla = Gradient(shape=Z.shape, kind='forward')
       >>> D = FirstDerivative(size=Z.size, shape=Z.shape, kind='forward')
       >>> np.allclose((Nabla * Z.flatten())[:Z.size], D * Z.flatten())
       True

    .. plot::

       import numpy as np
       import matplotlib.pyplot as plt
       from pycsou.linop.diff import Gradient
       from pycsou.util.misc import peaks

       x  = np.linspace(-2.5, 2.5, 25)
       X,Y = np.meshgrid(x,x)
       Z = peaks(X, Y)
       Dop = Gradient(shape=Z.shape)
       y = Dop * Z.flatten()

       plt.figure()
       h = plt.pcolormesh(X,Y,Z, shading='auto')
       plt.colorbar(h)
       plt.title('Signal')
       plt.figure()
       h = plt.pcolormesh(X,Y,y[:Z.size].reshape(X.shape), shading='auto')
       plt.colorbar(h)
       plt.title('Gradient (1st component)')
       plt.figure()
       h = plt.pcolormesh(X,Y,y[Z.size:].reshape(X.shape), shading='auto')
       plt.colorbar(h)
       plt.title('Gradient (2nd component)')
       plt.show()


    Notes
    -----
    The ``Gradient`` operator applies a first-order derivative to each dimension of
    a multi-dimensional array in forward mode.

    For simplicity, given a three-dimensional array, the ``Gradient`` in forward
    mode using a centered stencil can be expressed as:

    .. math::
        \mathbf{g}_{i, j, k} =
            \frac{f_{i+1, j, k} - f_{i-1, j, k}}{2 d_1} \mathbf{i_1} +
            \frac{f_{i, j+1, k} - f_{i, j-1, k}}{2 d_2} \mathbf{i_2} +
            \frac{f_{i, j, k+1} - f_{i, j, k-1}}{2 d_3} \mathbf{i_3}

    and is implemented by stacking the three first-derivative operators:

    .. math::
        \mathbf{g}  =
        \begin{bmatrix}
           \mathbf{df_1} \\
           \mathbf{df_2} \\
           \mathbf{df_3}
        \end{bmatrix}.

    In adjoint mode, the adjoints of the first derivatives along different
    axes are instead summed together.

    See Also
    --------
    :py:func:`~pycsou.linop.diff.DirectionalGradient`, :py:func:`~pycsou.linop.diff.FirstDerivative`

    """
    return PyLopLinearOperator(pylops.Gradient(dims=shape, sampling=step, edge=edge, dtype=dtype, kind=kind))
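
# A small illustration of the adjoint behaviour described in the Notes above
# (an added sketch using pylops directly, which this wrapper delegates to;
# the ``dir=`` argument follows the classic pylops FirstDerivative signature):
import numpy as np
import pylops

Z = np.random.rand(32, 32)
Nabla = pylops.Gradient(dims=Z.shape, kind='forward')
d0 = pylops.FirstDerivative(Z.size, dims=Z.shape, dir=0, kind='forward')
d1 = pylops.FirstDerivative(Z.size, dims=Z.shape, dir=1, kind='forward')
y = Nabla * Z.ravel()
# The adjoint of the stacked gradient sums the per-axis adjoints:
assert np.allclose(Nabla.H * y, d0.H * y[:Z.size] + d1.H * y[Z.size:])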
Example no. 4
import numpy as np
import pylops

# DatatermLinear, Projection, and PdHgm are assumed to come from the
# surrounding project (module paths are not shown in the source).


def multi_class_segmentation(img,
                             classes: list,
                             beta: float = 0.001,
                             tau: float = None):

    raveld_f = np.zeros((np.prod(img.shape), len(classes)))

    for i in range(len(classes)):
        raveld_f[:, i] = (img.ravel() - classes[i])**2

    f = raveld_f

    grad = pylops.Gradient(dims=img.shape,
                           edge=True,
                           dtype='float64',
                           kind="backward")
    K = beta * grad

    G = DatatermLinear()
    G.set_proxdata(f)
    F_star = Projection(f.shape, len(img.shape))
    solver = PdHgm(K, F_star, G)

    solver.var['x'] = np.zeros((K.shape[1], len(classes)))
    solver.var['y'] = np.zeros((K.shape[0], len(classes)))

    if tau is not None:
        tau0 = tau
    else:
        norm = np.abs(K.eigs(neigs=1, which='LM')).item()
        tau0 = 0.99 / norm
        print("Computed tau: " + str(tau0) + ". "
              "On the next run with the same beta, pass this tau to skip the "
              "eigenvalue estimation.")
    sigma0 = tau0

    G.set_proxparam(tau0)
    F_star.set_proxparam(sigma0)
    solver.maxiter = 200
    solver.tol = 10**(-6)

    solver.solve()

    seg = np.reshape(solver.var['x'],
                     tuple(list(img.shape) + [len(classes)]),
                     order='C')

    # One binary mask per class: vectorized argmin over the class axis
    # (replaces the slow per-pixel double loop from earlier versions).
    result = []
    tmp_result = np.argmin(seg, axis=len(img.shape))

    for i, c in enumerate(classes):
        result.append((tmp_result == i).astype(int))

    # Collapse the binary masks into a single label map (pixel value = class index).
    result0 = sum(i * result[i] for i in range(len(classes)))

    result = np.array(result)

    return result0, result
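
# A hypothetical usage sketch (the image normalization and class values are
# assumptions, not from the original source): segment a grayscale image into
# three intensity classes.
from skimage import data

img = data.camera() / 255.0
label_map, masks = multi_class_segmentation(img, classes=[0.2, 0.5, 0.8])
# label_map holds the per-pixel class index; masks holds one binary mask per class.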
Example no. 5
    def solve(self, f: np.ndarray):
        """
        Run the main primal-dual iteration.

        (Excerpted method: ``numpy as np``, ``pylops``, ``progressbar``, and
        ``IndicatorL2`` are imported at module level in the original source.)

        :param f: observed data (raveled internally).
        :return: the primal solution ``self.x``.
        """

        (primal_n, primal_m) = self.image_size

        v = w = 0
        g = f.ravel()
        p = p_bar = np.zeros(primal_n*primal_m)
        q = q_bar = np.zeros(2*primal_n*primal_m)
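
        # Roles of the iteration variables (descriptive comments added for
        # readability; names follow the code below):
        #   p, p_bar: primal image variable and its extrapolation
        #   q, q_bar: auxiliary first-order field used by the TGV model
        #   g:        dual variable associated with the data term
        #   v, w:     dual variables of the first- and second-order penalties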

        if self.reg_mode != 'tik':
            grad = pylops.Gradient(dims=(primal_n, primal_m), dtype='float64', edge=True, kind="backward")
        else:
            grad = pylops.Identity(np.prod(self.image_size))

        # Stack two gradients for the second-order (TGV) term; the fully
        # symmetrized dxdy/dydx coupling is not required here (and is costly),
        # but this block form is simple and works.
        grad1 = pylops.BlockDiag([grad, grad])

        proj_0 = IndicatorL2((primal_n, primal_m), upper_bound=self.alpha[0])
        proj_1 = IndicatorL2((2 * primal_n, primal_m), upper_bound=self.alpha[1])

        if not self.silent_mode:
            progress = progressbar.ProgressBar(max_value=self.max_iter)

        k = 0

        while (self.tol < self.sens or k == 0) and (k < self.max_iter):

            p_old = p
            q_old = q

            # Dual Update
            g = self.lam / (self.tau + self.lam) * (g + self.tau * (self.A * p_bar - f))

            if self.reg_mode != 'tik':
                v = proj_0.prox(v + self.tau * (grad * p_bar - q_bar))
            else:
                v = self.alpha[0] / (self.tau + self.alpha[0]) * \
                    (v + self.tau * (grad*p_bar - self.data))


            if self.reg_mode == 'tgv':
                w = proj_1.prox(w + self.tau * (grad1 * q_bar))

            # Primal Update
            p = p - self.tau * (-self.alpha[0]*self.breg_p + self.A.H*g + grad.H * v)


            if self.reg_mode == 'tgv':
                q = q + self.tau * (v - grad1.H * w)

            # Extragradient Update

            p_bar = 2 * p - p_old
            q_bar = 2 * q - q_old


            # Update the stopping criterion every 50 iterations (skip k == 0,
            # where p_old is still all zeros and the ratio is undefined).
            if k % 50 == 0 and k > 0:
                p_gap = p - p_old
                self.sens = np.linalg.norm(p_gap) / np.linalg.norm(p_old)
                print(self.sens)

            if self.gamma:
                raise NotImplementedError("The adjustment of the step size in the "
                                          "Primal-Dual is not yet fully developed.")
                theta = 1 / np.sqrt(1 + 2 * self.gamma * self.G.prox_param)
                self.G.prox_param = theta * self.G.prox_param
                self.F_star.prox_param = self.F_star.prox_param / theta

            k += 1
            if not self.silent_mode:
                progress.update(k)

        self.x = p

        if k < self.max_iter:
            print("Early stopping.")

        return self.x
Example no. 6
import numpy as np
import pylops

# DatatermLinear, Projection, PdHgm, and normest are assumed to come from the
# surrounding project (module paths are not shown in the source).


def multi_class_segmentation(img,
                             classes: list,
                             beta: float = 0.001,
                             tau: float = None):

    raveld_f = np.zeros((np.prod(img.shape), len(classes)))

    for i in range(len(classes)):
        raveld_f[:, i] = (img.ravel() - classes[i])**2

    f = raveld_f

    grad = pylops.Gradient(dims=img.shape, dtype='float64')
    K = beta * grad

    G = DatatermLinear()
    G.set_proxdata(f)
    F_star = Projection(f.shape, len(img.shape))
    solver = PdHgm(K, F_star, G)

    solver.var['x'] = np.zeros((K.shape[1], len(classes)))
    solver.var['y'] = np.zeros((K.shape[0], len(classes)))

    if tau is not None:
        tau0 = tau
    else:
        tau0 = 0.99 / normest(K)
        print("Computed tau:", tau0)
    sigma0 = tau0

    G.set_proxparam(tau0)
    F_star.set_proxparam(sigma0)
    solver.maxiter = 200
    solver.tol = 10**(-6)

    solver.solve()

    seg = np.reshape(solver.var['x'],
                     tuple(list(img.shape) + [len(classes)]),
                     order='C')

    # One binary mask per class: vectorized argmin over the class axis
    # (replaces the slow per-pixel double loop from earlier versions).
    result = []
    tmp_result = np.argmin(seg, axis=len(img.shape))

    for i, c in enumerate(classes):
        result.append((tmp_result == i).astype(int))

    # Collapse the binary masks into a single label map (pixel value = class index).
    result0 = sum(i * result[i] for i in range(len(classes)))

    result = np.array(result)

    return result0, result