Example #1
class MaskedAffineFct(Functional):
    """ F(x) = sum(c[mask,:]*x[mask,:]) + \delta_{x[not(mask),:] == 0} """
    def __init__(self, mask, c, conj=None):
        Functional.__init__(self)
        self.x = Variable(c.shape)
        self.mask = mask.astype(bool)
        self.nmask = ~self.mask
        self.c = c
        if conj is None:
            from opymize.functionals import MaskedIndicatorFct
            self.conj = MaskedIndicatorFct(mask, c, conj=self)
        else:
            self.conj = conj
        scale = self.x.vars(self.x.new())[0]
        scale[self.mask, :] = 1.0
        self._prox = ShiftScaleOp(self.x.size, self.c.ravel(), scale.ravel(),
                                  -1)

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        val = np.einsum('ik,ik->', x[self.mask, :], self.c[self.mask, :])
        infeas = 0.0 if np.all(self.mask) else norm(x[self.nmask, :],
                                                    ord=np.inf)
        result = (val, infeas)
        if grad:
            dF = self.x.new()
            dF[self.mask, :] = self.c[self.mask, :]
            result = (result, dF.ravel())
        return result

    def prox(self, tau):
        self._prox.b = -tau
        if hasattr(self._prox, 'gpuvars'):
            self._prox.gpuvars['b'][:] = np.atleast_1d(self._prox.b)
        return self._prox
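
A minimal usage sketch (assuming opymize is installed; the import path matches the one used in __init__ above):

import numpy as np
from opymize.functionals import MaskedAffineFct

# toy data: rows 0 and 2 carry the linear term, row 1 is forced to zero
# by the indicator part
mask = np.array([True, False, True])
c = np.ones((3, 2))
F = MaskedAffineFct(mask, c)
x = np.arange(6, dtype=np.float64)  # flattened (3, 2) variable
val, infeas = F(x)  # val == 10.0 (sum over masked rows), infeas == 3.0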
Example #2
class SplitSum(Functional):
    """ F(x1,x2,...) = F1(x1) + F2(x2) + ... """
    def __init__(self, fcts, conj=None):
        Functional.__init__(self)
        self.x = Variable(*[(F.x.size, ) for F in fcts])
        self.fcts = fcts
        self.conj = SplitSum([F.conj for F in fcts],
                             conj=self) if conj is None else conj

    def __call__(self, x, grad=False):
        X = self.x.vars(x)
        results = [F(xi, grad=grad) for F, xi in zip(self.fcts, X)]
        if grad:
            val = sum([res[0][0] for res in results])
            infeas = sum([res[0][1] for res in results])
            dF = np.concatenate([res[1] for res in results])
            return (val, infeas), dF
        else:
            val = sum([res[0] for res in results])
            infeas = max([res[1] for res in results])
            return (val, infeas)

    def prox(self, tau):
        if type(tau) is np.ndarray:
            tau = self.x.vars(tau)
            prox_ops = []
            for F, Ftau in zip(self.fcts, tau):
                prox_ops.append(F.prox(Ftau))
            return SplitOp(prox_ops)
        else:
            return SplitOp([F.prox(tau) for F in self.fcts])
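
A sketch of how SplitSum dispatches to per-block functionals, wiring together two SSD data terms (see Example #13 below); the import path is an assumption:

import numpy as np
from opymize.functionals import SSD, SplitSum  # assumed import path

# two independent data terms on blocks of sizes 6 and 4
F1 = SSD(np.ones((3, 2)))
F2 = SSD(np.zeros((2, 2)))
F = SplitSum([F1, F2])
x = np.zeros(F.x.size)
val, infeas = F(x)  # each summand is evaluated on its own slice of x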
Example #3
class L12ProjJacobian(LinOp):
    """ Jacobian of L1NormsProj for Frobenius norm """
    def __init__(self, N, M, lbd):
        # xnorms[i] = 1.0/|xbar[i,:,:]|_2
        # exterior[i] = (|xbar[i,:,:]|_2 > lbd)
        LinOp.__init__(self)
        self.x = Variable((N, M[0] * M[1]))
        self.y = self.x
        self.lbd = lbd
        self.adjoint = self
        self.extind = np.zeros(N, dtype=bool)
        self.intind = np.zeros(N, dtype=bool)
        self.xbar_normed = self.x.vars(self.x.new())[0]
        self.lbd_norms = np.zeros(N)

    def update(self, xbar, exterior, xnorms):
        self.extind[:] = exterior
        self.intind[:] = ~self.extind

        self.xbar_normed[:] = xbar.reshape(self.xbar_normed.shape)
        self.xbar_normed[exterior, :] *= xnorms[exterior, None]

        self.lbd_norms[:] = self.lbd * xnorms

    def _call_cpu(self, x, y=None, add=False):
        x = self.x.vars(x)[0]
        if add or y is None:
            yy = self.y.vars(self.y.new())[0]
        else:
            yy = self.y.vars(y)[0]

        # xn[i,k] = xbar[i,k]/|xbar[i,:]|
        xn = self.xbar_normed

        # y[norms <= lbd] = x
        yy[self.intind, :] = x[self.intind, :]

        # y[norms > lbd] = lbd/|xbar|*(x - <xn,x>*xn)
        yy[self.extind, :] = xn[self.extind, :]
        yy[self.extind, :] *= -np.einsum('ik,ik->i', xn[self.extind, :],
                                         x[self.extind, :])[:, None]
        yy[self.extind, :] += x[self.extind, :]
        yy[self.extind, :] *= self.lbd_norms[self.extind, None]

        if y is None:
            if add: x += yy
            else: x[:] = yy
        elif add:
            y += yy
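
The exterior branch applies the derivative of the ball projection P(z) = lbd*z/|z|; a stand-alone NumPy finite-difference check of that formula (not part of the library):

import numpy as np

# for |z| > lbd the projection onto the lbd-ball is P(z) = lbd*z/|z| with
# Jacobian dP(z) = (lbd/|z|) * (I - zn*zn^T),  zn = z/|z|
lbd = 0.5
z = np.array([1.0, 2.0, -0.5])
zn = z / np.linalg.norm(z)
J = lbd / np.linalg.norm(z) * (np.eye(3) - np.outer(zn, zn))
P = lambda v: lbd * v / np.linalg.norm(v)
eps = 1e-6
J_fd = np.stack([(P(z + eps*e) - P(z - eps*e)) / (2*eps) for e in np.eye(3)],
                axis=1)
assert np.allclose(J, J_fd, atol=1e-6)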
Example #4
class ConstrainFct(Functional):
    """ F(x) = 0 if x[mask,:]==c[mask,:] else infty
        The mask is only applied to the first component of x
    """
    def __init__(self, mask, c, conj=None):
        Functional.__init__(self)
        self.x = Variable(c.shape)
        self.mask = mask
        self.c = c
        if conj is None:
            from opymize.functionals import MaskedAffineFct
            self.conj = MaskedAffineFct(mask, c, conj=self)
        else:
            self.conj = conj
        self._prox = ConstrainOp(mask, c)

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        val = 0
        infeas = norm(x[self.mask, :] - self.c[self.mask, :], ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau
        return self._prox
Example #5
class L1NormsConj(Functional):
    """ F(x) = \sum_i \delta_{|x[i,:,:]| \leq lbd}
    Supported norms are 'frobenius' and 'spectral'
    """
    def __init__(self, N, M, lbd, matrixnorm="frobenius", conj=None):
        Functional.__init__(self)
        assert matrixnorm in ['frobenius', 'spectral']
        self.x = Variable((N,) + M)
        self.lbd = lbd
        self.matrixnorm = matrixnorm
        conjnorm = 'nuclear' if matrixnorm == 'spectral' else 'frobenius'
        self.conj = L1Norms(N, M, lbd, conjnorm, conj=self) if conj is None else conj
        self._prox = L1NormsProj(N, M, self.lbd, matrixnorm)
        self._xnorms = np.zeros((N,), order='C')

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        norms(x, self._xnorms, self.matrixnorm)
        val = 0
        infeas = norm(np.fmax(0, self._xnorms - self.lbd), ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau!
        return self._prox
Example #6
class L1Norms(Functional):
    """ F(x) = lbd * \sum_i |x[i,:,:]|
    Supported norms are 'frobenius' and 'nuclear'
    """
    def __init__(self, N, M, lbd, matrixnorm="frobenius", conj=None):
        Functional.__init__(self)
        assert matrixnorm in ['frobenius', 'nuclear']
        self.x = Variable((N,) + M)
        self.lbd = lbd
        self.matrixnorm = matrixnorm
        conjnorm = 'spectral' if matrixnorm == 'nuclear' else 'frobenius'
        self.conj = L1NormsConj(N, M, lbd, conjnorm, conj=self) if conj is None else conj
        self._xnorms = np.zeros((N,), order='C')

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        norms(x, self._xnorms, self.matrixnorm)
        val = self.lbd*self._xnorms.sum()
        infeas = 0
        result = (val, infeas)
        if grad:
            assert self.matrixnorm == 'frobenius'
            """ dF = 0 if x == 0 else x/|x| """
            dF = x.copy()
            where0 = (self._xnorms == 0)
            wheren0 = ~where0
            dF[where0,:,:] = 0
            dF[wheren0,:,:] /= self._xnorms[wheren0]
            result = result, dF.ravel()
        return result
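
The two norm classes above are wired as mutual Fenchel conjugates through the conj keyword; a quick sketch (import path assumed as in the snippets above):

from opymize.functionals import L1Norms  # assumed import path

F = L1Norms(4, (2, 3), 0.5)              # lbd * sum of Frobenius norms
assert F.conj.conj is F                  # back-reference set in __init__
assert F.conj.matrixnorm == 'frobenius'  # 'frobenius' is its own dual norm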
Example #7
class MaxFct(Functional):
    """ \sum_ik b[k]*max(0, x[i,k] - f[i,k]) """
    def __init__(self, data, vol=None, mask=None, conj=None):
        Functional.__init__(self)
        self.f = np.atleast_2d(data)
        self.x = Variable(self.f.shape)
        self.vol = np.ones(data.shape[1]) if vol is None else vol
        self.mask = np.ones(data.shape[0],
                            dtype=bool) if mask is None else mask
        if conj is None:
            self.conj = MaxFctConj(data, weights=vol, mask=mask, conj=self)
        else:
            self.conj = conj

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        posval = np.fmax(0, (x - self.f)[self.mask, :])
        val = np.einsum('ik,k->', posval, self.vol)
        infeas = 0
        result = (val, infeas)
        if grad:
            df = np.zeros_like(x)
            posval[posval > 0] = 1.0
            df[self.mask, :] = np.einsum('ik,k->ik', posval, self.vol)
            return result, df.ravel()
        else:
            return result
Example #8
class MaxFctConj(Functional):
    """ sum_i <x[i,:],f[i,:]> + \delta_{0 <= b[k]*x[i,k] <= 1} """
    def __init__(self, data, weights=None, mask=None, conj=None):
        Functional.__init__(self)
        self.f = np.atleast_2d(data)
        self.x = Variable(self.f.shape)
        self.weights = np.ones(data.shape[1]) if weights is None else weights
        self.mask = np.ones(data.shape[0],
                            dtype=bool) if mask is None else mask
        if conj is None:
            self.conj = MaxFct(data, vol=weights, mask=mask, conj=self)
        else:
            self.conj = conj
        self._prox = IntervProj(self.weights, self.f, mask=mask)

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        val = np.einsum('ik,ik->', x[self.mask, :], self.f[self.mask, :])
        infeas = norm(np.fmin(0, x[self.mask, :]), ord=np.inf)
        bx = np.einsum('ik,k->ik', x[self.mask, :], self.weights)
        infeas += norm(np.fmin(0, 1.0 - bx), ord=np.inf)
        result = (val, infeas)
        if grad:
            df = np.zeros_like(x)
            df[self.mask, :] = self.f[self.mask, :]
            result = result, df.ravel()
        return result

    def prox(self, tau):
        self._prox.a = tau
        return self._prox
Example #9
class HuberPerspective(Functional):
    """ \sum_i  -x[i,-1]*f(-x[i,:-1]/x[i,-1])  if x[i,-1] < 0
        \sum_i  lbd*|x[i,:-1]|                 if x[i,-1] == 0
                +inf                           if x[i,-1] > 0

        f(x) := |  lbd*(0.5/alph*|x|^2),   if |x| <= alph,
                |  lbd*(|x| - alph/2),     if |x| > alph.
    """
    def __init__(self, N, M, lbd=1.0, alph=1.0, conj=None):
        Functional.__init__(self)
        assert lbd > 0
        assert alph > 0
        self.x = Variable((N, M + 1))
        self.lbd = lbd
        self.alph = alph
        if conj is None:
            dlbd, dalph = self.lbd, self.alph / self.lbd
            self.conj = TruncQuadEpiInd(N, M, lbd=dlbd, alph=dalph, conj=self)
        else:
            self.conj = conj

    def __call__(self, x, grad=False):
        assert not grad
        x = self.x.vars(x)[0]
        lbd, alph = self.lbd, self.alph
        x1, x2 = x[:, :-1], -x[:, -1]
        x1norm = np.linalg.norm(x1, axis=-1)
        qmsk = (x1norm < alph * x2)
        lmsk = (~qmsk) & (x2 > -1e-4)  # relaxed positivity condition
        val = 0.5 / alph * (x1norm[qmsk]**2 / x2[qmsk]).sum()
        val += (x1norm[lmsk] - x2[lmsk] * alph / 2).sum()
        val *= lbd
        infeas = np.linalg.norm(np.fmin(0, x2), ord=np.inf)
        return (val, infeas)
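
A stand-alone NumPy check of the quadratic-branch value computed above (the perspective x2*f(x1/x2) with f the Huber function):

import numpy as np

# for x2 > 0 the perspective value is x2*f(x1/x2); in the quadratic branch
# this equals lbd*0.5/alph*|x1|^2/x2, as accumulated in __call__
lbd, alph = 2.0, 1.0
huber = lambda r: lbd*(0.5/alph*r**2) if r <= alph else lbd*(r - alph/2)
x1, x2 = np.array([0.3, -0.4]), 1.5   # |x1| = 0.5 < alph*x2: quadratic case
r = np.linalg.norm(x1)
assert np.isclose(x2 * huber(r / x2), lbd * 0.5/alph * r**2 / x2)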
Example #10
class QuadEpiInd(Functional):
    """ \sum_i \delta_{f_i(x[i,:-1]) \leq x[i,-1]}
        f_i(x) := 0.5*a*|x|^2 + <b[i],x> + c[i]
     """
    def __init__(self, N, M, a=1.0, b=None, c=None, conj=None):
        Functional.__init__(self)
        assert a > 0
        self.x = Variable((N, M + 1))
        self.a = a
        self.b = np.zeros((N, M)) if b is None else b
        self.c = np.zeros((N, )) if c is None else c
        if conj is None:
            da, db, dc = quad_dual_coefficients(self.a, self.b, self.c)
            self.conj = QuadEpiSupp(N, M, a=da, b=db, c=dc, conj=self)
        else:
            self.conj = conj
        self._prox = QuadEpiProj(N, M, alph=a, b=b, c=c)

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        fx = (0.5 * self.a * x[:, :-1]**2 +
              self.b * x[:, :-1]).sum(axis=1) + self.c
        dif = fx - x[:, -1]
        val = 0
        infeas = np.linalg.norm(np.fmax(0, dif), ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau!
        return self._prox
Example #11
class QuadEpiSupp(Functional):
    """ \sum_i -x[i,-1]*f_i(-x[i,:-1]/x[i,-1]) if x[i,-1] < 0
        and inf if x[i,-1] >= 0

        f_i(x) := 0.5*a*|x|^2 + <b[i],x> + c[i]
    """
    def __init__(self, N, M, a=1.0, b=None, c=None, conj=None):
        Functional.__init__(self)
        assert a > 0
        self.x = Variable((N, M + 1))
        self.a = a
        self.b = np.zeros((N, M)) if b is None else b
        self.c = np.zeros((N, )) if c is None else c
        if conj is None:
            da, db, dc = quad_dual_coefficients(self.a, self.b, self.c)
            self.conj = QuadEpiInd(N, M, a=da, b=db, c=dc, conj=self)
        else:
            self.conj = conj

    def __call__(self, x, grad=False):
        assert not grad
        a, b, c = self.a, self.b, self.c
        x = self.x.vars(x)[0]
        msk = x[:, -1] < -1e-8
        x1, x2 = x[msk, :-1], -x[msk, -1]
        val = (0.5 * a * x1**2 / x2[:, None] +
               b[msk] * x1).sum(axis=1) + x2 * c[msk]
        val = val.sum()
        if np.all(msk):
            infeas = 0
        else:
            infeas = np.linalg.norm(x[~msk, -1], ord=np.inf)
        return (val, infeas)
Example #12
class BndSSDConj(Functional):
    """ 0.5*<x,x>_b + sum[max(f1*x,f2*x)]
        same as 0.5*<x,x>_b + sum[(a + sign(x)*b)*x]
        where a = (f1+f2)/2 and b = (f2-f1)/2
    """
    def __init__(self, f1, f2, vol=None, mask=None, conj=None):
        Functional.__init__(self)
        self.x = Variable(f1.shape)
        self.a = 0.5 * (f1 + f2)
        self.b = 0.5 * (f2 - f1)
        self.vol = np.ones(f1.shape[1]) if vol is None else vol
        self.mask = np.ones(f1.shape[0], dtype=bool) if mask is None else mask
        if conj is None:
            cj_vol = 1.0 / self.vol
            self.conj = BndSSD(f1, f2, vol=cj_vol, mask=mask, conj=self)
        else:
            self.conj = conj

    def __call__(self, x, grad=False):
        assert not grad
        x = self.x.vars(x)[0]
        x_msk = x[self.mask, :]
        a_msk, b_msk = self.a[self.mask, :], self.b[self.mask, :]
        val = 0.5*np.einsum('ik,k->', x_msk**2, self.vol) \
            + np.einsum('ik,ik->', a_msk + np.sign(x_msk)*b_msk, x_msk)
        infeas = 0
        return (val, infeas)
Example #13
class SSD(Functional):
    """ 0.5*<x-f, x-f>_b + shift
        where b is the volume element
    """
    def __init__(self, data, vol=None, shift=0, mask=None, conj=None):
        Functional.__init__(self)
        self.x = Variable(data.shape)
        self.f = np.atleast_2d(data)
        self.shift = shift
        self.vol = np.ones(data.shape[1]) if vol is None else vol
        self.mask = np.ones(data.shape[0],
                            dtype=bool) if mask is None else mask
        if conj is None:
            cj_vol = 1.0 / self.vol
            cj_data = np.zeros_like(self.f)
            cj_data[self.mask, :] = np.einsum('ik,k->ik', self.f[self.mask, :],
                                              -self.vol)
            cj_shift = -0.5 * np.einsum('ik,k->', cj_data**2, cj_vol)
            cj_shift -= self.shift
            self.conj = SSD(cj_data,
                            shift=cj_shift,
                            vol=cj_vol,
                            mask=mask,
                            conj=self)
        else:
            self.conj = conj
        prox_shift = np.zeros_like(self.f)
        prox_shift[self.mask, :] = self.f[self.mask, :]
        prox_shift = prox_shift.ravel()
        self._prox = ShiftScaleOp(self.x.size, prox_shift, 0.5, 1.0)

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        val = 0.5 * np.einsum('ik,k->',
                              (x - self.f)[self.mask, :]**2, self.vol)
        val += self.shift
        infeas = 0
        result = (val, infeas)
        if grad:
            df = np.zeros_like(x)
            df[self.mask, :] = np.einsum('ik,k->ik',
                                         (x - self.f)[self.mask, :], self.vol)
            return result, df.ravel()
        else:
            return result

    def prox(self, tau):
        msk = self.mask
        tauvol = np.zeros_like(self.f)
        tauvol[msk, :] = (tau * np.ones(self.f.size)).reshape(
            self.f.shape)[msk, :]
        tauvol[msk, :] = np.einsum('ik,k->ik', tauvol[msk, :], self.vol)
        self._prox.a = 1.0 / (1.0 + tauvol.ravel())
        self._prox.b = tauvol.ravel()
        if hasattr(self._prox, 'gpuvars'):
            self._prox.gpuvars['a'][:] = self._prox.a
            self._prox.gpuvars['b'][:] = self._prox.b
        return self._prox
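
The operator configured in prox has the elementwise closed form y = (x + tau*b*f)/(1 + tau*b) on masked rows (identity elsewhere, where tauvol == 0); a stand-alone check of the optimality condition:

import numpy as np

# prox_{tau*F}(x) with F(y) = 0.5*sum_k b[k]*(y[k]-f[k])^2 minimizes
# 0.5*|y - x|^2 + tau*F(y), giving y = (x + tau*b*f)/(1 + tau*b) elementwise
tau = 0.7
b = np.array([1.0, 2.0, 0.5])
f = np.array([0.3, -1.0, 2.0])
x = np.array([1.0, 0.0, -2.0])
y = (x + tau*b*f) / (1 + tau*b)
assert np.allclose((y - x) + tau*b*(y - f), 0)  # first-order condition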
Example #14
class ProxBndSSD(Operator):
    """ y = max(x + tau*f1, min(x + tau*f2, alpha*x))/alpha """
    def __init__(self, f1, f2, alpha=2.0, tau=1.0):
        Operator.__init__(self)
        self.x = Variable(f1.shape)
        self.y = Variable(f1.shape)
        self.f1 = f1
        self.f2 = f2
        self.alpha = alpha
        self.tau = tau

    def prepare_gpu(self, type_t="double"):
        np_dtype = np.float64 if type_t == "double" else np.float32
        taufact = "tau[i]*" if type(self.tau) is np.ndarray else "tau[0]*"
        self.gpuvars = {
            'f1': gpuarray.to_gpu(self.f1),
            'f2': gpuarray.to_gpu(self.f2),
            # the kernel reads alpha[i] elementwise, so alpha is expected to
            # be a full-size array by now (cf. BndSSD.prox)
            'alpha': gpuarray.to_gpu(np.asarray(self.alpha, dtype=np_dtype)),
            'tau': gpuarray.to_gpu(np.asarray(self.tau, dtype=np_dtype))
        }
        headstr = "{0} *x, {0} *y, "
        headstr += "{0} *f1, {0} *f2, {0} *alpha, {0} *tau"
        self._kernel = ElementwiseKernel(headstr.format(type_t),
            ("y[i] = fmax(x[i] + {0}f1[i], "\
                    + "fmin(x[i] + {0}f2[i], "\
                        + "alpha[i]*x[i]))/alpha[i]").format(taufact))

    def _call_gpu(self, x, y=None, add=False, jacobian=False):
        assert not jacobian
        assert not add
        g = self.gpuvars
        y = x if y is None else y
        self._kernel(x, y, g['f1'], g['f2'], g['alpha'], g['tau'])

    def _call_cpu(self, x, y=None, add=False, jacobian=False):
        assert not jacobian
        assert not add
        x = self.x.vars(x)[0]
        y = self.y.vars(y)[0] if y is not None else x
        np.fmax(x + self.tau * self.f1,
                np.fmin(x + self.tau * self.f2, self.alpha * x),
                out=y)
        y /= self.alpha
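
A brute-force scalar check of this clamp formula (here b plays the role of the volume weight that BndSSD, Example #17 below, folds into f1 and f2):

import numpy as np

# prox of tau*(0.5*b*max(0, f1-y)^2 + 0.5*b*max(0, y-f2)^2) at x is
#   y* = max(x + tau*b*f1, min(x + tau*b*f2, alpha*x)) / alpha,
# with alpha = 1 + tau*b; compare against a fine grid search
tau, b, f1, f2, x = 0.8, 1.5, -1.0, 1.0, 2.3
alpha = 1 + tau*b
y_formula = max(x + tau*b*f1, min(x + tau*b*f2, alpha*x)) / alpha
ys = np.linspace(-5, 5, 200001)
obj = 0.5*(ys - x)**2 + tau*(0.5*b*np.fmax(0, f1 - ys)**2
                             + 0.5*b*np.fmax(0, ys - f2)**2)
assert abs(ys[obj.argmin()] - y_formula) < 1e-4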
Example #15
class IntervProj(Operator):
    """ y[i,k] = proj_[0,b[k]](x[i,k] - a[i,k]*shift[i,k]) """
    def __init__(self, b, shift, a=1, mask=None):
        Operator.__init__(self)
        assert b.size == shift.shape[1]
        self.x = Variable(shift.shape)
        self.y = Variable(shift.shape)
        self.b = b
        self.a = a
        self.shift = shift
        self.mask = np.ones(shift.shape[0],
                            dtype=bool) if mask is None else mask

    def prepare_gpu(self, type_t="double"):
        # don't multiply with a if a is the integer 1 (not the float 1.0!)
        afact = "" if type(self.a) is int and self.a == 1 else "a[0]*"
        if type(self.a) is np.ndarray:
            afact = "a[i]*"

        np_dtype = np.float64 if type_t == "double" else np.float32
        self.gpuvars = {
            'b': gpuarray.to_gpu(self.b),
            'shift': gpuarray.to_gpu(self.shift),
            'a': gpuarray.to_gpu(np.asarray(self.a, dtype=np_dtype))
        }
        headstr = "{0} *x, {0} *y, {0} *shift, {0} *a, {0} *b".format(type_t)
        self._kernel = ElementwiseKernel(headstr,
            "y[i] = fmax(0.0, fmin(b[i%{}], x[i] - {}shift[i]))"\
                .format(self.b.size, afact))

    def _call_gpu(self, x, y=None, add=False, jacobian=False):
        assert not jacobian
        assert not add
        g = self.gpuvars
        y = x if y is None else y
        self._kernel(x, y, g['shift'], g['a'], g['b'])

    def _call_cpu(self, x, y=None, add=False, jacobian=False):
        assert not jacobian
        assert not add
        x = self.x.vars(x)[0]
        y = self.y.vars(y)[0] if y is not None else x
        np.fmax(0.0, np.fmin(self.b[None, :], x - self.a * self.shift), out=y)
Example #16
class SemismoothNewtonSystem(LinOp):
    """ Block matrix of the following form:
        [[      I - K, tau*K*A^T ],
         [ -sigma*H*A,     I - H ]]
    """
    def __init__(self, A, tau, sigma):
        LinOp.__init__(self)
        self.A = A
        self.tau = tau
        self.sigma = sigma
        self.x = Variable((A.x.size,), (A.y.size,))
        self.y = self.x
        self.xtmp = self.x.new()
        self.K = None
        self.H = None
        self.adjoint = SemismoothNewtonSystemAdjoint(self)

    def _call_cpu(self, x, y=None, add=False):
        assert y is not None
        assert add is False

        x1tmp, x2tmp = self.x.vars(self.xtmp)
        x1, x2 = self.x.vars(x)
        y1, y2 = self.y.vars(y)

        # y1 = (I - K)*x1 + tau*K*A^T*x2
        self.A.adjoint(x2, x1tmp)
        self.K(x1tmp, y1)
        y1[:] = x1 + self.tau*y1
        self.K(x1, x1tmp)
        y1 -= x1tmp

        # y2 = -sigma*H*A*x1 + (I - H)*x2
        self.A(x1, x2tmp)
        self.H(x2tmp, y2)
        y2[:] = x2 - self.sigma*y2
        self.H(x2, x2tmp)
        y2 -= x2tmp
Example #17
class BndSSD(Functional):
    """ 0.5*|max(0, f1 - x)|^2 + 0.5*|max(0, x - f2)|^2 """
    def __init__(self, f1, f2, vol=None, mask=None, conj=None):
        Functional.__init__(self)
        self.x = Variable(f1.shape)
        self.f1 = f1
        self.f2 = f2
        self.vol = np.ones(f1.shape[1]) if vol is None else vol
        self.mask = np.ones(f1.shape[0], dtype=bool) if mask is None else mask
        if conj is None:
            cj_vol = 1.0 / self.vol
            self.conj = BndSSDConj(f1, f2, vol=cj_vol, mask=mask, conj=self)
        else:
            self.conj = conj
        f1_msk, f2_msk = [np.zeros_like(a) for a in [f1, f2]]
        f1_msk[self.mask, :] = self.vol[None, :] * f1[self.mask, :]
        f2_msk[self.mask, :] = self.vol[None, :] * f2[self.mask, :]
        self._prox = ProxBndSSD(f1_msk, f2_msk)

    def __call__(self, x, grad=False):
        assert not grad
        x = self.x.vars(x)[0]
        posval1 = np.fmax(0.0, (self.f1 - x)[self.mask, :])
        posval2 = np.fmax(0.0, (x - self.f2)[self.mask, :])
        val = 0.5*np.einsum('ik,k->', posval1**2, self.vol) \
            + 0.5*np.einsum('ik,k->', posval2**2, self.vol)
        infeas = 0
        return (val, infeas)

    def prox(self, tau):
        msk = self.mask
        tauvol = np.zeros_like(self.f1)
        tauvol[msk, :] = (tau * np.ones(self.f1.size)).reshape(
            self.f1.shape)[msk, :]
        tauvol[msk, :] = np.einsum('ik,k->ik', tauvol[msk, :], self.vol)
        tauvol += 1.0
        self._prox.alpha = tauvol
        self._prox.tau = tau
        return self._prox
Example #18
class ConstrainOp(Operator):
    """ y[mask,:] = const, else identity """
    def __init__(self, mask, const):
        Operator.__init__(self)
        self.x = Variable(const.shape)
        self.y = self.x
        self.mask = mask
        self.const = const
        scale = np.ones_like(const)
        scale[mask, :] = 0.0
        self._jacobian = ScaleOp(self.x.size, scale.ravel())

    def prepare_gpu(self, type_t="double"):
        self.const_gpu = gpuarray.to_gpu(self.const)
        self.mask_gpu = gpuarray.to_gpu(self.mask.astype(np.int8))
        N, M = self.x[0]['shape']
        headstr = "%s *x, %s *y, %s *c, char *mask" % ((type_t, ) * 3)
        self._kernel = ElementwiseKernel(
            headstr, "y[i] = (mask[i/{}]) ? c[i] : x[i]".format(M))
        self._kernel_add = ElementwiseKernel(
            headstr, "y[i] += (mask[i/{}]) ? c[i] : x[i]".format(M))
        self._jacobian.prepare_gpu(type_t=type_t)

    def _call_gpu(self, x, y=None, add=False, jacobian=False):
        y = x if y is None else y
        if add: self._kernel_add(x, y, self.const_gpu, self.mask_gpu)
        else: self._kernel(x, y, self.const_gpu, self.mask_gpu)
        if jacobian:
            return self._jacobian

    def _call_cpu(self, x, y=None, add=False, jacobian=False):
        if add:
            # the GPU path supports add; mirror it here instead of
            # silently overwriting y
            assert y is not None
            yy = self.x.vars(x.copy())[0]
            yy[self.mask, :] = self.const[self.mask, :]
            y += yy.ravel()
        else:
            if y is not None:
                y[:] = x
                x = y
            x = self.x.vars(x)[0]
            x[self.mask, :] = self.const[self.mask, :]
        if jacobian:
            return self._jacobian
Example #19
class TruncQuadEpiInd(Functional):
    """ \sum_i \delta_{|x| \leq lbd} + \delta_{f(x[i,:-1]) \leq x[i,-1]}
        f(x) := 0.5*alph*|x|^2
     """
    def __init__(self, N, M, lbd=1.0, alph=1.0, conj=None):
        Functional.__init__(self)
        assert lbd > 0
        assert alph > 0
        self.x = Variable((N, M + 1))
        self.lbd = lbd
        self.alph = alph
        if conj is None:
            dlbd, dalph = self.lbd, self.alph * self.lbd
            self.conj = HuberPerspective(N, M, lbd=dlbd, alph=dalph, conj=self)
        else:
            self.conj = conj
        self._prox = QuadEpiProj(N, M, lbd=self.lbd, alph=self.alph)

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        val = 0
        lbd, alph = self.lbd, self.alph
        x1norm = np.linalg.norm(x[:, :-1], axis=-1)
        dif = x1norm - self.lbd
        infeas = np.linalg.norm(np.fmax(0, dif), ord=np.inf)
        fx = 0.5 * alph * np.fmin(self.lbd, x1norm)**2
        dif = fx - x[:, -1]
        infeas += np.linalg.norm(np.fmax(0, dif), ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau!
        return self._prox
Example #20
class PosSSDConj(Functional):
    """ 0.5*|max(0, x + f)|^2 - 0.5*|f|^2 """
    def __init__(self, data, vol=None, mask=None, conj=None):
        Functional.__init__(self)
        self.x = Variable(data.shape)
        self.f = np.atleast_2d(data)
        self.vol = np.ones(data.shape[1]) if vol is None else vol
        self.mask = np.ones(data.shape[0], dtype=bool) if mask is None else mask
        self.shift = -0.5*np.einsum('ik,k->', self.f[self.mask,:]**2, self.vol)
        if conj is None:
            cj_vol = 1.0/self.vol
            cj_data = np.zeros_like(self.f)
            cj_data[self.mask,:] = np.einsum('ik,k->ik', self.f[self.mask,:], self.vol)
            self.conj = PosSSD(cj_data, vol=cj_vol, mask=mask, conj=self)
        else:
            self.conj = conj

    def __call__(self, x, grad=False):
        assert not grad
        x = self.x.vars(x)[0]
        val = 0.5*np.einsum('ik,k->', np.fmax(0.0, (x + self.f)[self.mask,:])**2, self.vol)
        val += self.shift
        infeas = 0
        return (val, infeas)
Example #21
class SemismoothNewton(object):
    def __init__(self, g, f, A):
        self.g = g
        self.f = f
        self.linop = A
        self.xy = Variable((self.g.x.size,), (self.f.x.size,))
        self.itervars = { 'xyk': self.xy.new() }
        self.constvars = { 'tau': 1.0, 'sigma': 1.0 }

    def obj_primal(self, x, Ax):
        obj, infeas = self.g(x)
        obj2, infeas2 = self.f(Ax)
        return obj + obj2, infeas + infeas2

    def obj_dual(self, ATy, y):
        obj, infeas = self.g.conj(-ATy)
        obj2, infeas2 = self.f.conj(y)
        return -obj - obj2, infeas + infeas2

    def res(self, xy, xyres, subdiff=False, norm=True):
        c = self.constvars
        x, y = self.xy.vars(xy)
        xgrad, ygrad = self.xy.vars(xyres)

        # xgrad = x - Prox[tau*g](x - tau * A^T * y)
        self.linop.adjoint(y, xgrad)
        xgrad[:] = x - c['tau']*xgrad
        K = self.gprox(xgrad, jacobian=subdiff)
        xgrad[:] = x - xgrad

        # ygrad = y - Prox[sigma*fc](y + sigma * A * x)
        self.linop(x, ygrad)
        ygrad[:] = y + c['sigma']*ygrad
        H = self.fconjprox(ygrad, jacobian=subdiff)
        ygrad[:] = y - ygrad

        if subdiff:
            self.M.K = K
            self.M.H = H

        if norm:
            return 0.5*np.einsum('i,i->', xyres, xyres)

    def iteration_step(self, _iter, use_scipy=False):
        i = self.itervars
        c = self.constvars

        # Set up Newton system
        res_normsq = self.res(i['xyk'], i['xyres'], subdiff=True)

        # Solve modified Newton system
        logging.info('Start linsolve')
        if use_scipy:
            scipy_solve(self.M, i['xyres'], i['dk'], lbd=self.lbd,
                        tmpvars=(i['cg_rk'], i['cg_rkp1'], i['xytmp'], i['cg_dk']),
                        tol=res_normsq, solver=scipy.sparse.linalg.lsqr)
        else:
            CG_solve(self.M, i['xyres'], i['dk'], lbd=self.lbd,
                     tmpvars=(i['cg_rk'], i['cg_rkp1'], i['xytmp'], i['cg_dk']),
                     tol=res_normsq)
        logging.info('Stop linsolve')

        # Armijo backtracking
        self.M(i['dk'], i['xytmp'])
        p_gradf = np.einsum('i,i->', i['xyres'], i['xytmp'])
        armijo_fun = lambda xy: self.res(xy, i['xyres'], subdiff=False)
        new_normsq, alpha = armijo(armijo_fun, i['xyk'], i['xykp1'], i['dk'], \
                                   p_gradf, res_normsq)

        # Update, taken from:
        #   "Distributed Newton Methods for Deep Neural Networks"
        #   by C.-C. Wang et al. (arxiv: https://arxiv.org/abs/1802.00130)
        Md_normsq = 0.5*np.einsum('i,i->', i['xytmp'], i['xytmp'])
        rho = (new_normsq - res_normsq)/(alpha*p_gradf + alpha**2*Md_normsq)
        if rho > 0.75:
            self.lbd *= self.lbd_drop
        elif rho < 0.25:
            self.lbd *= self.lbd_boost
        logging.debug("#{:6d}: alpha = {: 9.6g}, "\
                     "rho = {: 9.6g}, lbd = {: 9.6g}, normsq = {: 9.6g}"\
                     .format(_iter, alpha, rho, self.lbd, res_normsq))
        i['xyk'][:] = i['xykp1']

    def prepare_stepsizes(self):
        i = self.itervars
        c = self.constvars
        step_factor = 1.0

        logging.info("Estimating optimal step bound...")
        op_norm, itn = normest(self.linop)
        # round (floor) to 3 significant digits
        bnd = truncate(1.0/op_norm**2, 3) # < 1/|K|^2
        bnd *= 1.25 # boost
        fact = step_factor # tau/sigma
        c['sigma'] = np.sqrt(bnd/fact)
        c['tau'] = bnd/c['sigma']
        logging.info("Constant steps: %f (%f | %f)" % (bnd,c['sigma'],c['tau']))
        self.gprox = self.g.prox(c['tau'])
        self.fconjprox = self.f.conj.prox(c['sigma'])
        self.M = SemismoothNewtonSystem(self.linop, c['tau'], c['sigma'])
        self.lbd = 1.0
        self.lbd_drop = 0.1
        self.lbd_boost = 5.0

    def solve(self, continue_at=None, granularity=50,
                    term_relgap=1e-5, term_infeas=None, term_maxiter=int(5e2)):
        i = self.itervars
        c = self.constvars

        if continue_at is not None:
            i['xyk'][:] = continue_at

        i['xykp1'] = i['xyk'].copy()
        i['xyres'] = i['xyk'].copy()
        i['dk'] = i['xyk'].copy()
        i['xytmp'] = i['xyk'].copy()
        i['cg_dk'] = i['xyk'].copy()
        i['cg_rk'] = i['xyk'].copy()
        i['cg_rkp1'] = i['xyk'].copy()

        xk, yk = self.xy.vars(i['xyk'])
        xkp1, ykp1 = self.xy.vars(i['xykp1'])

        self.prepare_stepsizes()

        if term_infeas is None:
            term_infeas = term_relgap

        obj_p = obj_d = infeas_p = infeas_d = relgap = 0.

        logging.info("Solving (steps<%d)..." % term_maxiter)

        with GracefulInterruptHandler() as interrupt_hdl:
            _iter = 0
            while _iter < term_maxiter:
                self.iteration_step(_iter)
                _iter += 1

                if interrupt_hdl.interrupted or _iter % granularity == 0:
                    if interrupt_hdl.interrupted:
                        print("Interrupt (SIGINT) at iter=%d" % _iter)

                    self.linop(xk, ykp1)
                    self.linop.adjoint(yk, xkp1)
                    obj_p, infeas_p = self.obj_primal(xk, ykp1)
                    obj_d, infeas_d = self.obj_dual(xkp1, yk)

                    # compute relative primal-dual gap
                    relgap = (obj_p - obj_d) / max(np.spacing(1), obj_d)

                    logging.info("#{:6d}: objp = {: 9.6g} ({: 9.6g}), " \
                        "objd = {: 9.6g} ({: 9.6g}), " \
                        "gap = {: 9.6g}, " \
                        "relgap = {: 9.6g} ".format(
                        _iter, obj_p, infeas_p,
                        obj_d, infeas_d,
                        obj_p - obj_d,
                        relgap
                    ))

                    if np.abs(relgap) < term_relgap \
                       and max(infeas_p, infeas_d) < term_infeas:
                        break

                    if interrupt_hdl.interrupted:
                        break

        return {
            'objp': obj_p,
            'objd': obj_d,
            'infeasp': infeas_p,
            'infeasd': infeas_d,
            'relgap': relgap
        }

    @property
    def state(self):
        return self.xy.vars(self.itervars['xyk'])
Example #22
class Model(SublabelModel):
    name = "quadratic"

    def __init__(self,
                 *args,
                 lbd=5.0,
                 alph=np.inf,
                 fdscheme="centered",
                 **kwargs):
        SublabelModel.__init__(self, *args, **kwargs)
        self.lbd = lbd
        self.alph = alph
        self.fdscheme = fdscheme
        logging.info("Init model '%s' (lambda=%.2e, alpha=%.2e, fdscheme=%s)" \
                     % (self.name, self.lbd, self.alph, self.fdscheme))

        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma
        d_image = self.data.d_image

        xvars = [('u', (N_image, L_labels)),
                 ('w12', (M_tris, N_image, s_gamma + 1)),
                 ('w', (M_tris, N_image, d_image, s_gamma))]
        yvars = [
            ('p', (N_image, d_image, L_labels)),
            ('q', (N_image, L_labels)),
            ('v12', (M_tris, N_image, s_gamma + 1)),
            ('v3', (N_image, )),
            ('g12', (M_tris, N_image, d_image * s_gamma + 1)),
        ]

        self.x = Variable(*xvars)
        self.y = Variable(*yvars)

    def setup_solver(self, *args):
        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        d_image = self.data.d_image
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma

        Id_w2 = np.zeros((s_gamma + 1, d_image * s_gamma + 1), order='C')
        Id_w2[-1, -1] = 1.0

        Adext = np.zeros((M_tris, d_image * s_gamma, d_image * s_gamma + 1),
                         order='C')
        Adext[:, :, :-1] = np.kron(np.eye(d_image), self.data.Ad)

        self.linblocks.update({
            'Grad':
            GradientOp(imagedims, L_labels, scheme=self.fdscheme),
            'PB':
            IndexedMultAdj(L_labels, d_image * N_image, self.data.P,
                           self.data.B),
            'Adext':
            MatrixMultRBatched(N_image, Adext),
            'Id_w2':
            MatrixMultR(M_tris * N_image, Id_w2),
        })
        SublabelModel.setup_solver(self, *args)

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma
        d_image = self.data.d_image

        PAbOp = self.linblocks['PAb']
        S_u_k = self.linblocks['S']
        GradOp = self.linblocks['Grad']
        PBLinOp = self.linblocks['PB']
        AdMult = self.linblocks['Adext']
        Id_w2 = self.linblocks['Id_w2']

        if self.alph < np.inf:
            etahat = HuberPerspective(M_tris * N_image,
                                      s_gamma * d_image,
                                      lbd=self.lbd,
                                      alph=self.alph)
        else:
            etahat = QuadEpiSupp(M_tris * N_image,
                                 s_gamma * d_image,
                                 a=self.lbd)

        Id_u = IdentityOp(x['u']['size'])
        Id_w12 = IdentityOp(x['w12']['size'])

        if self.data.constraints is not None:
            constrmask, constru = self.data.constraints
            constru_lifted = self.data.mfd.embed_barycentric(constru)[1]
            Gu = ConstrainFct(constrmask, constru_lifted)
        else:
            Gu = PositivityFct(x['u']['size'])

        self.pdhg_G = SplitSum([
            Gu,  # \delta_{u >= 0} or constraints
            ZeroFct(x['w12']['size']),  # 0
            ZeroFct(x['w']['size']),  # 0
        ])

        self.pdhg_F = SplitSum([
            IndicatorFct(y['p']['size']),  # \delta_{p = 0}
            IndicatorFct(y['q']['size']),  # \delta_{q = 0}
            self.epifct,  # \max_{v \in epi(rho*)} <v12,v>
            IndicatorFct(y['v3']['size'], c1=1),  # \delta_{v3^i = 1}
            etahat,  # 0.5*lbd*\sum_ji |g1[j,i]|^2/|g2[j,i]|
        ])

        self.pdhg_linop = BlockOp([
            [GradOp, 0, PBLinOp],  # p = Du - P'B'w
            [Id_u, PAbOp, 0],  # q = u - P'Ab'w12
            [0, Id_w12, 0],  # v12 = w12
            [S_u_k, 0, 0],  # v3^i = sum_k u[i,k]
            [0, Id_w2, AdMult],  # g12 = (Ad'w, w2)
        ])
Example #23
class QuadEpiProj(Operator):
    """ T(z)[i] = proj[epi(f_i)](z[i])

        f_i(x) := 0.5*alph*|x|^2 + <b[i],x> + c[i]
                = 0.5*alph*|x - shift1[i]|^2 + shift2[i]

        shift1 := -b/alph
        shift2 := -(0.5/alph*|b|^2 - c)

    Orthogonal projection onto a (translated) paraboloid, which requires
    root finding for a cubic polynomial.

    Optionally, the paraboloid is truncated at |x1 - shift1| < lbd.
    """
    def __init__(self, N, M, alph=1.0, shift=None, b=None, c=None, lbd=np.inf):
        Operator.__init__(self)
        assert lbd > 0
        assert alph > 0
        self.N, self.M = N, M
        self.x = Variable((self.N, self.M + 1))
        self.y = self.x
        self.lbd = lbd
        self.alph = alph
        self.shift = shift

        if not (b is None and c is None):
            assert self.shift is None
            self.b = np.zeros((N, M)) if b is None else b
            self.c = np.zeros((N, )) if c is None else c
            self.shift = np.zeros((N, M + 1))
            self.shift[:, :-1] = -self.b / self.alph
            self.shift[:, -1] = -(0.5 / self.alph *
                                  (self.b**2).sum(axis=-1) - self.c)

    def prepare_gpu(self, type_t="double"):
        constvars = {
            'QUAD_EPI_PROJ': 1,
            'alph': np.float64(self.alph),
            'N': self.N,
            'M': self.M,
            'TYPE_T': type_t,
        }
        if self.lbd < np.inf:
            constvars['lbd'] = np.float64(self.lbd)
            constvars['USE_LBD'] = 1
        if self.shift is not None:
            constvars['shift'] = self.shift
            constvars['USE_SHIFT'] = 1
        for f in ['sqrt', 'cbrt', 'acos', 'cos', 'fabs']:
            constvars[f.upper()] = f if type_t == "double" else (f + "f")
        files = [resource_stream('opymize.operators', 'proj.cu')]
        templates = [("quadepiproj", "P", (self.N, 1, 1), (200, 1, 1))]
        self._kernel = prepare_kernels(files, templates,
                                       constvars)['quadepiproj']

    def _call_gpu(self, x, y=None, add=False, jacobian=False):
        assert not add
        assert not jacobian
        if y is not None:
            y[:] = x.copy()
            x = y
        self._kernel(x)

    def _call_cpu(self, x, y=None, add=False, jacobian=False):
        assert not add
        assert not jacobian
        if y is None:
            y = x
        else:
            y[:] = x
        x, y = self.x.vars(x)[0], self.y.vars(y)[0]
        alph, shift, lbd = self.alph, self.shift, self.lbd
        if shift is not None: y -= shift
        xnorms = np.linalg.norm(y[:, :-1], axis=-1)
        msk_c0 = (xnorms == 0)

        if lbd < np.inf:
            msk_c1 = -xnorms / (
                lbd * alph) + 0.5 * alph * lbd**2 + 1 / alph > y[:, -1]
            msk_c2 = 0.5 * alph * lbd**2 > y[:, -1]

            msk = (xnorms > lbd) & (~msk_c2)
            y[msk, :-1] *= lbd / xnorms[msk, None]

            msk = msk_c2 & (~msk_c1)
            y[msk, :-1] *= lbd / xnorms[msk, None]
            y[msk, -1] = 0.5 * alph * lbd**2
        else:
            msk_c1 = np.ones(y.shape[:-1], dtype=bool)

        msk = msk_c1 & (~msk_c0) & (0.5 * alph * xnorms**2 > y[:, -1])
        a_2 = 2.0 / alph**2
        a = a_2 * (1 - y[msk, -1] * alph)
        b = -a_2 * xnorms[msk]
        ynorms = solve_reduced_monic_cubic(a, b)
        y[msk, :-1] *= (ynorms / xnorms[msk])[:, None]
        y[msk, -1] = 0.5 * alph * ynorms**2

        y[msk_c0 & (y[:, -1] < 0), -1] = 0.0

        if shift is not None: y += shift
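
The cubic solved by solve_reduced_monic_cubic in the unshifted case can be cross-checked with plain NumPy (a sketch, not part of the class):

import numpy as np

# projecting (v, s) onto the boundary t = 0.5*alph*r^2 gives, for the
# radial part r = |y1|, the reduced monic cubic
#   r^3 + a*r + b = 0,  a = (2/alph^2)*(1 - alph*s),  b = -(2/alph^2)*|v|
alph, s, vnorm = 1.5, 0.2, 2.0
a = 2.0/alph**2 * (1 - alph*s)
b = -2.0/alph**2 * vnorm
roots = np.roots([1.0, 0.0, a, b])
r = roots[np.abs(roots.imag) < 1e-9].real.max()
# stationarity of 0.5*(r - |v|)^2 + 0.5*(0.5*alph*r^2 - s)^2 in r:
assert abs((r - vnorm) + alph*r*(0.5*alph*r**2 - s)) < 1e-8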
Example #24
class Model(ModelHARDI):
    name = "n_w_tvw"

    def __init__(self, *args, dataterm="W1", gradnorm="frobenius", **kwargs):
        ModelHARDI.__init__(self, *args, **kwargs)
        self.gradnorm = 'nuclear' if gradnorm == "spectral" else "frobenius"

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']

        xvars = [('u', (n_image, l_labels)),
                 ('w', (m_gradients, n_image, d_image, s_manifold))]
        yvars = [('p', (n_image, d_image, l_labels)),
                 ('g', (m_gradients, n_image, d_image, s_manifold)),
                 ('q', (n_image, ))]

        self.dataterm = dataterm
        if self.dataterm == "W1":
            xvars.append(('w0', (m_gradients, n_image, s_manifold)))
            yvars.append(('p0', (n_image, l_labels)))
            yvars.append(('g0', (m_gradients, n_image, s_manifold)))
        elif self.dataterm != "quadratic":
            raise Exception("Dataterm unknown: %s" % self.dataterm)

        self.x = Variable(*xvars)
        self.y = Variable(*yvars)

        # start with a uniform distribution in each voxel
        self.state = (self.x.new(), self.y.new())
        x = self.x.vars(self.state[0], named=True)
        x['u'][:] = 1.0 / np.einsum('k->', c['b'])

        f = self.data.odf
        f_flat = f.reshape(-1, l_labels).T
        f = np.array(f_flat.reshape((l_labels, ) + imagedims), order='C')
        normalize_odf(f, c['b'])
        self.f = np.array(f.reshape(l_labels, -1).T, order='C')

        logging.info("HARDI setup ({l_labels} labels; " \
                     "img: {imagedims}; lambda={lbd:.3g}) ready.".format(
                         lbd=c['lbd'],
                         l_labels=c['l_labels'],
                         imagedims="x".join(map(str,c['imagedims']))))

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']

        GradOp = GradientOp(imagedims, l_labels, weights=c['b'])

        PBLinOp = IndexedMultAdj(l_labels, d_image * n_image, c['P'], c['B'])
        AMult = MatrixMultRBatched(n_image * d_image, c['A'])

        bMult = MatrixMultR(n_image, c['b_precond'] * c['b'][:, None])

        l1norms = L1Norms(m_gradients * n_image, (d_image, s_manifold),
                          c['lbd'], self.gradnorm)

        if self.dataterm == "W1":
            self.pdhg_G = SplitSum([
                PositivityFct(x['u']['size']),  # \delta_{u >= 0}
                ZeroFct(x['w']['size']),  # 0
                ZeroFct(x['w0']['size']),  # 0
            ])

            PBLinOp0 = IndexedMultAdj(l_labels, n_image, c['P'], c['B'])
            AMult0 = MatrixMultRBatched(n_image, c['A'])
            bMult0 = DiagMatrixMultR(n_image, c['b'])

            self.pdhg_linop = BlockOp([
                [GradOp, PBLinOp, 0],  # p = diag(b)Du - P'B'w
                [0, AMult, 0],  # g = A'w
                [bMult, 0, 0],  # q = <b,u>
                [bMult0, 0, PBLinOp0],  # p0 = diag(b) u - P'B'w0
                [0, 0, AMult0]  # g0 = A'w0
            ])

            diag_b_f = np.einsum('ik,k->ik', self.f, c['b'])
            dataterm = ConstrainFct(c['inpaint_nloc'], diag_b_f)

            l1norms0 = L1Norms(m_gradients * n_image, (1, s_manifold), 1.0,
                               "frobenius")

            self.pdhg_F = SplitSum([
                IndicatorFct(y['p']['size']),  # \delta_{p = 0}
                l1norms,  # lbd*\sum_ji |g[j,i,:,:]|_nuc
                IndicatorFct(y['q']['size'],
                             c1=c['b_precond']),  # \delta_{q = 1}
                dataterm,  # \delta_{p0 = diag(b)f}
                l1norms0,  # \sum_ji |g0[j,i,:]|_2
            ])
        elif self.dataterm == "quadratic":
            dataterm = PosSSD(self.f, vol=c['b'], mask=c['inpaint_nloc'])
            self.pdhg_G = SplitSum([
                dataterm,  # 0.5*<u-f,u-f>_b + \delta_{u >= 0}
                ZeroFct(x['w']['size']),  # 0
            ])

            self.pdhg_linop = BlockOp([
                [GradOp, PBLinOp],  # p = diag(b)Du - P'B'w
                [0, AMult],  # g = A'w
                [bMult, 0],  # q = <b,u>
            ])

            self.pdhg_F = SplitSum([
                IndicatorFct(y['p']['size']),  # \delta_{p = 0}
                l1norms,  # lbd*\sum_ji |g[j,i,:,:]|_nuc
                IndicatorFct(y['q']['size'],
                             c1=c['b_precond']),  # \delta_{q = 1}
            ])
Example #25
class Model(ModelHARDI_SHM):
    name = "sh_l_tvw2"

    def __init__(self, *args, gradnorm="frobenius", **kwargs):
        ModelHARDI_SHM.__init__(self, *args, **kwargs)
        self.gradnorm = 'nuclear' if gradnorm == "spectral" else "frobenius"

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        self.x = Variable(
            ('u1', (n_image, l_labels)),
            ('u2', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('w', (m_gradients, n_image, d_image, s_manifold)),
        )

        self.y = Variable(
            ('p', (n_image, d_image, l_shm)),
            ('g', (m_gradients, n_image, d_image, s_manifold)),
            ('q0', (n_image, )),
            ('q1', (n_image, l_labels)),
            ('q2', (n_image, l_labels)),
        )

        # start with a uniform distribution in each voxel
        self.state = (self.x.new(), self.y.new())
        u1k, u2k, vk, wk = self.x.vars(self.state[0])
        u1k[:] = 1.0 / np.einsum('k->', c['b'])
        vk[:, 0] = .5 / np.sqrt(np.pi)

        c['G'] = np.zeros((m_gradients, s_manifold, l_shm), order='C')
        c['G'][:] = sym_shm_sample_grad(c['Y'], self.data.b_sph.v_grad)

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        dataterm = SSD(c['f'], vol=c['b'], mask=c['inpaint_nloc'])

        self.pdhg_G = SplitSum([
            PositivityFct(x['u1']['size']),  # \delta_{u1 >= 0}
            dataterm,  # 0.5*<u2-f,u2-f>
            ZeroFct(x['v']['size']),  # 0
            ZeroFct(x['w']['size'])  # 0
        ])

        GradOp = GradientOp(imagedims, l_shm)

        GMult = TangledMatrixMultR(n_image * d_image, c['G'][:, :, None, :])

        bMult = MatrixMultR(n_image, c['b_precond'] * c['b'][:, None])
        YMult = MatrixMultR(n_image, c['Y'], trans=True)
        YMMult = MatrixMultR(n_image, c['YM'], trans=True)

        m_u = ScaleOp(x['u1']['size'], -1)
        m_w = ScaleOp(x['w']['size'], -1)

        self.pdhg_linop = BlockOp([
            [0, 0, GradOp, GMult],  # p = Dv + G'w
            [0, 0, 0, m_w],  # g = -w
            [bMult, 0, 0, 0],  # q0 = <b,u1>
            [m_u, 0, YMult, 0],  # q1 = Yv - u1
            [0, m_u, YMMult, 0]  # q2 = YMv - u2
        ])

        l1norms = L1Norms(m_gradients * n_image, (d_image, s_manifold),
                          c['lbd'], self.gradnorm)

        self.pdhg_F = SplitSum([
            IndicatorFct(y['p']['size']),  # \delta_{p = 0}
            l1norms,  # lbd*\sum_ji |g[j,i,:,:]|_nuc
            IndicatorFct(y['q0']['size'],
                         c1=c['b_precond']),  # \delta_{q0 = 1}
            IndicatorFct(x['u1']['size']),  # \delta_{q1 = 0}
            IndicatorFct(x['u1']['size'])  # \delta_{q2 = 0}
        ])

    def setup_solver_cvx(self):
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        self.cvx_x = Variable(
            ('p', (l_shm, d_image, n_image)),
            ('g', (n_image, m_gradients, s_manifold, d_image)),
            ('q0', (n_image, )),
            ('q1', (l_labels, n_image)),
            ('q2', (l_labels, n_image)),
        )

        self.cvx_y = Variable(
            ('u1', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('w', (m_gradients, n_image, d_image, s_manifold)),
            ('misc', (n_image * m_gradients, )),
        )

        p, g, q0, q1, q2 = [
            cvxVariable(*a['shape']) for a in self.cvx_x.vars()
        ]
        self.cvx_vars = p + sum(g, []) + [q0, q1, q2]

        self.cvx_obj = cvx.Maximize(
            0.5 * cvx.sum(cvx.diag(c['b']) * cvx.square(c['f'].T)) -
            0.5 * cvx.sum(
                cvx.diag(1.0 / c['b']) *
                cvx.square(q2 + cvx.diag(c['b']) * c['f'].T)) - cvx.sum(q0))

        div_op = sparse_div_op(imagedims)

        self.cvx_dual = True
        self.cvx_constr = []

        # u1_constr
        for i in range(n_image):
            self.cvx_constr.append(c['b'] * q0[i] - q1[:, i] >= 0)

        # v_constr
        for i in range(n_image):
            for k in range(l_shm):
                Yk = cvx.vec(c['Y'][:, k])
                self.cvx_constr.append(
                    Yk.T*(c['M'][k]*q2[:,i] + q1[:,i]) \
                        - cvxOp(div_op, p[k], i) == 0)

        # w_constr
        for j in range(m_gradients):
            Gj = c['G'][j, :, :]
            for i in range(n_image):
                for t in range(d_image):
                    for l in range(s_manifold):
                        self.cvx_constr.append(
                            g[i][j][l,t] == sum([Gj[l,k]*p[k][t,i] \
                                                 for k in range(l_shm)]))

        # additional inequality constraints
        for i in range(n_image):
            for j in range(m_gradients):
                self.cvx_constr.append(cvx.norm(g[i][j], 2) <= c['lbd'])
Example #26
class Model(SublabelModel):
    name = "rof"

    def __init__(self, *args, lbd=1.0, regularizer="tv", alph=np.inf,
                              fdscheme="centered", **kwargs):
        SublabelModel.__init__(self, *args, **kwargs)
        self.lbd = lbd
        self.regularizer = regularizer
        self.alph = alph
        self.fdscheme = fdscheme
        logging.info("Init model '%s' (%s regularizer, lambda=%.2e, "
                                      "alpha=%.2e, fdscheme=%s)" \
                     % (self.name, self.regularizer, self.lbd,
                        self.alph, self.fdscheme))

        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma
        d_image = self.data.d_image

        xvars = [('u', (N_image, L_labels)),
                 ('w12', (M_tris, N_image, s_gamma+1)),
                 ('w', (M_tris, N_image, d_image, s_gamma))]
        yvars = [('p', (N_image, d_image, L_labels)),
                 ('q', (N_image, L_labels)),
                 ('v12a', (M_tris, N_image, s_gamma+1)),
                 ('v12b', (M_tris, N_image, s_gamma+1)),
                 ('v3', (N_image,)),]

        if self.regularizer == "tv":
            yvars.append(('g', (M_tris, N_image, d_image, s_gamma)))
        elif self.regularizer == "quadratic":
            yvars.append(('g12', (M_tris, N_image, d_image*s_gamma+1)))

        self.x = Variable(*xvars)
        self.y = Variable(*yvars)

    def setup_solver(self, *args):
        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma
        d_image = self.data.d_image

        Id_w2 = np.zeros((s_gamma+1,d_image*s_gamma+1), order='C')
        Id_w2[-1,-1] = 1.0

        Adext = np.zeros((M_tris,s_gamma,d_image*s_gamma+1), order='C')
        Adext[:,:,:-1] = np.tile(self.data.Ad, (1,1,d_image))

        # Ab (M_tris, s_gamma+1, s_gamma+1)
        Ab_mats = -np.ones((M_tris, s_gamma+1, s_gamma+1),
                            dtype=np.float64, order='C')
        Ab_mats[:,:,0:-1] = self.data.T[self.data.P]
        Ab_mats[:] = np.linalg.inv(Ab_mats)

        self.linblocks.update({
            'PAbTri': IndexedMultAdj(L_labels, N_image, self.data.P, Ab_mats),
            'Grad': GradientOp(imagedims, L_labels, scheme=self.fdscheme),
            'PB': IndexedMultAdj(L_labels, d_image*N_image, self.data.P, self.data.B),
            'Ad': MatrixMultRBatched(N_image*d_image, self.data.Ad),
            'Adext': MatrixMultRBatched(N_image, Adext),
            'Id_w2': MatrixMultR(M_tris*N_image, Id_w2),
        })
        SublabelModel.setup_solver(self, *args)

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma
        d_image = self.data.d_image

        PAbOp = self.linblocks['PAbTri']
        S_u_k = self.linblocks['S']
        GradOp = self.linblocks['Grad']
        PBLinOp = self.linblocks['PB']
        AdMult = self.linblocks['Ad']

        shift = np.tile(self.data.data_b, (M_tris,1,1)).reshape((-1, s_gamma))
        c = 0.5*(shift**2).sum(axis=-1)
        epifct1 = QuadEpiSupp(M_tris*N_image, s_gamma, b=-shift, c=c)
        epifct2 = EpigraphSupp(np.ones((N_image, L_labels), dtype=bool),
            [[np.arange(s_gamma+1)[None]]*M_tris]*N_image,
            self.data.P, self.data.T, np.zeros((N_image, L_labels)))

        Id_u = IdentityOp(x['u']['size'])
        Id_w12 = IdentityOp(x['w12']['size'])

        if self.data.constraints is not None:
            constrmask, constru = self.data.constraints
            constru_lifted = self.data.mfd.embed_barycentric(constru)[1]
            Gu = ConstrainFct(constrmask, constru_lifted)
        else:
            Gu = PositivityFct(x['u']['size'])

        self.pdhg_G = SplitSum([
            Gu,                         # \delta_{u >= 0} or constraints
            ZeroFct(x['w12']['size']),  # 0
            ZeroFct(x['w']['size']),    # 0
        ])

        F_summands = [
            IndicatorFct(y['p']['size']),        # \delta_{p = 0}
            IndicatorFct(y['q']['size']),        # \delta_{q = 0}
            epifct1,                             # -0.5*v2a*|v1a/v2a + b|^2
            epifct2,                             # \max_{v \in Delta} <v12b,v>
            IndicatorFct(y['v3']['size'], c1=1), # \delta_{v3^i = 1}
        ]

        op_blocks = [
            [GradOp,       0, PBLinOp], # p = Du - P'B'w
            [  Id_u,   PAbOp,       0], # q = u - P'Ab'w12
            [     0,  Id_w12,       0], # v12a = w12
            [     0,  Id_w12,       0], # v12b = w12
            [ S_u_k,       0,       0], # v3^i = sum_k u[i,k]
        ]

        if self.regularizer == "tv":
            l1norms = L1Norms(M_tris*N_image, (d_image, s_gamma), self.lbd, "nuclear")
            F_summands.append(l1norms) # lbd*\sum_ji |g[j,i,:,:]|_nuc
            op_blocks.append([     0,       0,  AdMult]) # g = A'w
        elif self.regularizer == "quadratic":
            if self.alph < np.inf:
                etahat = HuberPerspective(M_tris*N_image, s_gamma*d_image,
                                          lbd=self.lbd, alph=self.alph)
            else:
                etahat = QuadEpiSupp(M_tris*N_image, s_gamma*d_image, a=self.lbd)
            F_summands.append(etahat) # 0.5*lbd*\sum_ji |g1[j,i]|^2/g2[j,i] (Huber if alph < inf)
            AdMult = self.linblocks['Adext']
            Id_w2 = self.linblocks['Id_w2']
            op_blocks.append([     0,   Id_w2,   AdMult]) # g12 = (Ad'w, w2)
        self.pdhg_F = SplitSum(F_summands)
        self.pdhg_linop = BlockOp(op_blocks)
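
All of the models above expose the same triple (pdhg_G, pdhg_F, pdhg_linop) describing a saddle-point problem min_x G(x) + F(Kx). For orientation, the following is a minimal sketch of a Chambolle-Pock (PDHG) loop that consumes this triple; the in-place calling conventions (prox operators overwrite their argument, K(x, y) writes K@x into y, K.adjoint applies K') are assumptions inferred from the operator classes, not the package's actual solver.

def pdhg_sketch(model, tau=0.1, sigma=0.1, steps=1000):
    # Sketch only; convergence requires tau*sigma*|K|^2 <= 1.
    G, F, K = model.pdhg_G, model.pdhg_F, model.pdhg_linop
    x, y = model.state[0].copy(), model.state[1].copy()
    xbar, xold = x.copy(), x.copy()
    Kx, KTy = y.copy(), x.copy()   # scratch buffers
    proxG = G.prox(tau)            # resolvent of tau*dG
    proxFc = F.conj.prox(sigma)    # resolvent of sigma*dF*
    for _ in range(steps):
        K(xbar, Kx)                # Kx = K @ xbar
        y += sigma*Kx
        proxFc(y)                  # dual ascent step
        K.adjoint(y, KTy)          # KTy = K' @ y
        xold[:] = x
        x -= tau*KTy
        proxG(x)                   # primal descent step
        xbar[:] = 2*x - xold       # extrapolation
    return x, y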
Example #27
0
class Model(ModelHARDI_SHM):
    name = "sh_bndl2_tvw"

    def __init__(self, *args, gradnorm="frobenius", conf_lvl=0.9, **kwargs):
        ModelHARDI_SHM.__init__(self, *args, **kwargs)
        self.gradnorm = "nuclear" if gradnorm == "spectral" else "frobenius"

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        self.data.init_bounds(conf_lvl)
        _, f1, f2 = self.data.bounds
        c['f1'], c['f2'] = [np.array(a.T, order='C') for a in [f1, f2]]

        self.x = Variable(
            ('u1', (n_image, l_labels)),
            ('u2', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('w', (m_gradients, n_image, d_image, s_manifold)),
        )

        self.y = Variable(
            ('p', (n_image, d_image, l_labels)),
            ('g', (m_gradients, n_image, d_image, s_manifold)),
            ('q0', (n_image, )),
            ('q1', (n_image, l_labels)),
            ('q2', (n_image, l_labels)),
        )

        # start with a uniform distribution in each voxel (so <b,u1> = 1)
        self.state = (self.x.new(), self.y.new())
        u1k, u2k, vk, wk = self.x.vars(self.state[0])
        u1k[:] = 1.0 / np.einsum('k->', c['b'])
        vk[:, 0] = .5 / np.sqrt(np.pi)

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        dataterm = BndSSD(c['f1'], c['f2'], vol=c['b'], mask=c['inpaint_nloc'])

        self.pdhg_G = SplitSum([
            PositivityFct(x['u1']['size']),  # \delta_{u1 >= 0}
            dataterm,  # 0.5*|max(0, f1 - u2)|^2 + 0.5*|max(0, u2 - f2)|^2
            ZeroFct(x['v']['size']),  # 0
            ZeroFct(x['w']['size'])  # 0
        ])

        GradOp = GradientOp(imagedims, l_labels, weights=c['b'])

        PBLinOp = IndexedMultAdj(l_labels, d_image * n_image, c['P'], c['B'])
        AMult = MatrixMultRBatched(n_image * d_image, c['A'])

        bMult = MatrixMultR(n_image, c['b_precond'] * c['b'][:, None])
        YMult = MatrixMultR(n_image, c['Y'], trans=True)
        YMMult = MatrixMultR(n_image, c['YM'], trans=True)

        m_u = ScaleOp(x['u1']['size'], -1)

        self.pdhg_linop = BlockOp([
            [GradOp, 0, 0, PBLinOp],  # p = diag(b)Du1 - P'B'w
            [0, 0, 0, AMult],  # g = A'w
            [bMult, 0, 0, 0],  # q0 = <b,u1>
            [m_u, 0, YMult, 0],  # q1 = Yv - u1
            [0, m_u, YMMult, 0]  # q2 = YMv - u2
        ])

        l1norms = L1Norms(m_gradients * n_image, (d_image, s_manifold),
                          c['lbd'], self.gradnorm)

        self.pdhg_F = SplitSum([
            IndicatorFct(y['p']['size']),  # \delta_{p = 0}
            l1norms,  # lbd*\sum_ji |g[j,i,:,:]| (norm per self.gradnorm)
            IndicatorFct(y['q0']['size'],
                         c1=c['b_precond']),  # \delta_{q0 = 1}
            IndicatorFct(y['q1']['size']),  # \delta_{q1 = 0}
            IndicatorFct(y['q2']['size'])  # \delta_{q2 = 0}
        ])

    def setup_solver_cvx(self):
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        self.cvx_x = Variable(
            ('p', (l_labels, d_image, n_image)),
            ('g', (n_image, m_gradients, s_manifold, d_image)),
            ('q0', (n_image, )),
            ('q1', (l_labels, n_image)),
            ('q2', (l_labels, n_image)),
        )

        self.cvx_y = Variable(
            ('u1', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('w', (m_gradients, n_image, d_image, s_manifold)),
            ('misc', (n_image * m_gradients, )),
        )

        p, g, q0, q1, q2 = [
            cvxVariable(*a['shape']) for a in self.cvx_x.vars()
        ]
        self.cvx_vars = p + sum(g, []) + [q0, q1, q2]

        fid_fun_dual = 0
        for i in range(n_image):
            for k in range(l_labels):
                fid_fun_dual += -cvx.power(q2[k,i],2)/2 \
                             - cvx.maximum(q2[k,i]*c['f1'][i,k],
                                 q2[k,i]*c['f2'][i,k])

        self.cvx_obj = cvx.Maximize(fid_fun_dual - cvx.sum(q0))

        div_op = sparse_div_op(imagedims)

        self.cvx_dual = True
        self.cvx_constr = []

        # u1_constr
        for i in range(n_image):
            for k in range(l_labels):
                self.cvx_constr.append(
                    c['b'][k] *
                    (q0[i] - cvxOp(div_op, p[k], i)) - q1[k, i] >= 0)

        # v_constr
        for i in range(n_image):
            for k in range(l_shm):
                Yk = cvx.vec(c['Y'][:, k])
                self.cvx_constr.append(Yk.T *
                                       (c['M'][k] * q2[:, i] + q1[:, i]) == 0)

        # w_constr
        for j in range(m_gradients):
            Aj = c['A'][j, :, :]
            Bj = c['B'][j, :, :]
            Pj = c['P'][j, :]
            for i in range(n_image):
                for t in range(d_image):
                    for l in range(s_manifold):
                        self.cvx_constr.append(Aj * g[i][j][l, t] == sum(
                            [Bj[l, m] * p[Pj[m]][t, i] for m in range(3)]))

        # additional inequality constraints
        for i in range(n_image):
            for j in range(m_gradients):
                self.cvx_constr.append(cvx.norm(g[i][j], 2) <= c['lbd'])
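
The setup_solver_cvx methods only assemble the dual objective and the constraint list; solving is left to the caller. Assuming standard cvxpy semantics, a minimal driver could look like this (the solve call and the primal recovery are assumptions, not shown in the examples):

import cvxpy as cvx

prob = cvx.Problem(model.cvx_obj, model.cvx_constr)
prob.solve(verbose=True)
print("dual optimal value:", prob.value)
# Primal values would then be read off the dual variables attached to
# the entries of cvx_constr, in the block order announced by cvx_y.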
Example #28
0
class Model(SublabelModel):
    name = "tv"

    def __init__(self, *args, lbd=1.0, fdscheme="centered", **kwargs):
        SublabelModel.__init__(self, *args, **kwargs)
        self.lbd = lbd
        self.fdscheme = fdscheme
        logging.info("Init model '%s' (lambda=%.2e,fdscheme=%s)" \
                     % (self.name, self.lbd, self.fdscheme))

        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma
        d_image = self.data.d_image

        xvars = [('u', (N_image, L_labels)),
                 ('w12', (M_tris, N_image, s_gamma + 1)),
                 ('w', (M_tris, N_image, d_image, s_gamma))]
        yvars = [('p', (N_image, d_image, L_labels)),
                 ('q', (N_image, L_labels)),
                 ('v12', (M_tris, N_image, s_gamma + 1)),
                 ('v3', (N_image, )),
                 ('g', (M_tris, N_image, d_image, s_gamma))]

        if self.data.R.shape[-1] == s_gamma + 1:
            del xvars[1]
            del yvars[2]

        self.x = Variable(*xvars)
        self.y = Variable(*yvars)

    def setup_solver(self, *args):
        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        d_image = self.data.d_image
        self.linblocks.update({
            'Grad':
            GradientOp(imagedims, L_labels, scheme=self.fdscheme),
            'PB':
            IndexedMultAdj(L_labels, d_image * N_image, self.data.P,
                           self.data.B),
            'Ad':
            MatrixMultRBatched(N_image * d_image, self.data.Ad),
        })
        SublabelModel.setup_solver(self, *args)

    def setup_dataterm_blocks(self):
        if hasattr(self, 'epifct') or hasattr(self, 'rho'):
            return
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma
        P = self.data.P

        if self.data.R.shape[-1] == s_gamma + 1:
            logging.info("Setup for model without sublabels...")
            R = self.data.R.reshape(M_tris, N_image, s_gamma + 1)
            self.rho = np.zeros((N_image, L_labels), order='C')
            for j in range(M_tris):
                for m in range(s_gamma + 1):
                    self.rho[:, P[j, m]] = R[j, :, m]
            self.rho = self.rho.ravel()
        else:
            logging.info("Setup for sublabel-accurate model...")
            self.epifct = EpigraphSupp(self.data.Rbase, self.data.Rfaces,
                                       self.data.Qbary, self.data.Sbary,
                                       self.data.R)

            # Ab (M_tris, s_gamma+1, s_gamma+1)
            Ab = np.zeros((M_tris, s_gamma + 1, s_gamma + 1),
                          dtype=np.float64,
                          order='C')
            Ab[:] = np.eye(s_gamma + 1)[None]
            Ab[..., -1] = -1
            self.linblocks['PAb'] = IndexedMultAdj(L_labels, N_image, P, Ab)

        self.linblocks.update({
            'S':
            MatrixMultR(N_image, np.ones((L_labels, 1), order='C')),
        })

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma
        d_image = self.data.d_image

        S_u_k = self.linblocks['S']
        GradOp = self.linblocks['Grad']
        PBLinOp = self.linblocks['PB']
        AdMult = self.linblocks['Ad']

        l1norms = L1Norms(M_tris * N_image, (d_image, s_gamma), self.lbd,
                          "nuclear")

        Id_u = IdentityOp(x['u']['size'])

        if self.data.constraints is not None:
            constrmask, constru = self.data.constraints
            constru_lifted = self.data.mfd.embed_barycentric(constru)[1]
            Gu = ConstrainFct(constrmask, constru_lifted)
        else:
            Gu = PositivityFct(x['u']['size'])

        if hasattr(self, 'epifct'):
            PAbOp = self.linblocks['PAb']
            Id_w12 = IdentityOp(x['w12']['size'])

            G_summands = [
                Gu,  # \delta_{u >= 0} or constraints
                ZeroFct(x['w12']['size']),  # 0
                ZeroFct(x['w']['size']),  # 0
            ]

            F_summands = [
                IndicatorFct(y['p']['size']),  # \delta_{p = 0}
                IndicatorFct(y['q']['size']),  # \delta_{q = 0}
                self.epifct,  # \max_{v \in epi(rho*)} <v12,v>
                IndicatorFct(y['v3']['size'], c1=1),  # \delta_{v3^i = 1}
                l1norms,  # lbd*\sum_ji |g[j,i,:,:]|_nuc
            ]

            op_blocks = [
                [GradOp, 0, PBLinOp],  # p = Du - P'B'w
                [Id_u, PAbOp, 0],  # q = u - P'Ab'w12
                [0, Id_w12, 0],  # v12 = w12
                [S_u_k, 0, 0],  # v3^i = sum_k u[i,k]
                [0, 0, AdMult],  # g = A'w
            ]
        else:
            G_summands = [
                Gu,  # \delta_{u >= 0} or constraints
                ZeroFct(x['w']['size']),  # 0
            ]

            F_summands = [
                IndicatorFct(y['p']['size']),  # \delta_{p = 0}
                AffineFct(y['q']['size'], c=self.rho),  # <q,rho>
                IndicatorFct(y['v3']['size'], c1=1),  # \delta_{v3^i = 1}
                l1norms,  # lbd*\sum_ji |g[j,i,:,:]|_nuc
            ]

            op_blocks = [
                [GradOp, PBLinOp],  # p = Du - P'B'w
                [Id_u, 0],  # q = u
                [S_u_k, 0],  # v3^i = sum_k u[i,k]
                [0, AdMult],  # g = A'w
            ]

        self.pdhg_G = SplitSum(G_summands)
        self.pdhg_F = SplitSum(F_summands)
        self.pdhg_linop = BlockOp(op_blocks)
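
A note on the Variable container used throughout: blocks are declared by name and shape, stored contiguously in one flat array, and recovered either positionally or as named metadata. A minimal illustration with made-up sizes, assuming vars(buf) returns reshaped views into the flat buffer (the prox and solver code above relies on this):

x = Variable(('u', (10, 4)), ('w', (3, 10, 2, 4)))
buf = x.new()              # flat zero array of size 10*4 + 3*10*2*4 = 280
u, w = x.vars(buf)         # views shaped (10, 4) and (3, 10, 2, 4)
meta = x.vars(named=True)  # metadata dict, e.g. meta['u']['size'] == 40
u[:] = 0.25                # writes through to buf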
Example #29
0
class Model(ModelHARDI_SHM):
    name = "sh_bndl1_tvc"

    def __init__(self, *args, conf_lvl=0.9, **kwargs):
        ModelHARDI_SHM.__init__(self, *args, **kwargs)

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        self.data.init_bounds(conf_lvl)
        _, f1, f2 = self.data.bounds
        c['f1'], c['f2'] = [np.array(a.T, order='C') for a in [f1, f2]]

        self.x = Variable(
            ('u1', (n_image, l_labels)),
            ('u2', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
        )

        self.y = Variable(
            ('p', (n_image, d_image, l_shm)),
            ('q0', (n_image, )),
            ('q1', (n_image, l_labels)),
            ('q2', (n_image, l_labels)),
            ('q3', (n_image, l_labels)),
            ('q4', (n_image, l_labels)),
        )

        # start with a uniform distribution in each voxel (so <b,u1> = 1)
        self.state = (self.x.new(), self.y.new())
        u1k, u2k, vk = self.x.vars(self.state[0])
        u1k[:] = 1.0 / np.einsum('k->', c['b'])
        vk[:, 0] = .5 / np.sqrt(np.pi)

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        self.pdhg_G = SplitSum([
            PositivityFct(x['u1']['size']),  # \delta_{u1 >= 0}
            ZeroFct(x['u2']['size']),  # 0
            ZeroFct(x['v']['size'])  # 0
        ])

        GradOp = GradientOp(imagedims, l_shm)

        bMult = MatrixMultR(n_image, c['b_precond'] * c['b'][:, None])
        YMult = MatrixMultR(n_image, c['Y'], trans=True)
        YMMult = MatrixMultR(n_image, c['YM'], trans=True)

        m_u = ScaleOp(x['u1']['size'], -1)

        dbMult = DiagMatrixMultR(n_image, c['b'])
        mdbMult = DiagMatrixMultR(n_image, -c['b'])

        self.pdhg_linop = BlockOp([
            [0, 0, GradOp],  # p  = Dv
            [bMult, 0, 0],  # q0 = <b,u1>
            [m_u, 0, YMult],  # q1 = Yv - u1
            [0, m_u, YMMult],  # q2 = YMv - u2
            [0, mdbMult, 0],  # q3 = -diag(b) u2
            [0, dbMult, 0]  # q4 = diag(b) u2
        ])

        l1norms = L1Norms(n_image, (d_image, l_shm), c['lbd'], "frobenius")
        LowerBoundFct = MaxFct(np.einsum('ik,k->ik', c['f1'], -c['b']))
        UpperBoundFct = MaxFct(np.einsum('ik,k->ik', c['f2'], c['b']))

        self.pdhg_F = SplitSum([
            l1norms,  # lbd*\sum_i |p[i,:,:]|_2
            IndicatorFct(y['q0']['size'],
                         c1=c['b_precond']),  # \delta_{q0 = 1}
            IndicatorFct(y['q1']['size']),  # \delta_{q1 = 0}
            IndicatorFct(y['q2']['size']),  # \delta_{q2 = 0}
            LowerBoundFct,  # |max(0, q3 + diag(b)f1)|_1
            UpperBoundFct  # |max(0, q4 - diag(b)f2)|_1
        ])

    def setup_solver_cvx(self):
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        self.cvx_x = Variable(
            ('p', (l_shm, d_image, n_image)),
            ('q0', (n_image, )),
            ('q1', (l_labels, n_image)),
            ('q2', (l_labels, n_image)),
            ('q3', (l_labels, n_image)),
            ('q4', (l_labels, n_image)),
        )

        self.cvx_y = Variable(
            ('u1', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('misc', (n_image * l_labels * 5 + n_image, )),
        )

        p, q0, q1, q2, q3, q4 = [
            cvxVariable(*a['shape']) for a in self.cvx_x.vars()
        ]
        self.cvx_vars = p + [q0, q1, q2, q3, q4]

        self.cvx_obj = cvx.Maximize(
            cvx.vec(q3).T * cvx.vec(cvx.diag(c['b']) * c['f1'].T) -
            cvx.vec(q4).T * cvx.vec(cvx.diag(c['b']) * c['f2'].T) -
            cvx.sum(q0))

        div_op = sparse_div_op(imagedims)

        self.cvx_dual = True
        self.cvx_constr = []

        # u1_constr
        for i in range(n_image):
            self.cvx_constr.append(c['b'][:] * q0[i] - q1[:, i] >= 0)

        # v_constr
        for i in range(n_image):
            for k in range(l_shm):
                Yk = cvx.vec(c['Y'][:, k])
                self.cvx_constr.append(
                    Yk.T*(c['M'][k]*q2[:,i] + q1[:,i]) \
                        - cvxOp(div_op, p[k], i) == 0)

        # additional inequality constraints
        for i in range(n_image):
            for k in range(l_labels):
                self.cvx_constr.append(0 <= q3[k, i])
                self.cvx_constr.append(q3[k, i] <= 1)
                self.cvx_constr.append(0 <= q4[k, i])
                self.cvx_constr.append(q4[k, i] <= 1)
                self.cvx_constr.append(q4[k, i] - q3[k, i] - q2[k, i] == 0)

        for i in range(n_image):
            self.cvx_constr.append(sum(cvx.sum_squares(p[k][:,i]) \
                                       for k in range(l_shm)) <= c['lbd']**2)
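
The final inequality group is the dual-ball form of the l1norms term from setup_solver_pdhg: the convex conjugate of F(p) = \lambda \sum_i \|p(i,:,:)\|_F is the indicator of the polar ball, and per voxel i

    \sum_{k=1}^{l_\mathrm{shm}} \|p_k(:,i)\|_2^2 \le \lambda^2
        \iff \|p(i,:,:)\|_F \le \lambda,

which is exactly the constraint appended in the loop above.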
Example #30
0
class Model(ModelHARDI_SHM):
    name = "sh_w_tvw"

    def __init__(self, *args, dataterm="W1", gradnorm="frobenius", **kwargs):
        ModelHARDI_SHM.__init__(self, *args, **kwargs)
        self.gradnorm = "nuclear" if gradnorm == "spectral" else "frobenius"

        c = self.constvars
        b_sph = self.data.b_sph
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        xvars = [('u', (n_image, l_labels)), ('v', (n_image, l_shm)),
                 ('w', (m_gradients, n_image, d_image, s_manifold))]
        yvars = [('p', (n_image, d_image, l_labels)),
                 ('g', (m_gradients, n_image, d_image, s_manifold)),
                 ('q0', (n_image, )), ('q1', (n_image, l_labels))]

        self.dataterm = dataterm
        if self.dataterm == "W1":
            xvars.append(('w0', (m_gradients, n_image, s_manifold)))
            yvars.append(('p0', (n_image, l_labels)))
            yvars.append(('g0', (m_gradients, n_image, s_manifold)))
        elif self.dataterm != "quadratic":
            raise Exception("Dataterm unknown: %s" % self.dataterm)

        self.x = Variable(*xvars)
        self.y = Variable(*yvars)

        # start with a uniform distribution in each voxel (so <b,u> = 1)
        self.state = (self.x.new(), self.y.new())
        x = self.x.vars(self.state[0], named=True)
        x['u'][:] = 1.0 / np.einsum('k->', c['b'])
        x['v'][:, 0] = .5 / np.sqrt(np.pi)

        f = self.data.odf
        f_flat = f.reshape(-1, l_labels).T
        f = np.array(f_flat.reshape((l_labels, ) + imagedims), order='C')
        normalize_odf(f, c['b'])
        self.f = np.array(f.reshape(l_labels, -1).T, order='C')

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        GradOp = GradientOp(imagedims, l_labels, weights=c['b'])

        PBLinOp = IndexedMultAdj(l_labels, d_image * n_image, c['P'], c['B'])
        AMult = MatrixMultRBatched(n_image * d_image, c['A'])

        bMult = MatrixMultR(n_image, c['b_precond'] * c['b'][:, None])

        YMult = MatrixMultR(n_image, c['Y'], trans=True)
        m_u = ScaleOp(x['u']['size'], -1)

        l1norms = L1Norms(m_gradients * n_image, (d_image, s_manifold),
                          c['lbd'], self.gradnorm)

        if self.dataterm == "W1":
            self.pdhg_G = SplitSum([
                PositivityFct(x['u']['size']),  # \delta_{u >= 0}
                ZeroFct(x['v']['size']),  # 0
                ZeroFct(x['w']['size']),  # 0
                ZeroFct(x['w0']['size']),  # 0
            ])

            PBLinOp0 = IndexedMultAdj(l_labels, n_image, c['P'], c['B'])
            AMult0 = MatrixMultRBatched(n_image, c['A'])
            bMult0 = DiagMatrixMultR(n_image, c['b'])

            self.pdhg_linop = BlockOp([
                [GradOp, 0, PBLinOp, 0],  # p = diag(b)Du - P'B'w
                [0, 0, AMult, 0],  # g = A'w
                [bMult, 0, 0, 0],  # q0 = <b,u>
                [m_u, YMult, 0, 0],  # q1 = Yv - u
                [bMult0, 0, 0, PBLinOp0],  # p0 = diag(b) u - P'B'w0
                [0, 0, 0, AMult0]  # g0 = A'w0
            ])

            diag_b_f = np.einsum('ik,k->ik', self.f, c['b'])
            dataterm = ConstrainFct(c['inpaint_nloc'], diag_b_f)

            l1norms0 = L1Norms(m_gradients * n_image, (1, s_manifold), 1.0,
                               "frobenius")

            self.pdhg_F = SplitSum([
                IndicatorFct(y['p']['size']),  # \delta_{p = 0}
                l1norms,  # lbd*\sum_ji |g[j,i,:,:]| (norm per self.gradnorm)
                IndicatorFct(y['q0']['size'],
                             c1=c['b_precond']),  # \delta_{q0 = 1}
                IndicatorFct(y['q1']['size']),  # \delta_{q1 = 0}
                dataterm,  # \delta_{p0 = diag(b)f}
                l1norms0,  # \sum_ji |g0[j,i,:]|_2
            ])
        elif self.dataterm == "quadratic":
            dataterm = PosSSD(self.f, vol=c['b'], mask=c['inpaint_nloc'])
            self.pdhg_G = SplitSum([
                dataterm,  # 0.5*<u-f,u-f>_b + \delta_{u >= 0}
                ZeroFct(x['v']['size']),  # 0
                ZeroFct(x['w']['size']),  # 0
            ])

            self.pdhg_linop = BlockOp([
                [GradOp, 0, PBLinOp],  # p = diag(b)Du - P'B'w
                [0, 0, AMult],  # g = A'w
                [bMult, 0, 0],  # q0 = <b,u>
                [m_u, YMult, 0],  # q1 = Yv - u
            ])

            self.pdhg_F = SplitSum([
                IndicatorFct(y['p']['size']),  # \delta_{p = 0}
                l1norms,  # lbd*\sum_ji |g[j,i,:,:]| (norm per self.gradnorm)
                IndicatorFct(y['q0']['size'],
                             c1=c['b_precond']),  # \delta_{q0 = 1}
                IndicatorFct(y['q1']['size']),  # \delta_{q1 = 0}
            ])

    def setup_solver_cvx(self):
        if self.dataterm != "W1":
            raise Exception("Only W1 dataterm is implemented in CVX")

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        f_flat = self.data.odf.reshape(-1, l_labels).T
        f = np.array(f_flat.reshape((l_labels, ) + imagedims), order='C')
        normalize_odf(f, c['b'])
        f_flat = f.reshape(l_labels, n_image)

        self.cvx_x = Variable(
            ('p', (l_labels, d_image, n_image)),
            ('g', (n_image, m_gradients, s_manifold, d_image)),
            ('q0', (n_image, )),
            ('q1', (l_labels, n_image)),
            ('p0', (l_labels, n_image)),
            ('g0', (n_image, m_gradients, s_manifold)),
        )

        self.cvx_y = Variable(
            ('u', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('w', (m_gradients, n_image, d_image, s_manifold)),
            ('w0', (m_gradients, n_image, s_manifold)),
            ('misc', (n_image * m_gradients, )),
        )

        p, g, q0, q1, p0, g0 = [
            cvxVariable(*a['shape']) for a in self.cvx_x.vars()
        ]
        self.cvx_vars = p + sum(g, []) + [q0, q1, p0] + g0

        self.cvx_obj = cvx.Maximize(-cvx.vec(f_flat).T *
                                    cvx.vec(cvx.diag(c['b']) * p0) -
                                    cvx.sum(q0))

        div_op = sparse_div_op(imagedims)

        self.cvx_dual = True
        self.cvx_constr = []

        # u_constr
        for i in range(n_image):
            for k in range(l_labels):
                self.cvx_constr.append(
                    c['b'][k]*(q0[i] + p0[k,i] \
                        - cvxOp(div_op, p[k], i)) - q1[k,i] >= 0)

        # v_constr
        for i in range(n_image):
            for k in range(l_shm):
                Yk = cvx.vec(c['Y'][:, k])
                self.cvx_constr.append(-Yk.T * q1[:, i] == 0)

        # w_constr
        for j in range(m_gradients):
            Aj = c['A'][j, :, :]
            Bj = c['B'][j, :, :]
            Pj = c['P'][j, :]
            for i in range(n_image):
                for t in range(d_image):
                    for l in range(s_manifold):
                        self.cvx_constr.append(
                            Aj*g[i][j][l,t] == sum([Bj[l,m]*p[Pj[m]][t,i] \
                                                    for m in range(3)]))

        # w0_constr
        for j in range(m_gradients):
            Aj = c['A'][j, :, :]
            Bj = c['B'][j, :, :]
            Pj = c['P'][j, :]
            for i in range(n_image):
                self.cvx_constr.append(Aj * g0[i][j, :].T == Bj * p0[Pj, i])

        # additional inequality constraints
        for i in range(n_image):
            for j in range(m_gradients):
                self.cvx_constr.append(cvx.norm(g[i][j], 2) <= c['lbd'])
                self.cvx_constr.append(cvx.norm(g0[i][j, :], 2) <= 1.0)
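
For context (an interpretation of the comments above, not stated in the code): the W1 branch realizes a Wasserstein-1 dataterm via Kantorovich-Rubinstein duality, with p0 acting as the dual potential and w0, g0 discretizing its manifold gradient,

    W_1\bigl(u(i,\cdot),\, f(i,\cdot)\bigr)
        = \sup_{\mathrm{Lip}(\phi) \le 1} \sum_k b_k \bigl(u(i,k) - f(i,k)\bigr)\,\phi(k),

so the unit bounds \|g0[j,i,:]\|_2 \le 1 (and the weight 1.0 in l1norms0 on the PDHG side) enforce the Lipschitz constraint.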