Example 1
class MaskedAffineFct(Functional):
    """ F(x) = sum(c[mask,:]*x[mask,:]) + \delta_{x[not(mask),:] == 0} """
    def __init__(self, mask, c, conj=None):
        Functional.__init__(self)
        self.x = Variable(c.shape)
        self.mask = mask.astype(bool)
        self.nmask = ~self.mask
        self.c = c
        if conj is None:
            from opymize.functionals import MaskedIndicatorFct
            self.conj = MaskedIndicatorFct(mask, c, conj=self)
        else:
            self.conj = conj
        scale = self.x.vars(self.x.new())[0]
        scale[self.mask, :] = 1.0
        self._prox = ShiftScaleOp(self.x.size, self.c.ravel(), scale.ravel(),
                                  -1)

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        val = np.einsum('ik,ik->', x[self.mask, :], self.c[self.mask, :])
        infeas = 0.0 if np.all(self.mask) else norm(x[self.nmask, :],
                                                    ord=np.inf)
        result = (val, infeas)
        if grad:
            dF = self.x.new()
            dF[self.mask, :] = self.c[self.mask, :]
            result = (result, dF.ravel())
        return result

    def prox(self, tau):
        self._prox.b = -tau
        if hasattr(self._prox, 'gpuvars'):
            self._prox.gpuvars['b'][:] = np.atleast_1d(self._prox.b)
        return self._prox
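
A minimal numpy sketch of the closed-form prox that ShiftScaleOp is configured to apply here (the function name is illustrative, not part of opymize): masked rows are shifted against the linear term, all other rows are forced to zero by the indicator.

import numpy as np

def prox_masked_affine(x, mask, c, tau):
    # prox of F at x: x[mask] - tau*c[mask] on the mask, 0 elsewhere
    y = np.zeros_like(x)
    y[mask, :] = x[mask, :] - tau * c[mask, :]
    return y
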
Example 2
class NegProj(Operator):
    """ T(x) = min(0, x), in the elementwise sense"""
    def __init__(self, N):
        Operator.__init__(self)
        self.x = Variable(N)
        self.y = Variable(N)
        self._jacobian = NihilOp(N, keep=self.x.new(dtype=bool))

    def prepare_gpu(self, type_t="double"):
        self._kernel = ElementwiseKernel("%s *x, %s *y" % ((type_t, ) * 2),
                                         "y[i] = (x[i] > 0) ? 0 : x[i]")
        self._kernel_add = ElementwiseKernel("%s *x, %s *y" % ((type_t, ) * 2),
                                             "y[i] += (x[i] > 0) ? 0 : x[i]")

    def _call_gpu(self, x, y=None, add=False, jacobian=False):
        y = x if y is None else y
        if add:
            self._kernel_add(x, y)
        else:
            self._kernel(x, y)

        if jacobian:
            self._jacobian.keep = (x.get() < 0)
            return self._jacobian

    def _call_cpu(self, x, y=None, add=False, jacobian=False):
        y = x if y is None else y
        if jacobian:
            self._jacobian.keep = (x < 0)
        if add:
            y += np.fmin(0, x)
        else:
            np.fmin(0, x, out=y)
        if jacobian: return self._jacobian
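
On the CPU path the operator is plain np.fmin and the generalized Jacobian is the 0/1 diagonal that NihilOp represents through its keep mask. A small self-contained numpy sketch:

import numpy as np

x = np.array([-2.0, 0.5, 0.0, -0.1])
y = np.fmin(0, x)                # T(x) = min(0, x) -> [-2., 0., 0., -0.1]
keep = x < 0                     # diagonal of the generalized Jacobian
d = np.ones_like(x)
jac_d = np.where(keep, d, 0.0)   # how the Jacobian acts on a direction d
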
Example 3
class ConstrainFct(Functional):
    """ F(x) = 0 if x[mask,:]==c[mask,:] else infty
        The mask is only applied to the first component of x
    """
    def __init__(self, mask, c, conj=None):
        Functional.__init__(self)
        self.x = Variable(c.shape)
        self.mask = mask
        self.c = c
        if conj is None:
            from opymize.functionals import MaskedAffineFct
            self.conj = MaskedAffineFct(mask, c, conj=self)
        else:
            self.conj = conj
        self._prox = ConstrainOp(mask, c)

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        val = 0
        infeas = norm(x[self.mask, :] - self.c[self.mask, :], ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau
        return self._prox
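
The prox is independent of tau because it is a Euclidean projection: ConstrainOp overwrites the masked rows with c and leaves the rest untouched. A sketch of that projection in plain numpy (the function name is illustrative):

import numpy as np

def proj_constrain(x, mask, c):
    # projection onto {x : x[mask,:] == c[mask,:]}
    y = x.copy()
    y[mask, :] = c[mask, :]
    return y
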
Example 4
class QuadEpiInd(Functional):
    """ \sum_i \delta_{f_i(x[i,:-1]) \leq x[i,-1]}
        f_i(x) := 0.5*a*|x|^2 + <b[i],x> + c[i]
     """
    def __init__(self, N, M, a=1.0, b=None, c=None, conj=None):
        Functional.__init__(self)
        assert a > 0
        self.x = Variable((N, M + 1))
        self.a = a
        self.b = np.zeros((N, M)) if b is None else b
        self.c = np.zeros((N, )) if c is None else c
        if conj is None:
            da, db, dc = quad_dual_coefficients(self.a, self.b, self.c)
            self.conj = QuadEpiSupp(N, M, a=da, b=db, c=dc, conj=self)
        else:
            self.conj = conj
        self._prox = QuadEpiProj(N, M, alph=a, b=b, c=c)

    def __call__(self, x, grad=False):
        assert not grad
        x = self.x.vars(x)[0]
        fx = (0.5 * self.a * x[:, :-1]**2 +
              self.b * x[:, :-1]).sum(axis=1) + self.c
        dif = fx - x[:, -1]
        val = 0
        infeas = np.linalg.norm(np.fmax(0, dif), ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau!
        return self._prox
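
The conjugate coefficients follow from the standard identity for quadratics: for f(x) = 0.5*a*|x|^2 + <b,x> + c with a > 0, the conjugate is f*(y) = 0.5*(1/a)*|y|^2 + <-b/a,y> + (|b|^2/(2a) - c). A sketch of what quad_dual_coefficients presumably computes, derived from this identity rather than from the opymize source:

import numpy as np

def quad_dual_coefficients_sketch(a, b, c):
    # f*(y) = 0.5*(1/a)*|y|^2 + <-b/a, y> + (|b|^2/(2a) - c)
    da = 1.0 / a
    db = -b / a
    dc = 0.5 * np.sum(b**2, axis=-1) / a - c
    return da, db, dc
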
Example 5
class L1NormsConj(Functional):
    """ F(x) = \sum_i \delta_{|x[i,:,:]| \leq lbd}
    Supported norms are 'frobenius' and 'spectral'
    """
    def __init__(self, N, M, lbd, matrixnorm="frobenius", conj=None):
        Functional.__init__(self)
        assert matrixnorm in ['frobenius', 'spectral']
        self.x = Variable((N,) + M)
        self.lbd = lbd
        self.matrixnorm = matrixnorm
        conjnorm = 'nuclear' if matrixnorm == 'spectral' else 'frobenius'
        self.conj = L1Norms(N, M, lbd, conjnorm, conj=self) if conj is None else conj
        self._prox = L1NormsProj(N, M, self.lbd, matrixnorm)
        self._xnorms = np.zeros((N,), order='C')

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        norms(x, self._xnorms, self.matrixnorm)
        val = 0
        infeas = norm(np.fmax(0, self._xnorms - self.lbd), ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau!
        return self._prox
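
For the Frobenius norm, L1NormsProj amounts to projecting each matrix x[i,:,:] onto the ball of radius lbd. A minimal numpy sketch of that row-wise projection:

import numpy as np

def proj_frobenius_balls(x, lbd):
    # project every x[i,:,:] onto {A : |A|_F <= lbd}
    nrm = np.sqrt(np.einsum('ijk,ijk->i', x, x))
    fac = lbd / np.fmax(lbd, nrm)   # 1 inside the ball, lbd/|x_i| outside
    return x * fac[:, None, None]
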
Example 6
class IndicatorFct(Functional):
    """ F(x) = c2 if x==c1 else infty (use broadcasting in c1 if necessary) """
    def __init__(self, N, c1=0, c2=0, conj=None):
        Functional.__init__(self)
        self.c1 = c1
        self.c2 = c2
        self.x = Variable(N)
        from opymize.functionals import AffineFct
        self.conj = AffineFct(N, b=-c2, c=c1,
                              conj=self) if conj is None else conj
        self._prox = ConstOp(N, self.x.new() + c1)

    def __call__(self, x, grad=False):
        val = self.c2
        infeas = norm(x - self.c1, ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau
        return self._prox
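
The conjugate pairing used in __init__ is the textbook one: (\delta_{x=c1} + c2)*(y) = <c1,y> - c2, which is exactly AffineFct(N, b=-c2, c=c1). A one-line numpy check of the formula:

import numpy as np

c1, c2 = 3.0, 2.0
y = np.array([0.5, -1.0])
conj_val = np.sum(c1 * y) - c2   # sup_x <y,x> - F(x), attained at x = c1
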
Example 7
class L12ProjJacobian(LinOp):
    """ Jacobian of L1NormsProj for Frobenius norm """
    def __init__(self, N, M, lbd):
        # xnorms[i] = 1.0/|xbar[i,:,:]|_2
        #  exterior[i] = (|xbar[i,:,:]|_2 > lbd)
        LinOp.__init__(self)
        self.x = Variable((N, M[0] * M[1]))
        self.y = self.x
        self.lbd = lbd
        self.adjoint = self
        self.extind = np.zeros(N, dtype=bool)
        self.intind = np.zeros(N, dtype=bool)
        self.xbar_normed = self.x.vars(self.x.new())[0]
        self.lbd_norms = np.zeros(N)

    def update(self, xbar, exterior, xnorms):
        self.extind[:] = exterior
        self.intind[:] = ~self.extind

        self.xbar_normed[:] = xbar.reshape(self.xbar_normed.shape)
        self.xbar_normed[exterior, :] *= xnorms[exterior, None]

        self.lbd_norms[:] = self.lbd * xnorms

    def _call_cpu(self, x, y=None, add=False):
        x = self.x.vars(x)[0]
        if add or y is None:
            yy = self.y.vars(self.y.new())[0]
        else:
            yy = self.y.vars(y)[0]

        # xn[i,k] = xbar[i,k]/|xbar[i,:]|
        xn = self.xbar_normed

        # y[norms <= lbd] = x
        yy[self.intind, :] = x[self.intind, :]

        # y[norms > lbd] = lbd/|xbar|*(x - <xn,x>*xn)
        yy[self.extind, :] = xn[self.extind, :]
        yy[self.extind, :] *= -np.einsum('ik,ik->i', xn[self.extind, :],
                                         x[self.extind, :])[:, None]
        yy[self.extind, :] += x[self.extind, :]
        yy[self.extind, :] *= self.lbd_norms[self.extind, None]

        if y is None:
            if add: x += yy
            else: x[:] = yy
        elif add:
            y += yy
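
The formula in _call_cpu is the derivative of the ball projection P(z) = z*min(1, lbd/|z|): the identity inside the ball, and (lbd/|z|)*(I - zn*zn^T) with zn = z/|z| outside. A single-row numpy sketch:

import numpy as np

def ball_proj_jacobian_apply(zbar, x, lbd):
    # apply DP(zbar) to a direction x, where P projects onto |z| <= lbd
    nrm = np.linalg.norm(zbar)
    if nrm <= lbd:
        return x.copy()                  # interior: identity
    zn = zbar / nrm
    return (lbd / nrm) * (x - np.dot(zn, x) * zn)
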
Example 8
class EpigraphInd(Functional):
    """ F(x) = 0 if x[j,i] \in epi(f[i]_j*) for every j,i else infty

    More precisely:

        F(x) = 0 if <v[k],x[j,i,:-1]> - b[i,k] <= x[j,i,-1]
                    for any i,j,k with I[i,J[j]][k] == True
    """
    def __init__(self, I, If, J, v, b, conj=None):
        """
        Args:
            I : ndarray of bools, shape (nfuns, npoints)
            If : nfuns lists of nregions arrays, shape (nfaces,ndim+1) each
            J : ndarray of ints, shape (nregions, nsubpoints)
            v : ndarray of floats, shape (npoints, ndim)
            b : ndarray of floats, shape (nfuns, npoints)
        """
        Functional.__init__(self)

        nfuns, npoints = I.shape
        nregions, nsubpoints = J.shape
        ndim = v.shape[1]
        self.I, self.J, self.v, self.b = I, J, v, b

        self.x = Variable((nregions, nfuns, ndim + 1))

        if conj is None:
            self.conj = EpigraphSupp(I, If, J, v, b, conj=self)
        else:
            self.conj = conj

        self.A = self.conj.A
        self.b = self.conj.b
        self._prox = EpigraphProj(I, J, v, b, Ab=(self.A, self.b))

    def __call__(self, x, grad=False):
        val = 0
        infeas = max(0, np.amax(self.A.dot(x) - self.b))
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau
        return self._prox
Example 9
class NegativityFct(Functional):
    """ F(x) = 0 if x <= 0 else infty """
    def __init__(self, N, conj=None):
        Functional.__init__(self)
        self.x = Variable(N)
        self.conj = PositivityFct(N, conj=self) if conj is None else conj
        self._prox = NegProj(N)

    def __call__(self, x, grad=False):
        val = 0
        infeas = norm(np.fmax(0, x), ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau
        return self._prox
Example 10
class TruncQuadEpiInd(Functional):
    """ \sum_i \delta_{|x| \leq lbd} + \delta_{f(x[i,:-1]) \leq x[i,-1]}
        f(x) := 0.5*alph*|x|^2
     """
    def __init__(self, N, M, lbd=1.0, alph=1.0, conj=None):
        Functional.__init__(self)
        assert lbd > 0
        assert alph > 0
        self.x = Variable((N, M + 1))
        self.lbd = lbd
        self.alph = alph
        if conj is None:
            dlbd, dalph = self.lbd, self.alph * self.lbd
            self.conj = HuberPerspective(N, M, lbd=dlbd, alph=dalph, conj=self)
        else:
            self.conj = conj
        self._prox = QuadEpiProj(N, M, lbd=self.lbd, alph=self.alph)

    def __call__(self, x, grad=False):
        assert not grad
        x = self.x.vars(x)[0]
        val = 0
        lbd, alph = self.lbd, self.alph
        x1norm = np.linalg.norm(x[:, :-1], axis=-1)
        dif = x1norm - self.lbd
        infeas = np.linalg.norm(np.fmax(0, dif), ord=np.inf)
        fx = 0.5 * alph * np.fmin(self.lbd, x1norm)**2
        dif = fx - x[:, -1]
        infeas += np.linalg.norm(np.fmax(0, dif), ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau!
        return self._prox
Example 11
class SemismoothNewtonSystem(LinOp):
    """ Block matrix of the following form:
        [[      I - K, tau*K*A^T ],
         [ -sigma*H*A,     I - H ]]
    """
    def __init__(self, A, tau, sigma):
        LinOp.__init__(self)
        self.A = A
        self.tau = tau
        self.sigma = sigma
        self.x = Variable((A.x.size,), (A.y.size,))
        self.y = self.x
        self.xtmp = self.x.new()
        self.K = None
        self.H = None
        self.adjoint = SemismoothNewtonSystemAdjoint(self)

    def _call_cpu(self, x, y=None, add=False):
        assert y is not None
        assert add is False

        x1tmp, x2tmp = self.x.vars(self.xtmp)
        x1, x2 = self.x.vars(x)
        y1, y2 = self.y.vars(y)

        self.A.adjoint(x2, x1tmp)
        self.K(x1tmp, y1)
        y1[:] = x1 + self.tau*y1
        self.K(x1, x1tmp)
        y1 -= x1tmp

        self.A(x1, x2tmp)
        self.H(x2tmp, y2)
        y2[:] = x2 - self.sigma*y2
        self.H(x2, x2tmp)
        y2 -= x2tmp
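
_call_cpu applies the two block rows without ever forming the matrix. For small problems the same system can be assembled densely, which is handy for testing; a sketch with A, K and H given as dense arrays (illustrative, not part of the library):

import numpy as np

def assemble_dense(A, K, H, tau, sigma):
    # [[      I - K, tau*K*A^T ],
    #  [ -sigma*H*A,     I - H ]]
    m, n = A.shape
    top = np.hstack([np.eye(n) - K, tau * (K @ A.T)])
    bot = np.hstack([-sigma * (H @ A), np.eye(m) - H])
    return np.vstack([top, bot])
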
Example 12
class SemismoothNewton(object):
    def __init__(self, g, f, A):
        self.g = g
        self.f = f
        self.linop = A
        self.xy = Variable((self.g.x.size,), (self.f.x.size,))
        self.itervars = { 'xyk': self.xy.new() }
        self.constvars = { 'tau': 1.0, 'sigma': 1.0 }

    def obj_primal(self, x, Ax):
        obj, infeas = self.g(x)
        obj2, infeas2 = self.f(Ax)
        return obj + obj2, infeas + infeas2

    def obj_dual(self, ATy, y):
        obj, infeas = self.g.conj(-ATy)
        obj2, infeas2 = self.f.conj(y)
        return -obj - obj2, infeas + infeas2

    def res(self, xy, xyres, subdiff=False, norm=True):
        c = self.constvars
        x, y = self.xy.vars(xy)
        xgrad, ygrad = self.xy.vars(xyres)

        # xgrad = x - Prox[tau*g](x - tau * A^T * y)
        self.linop.adjoint(y, xgrad)
        xgrad[:] = x - c['tau']*xgrad
        K = self.gprox(xgrad, jacobian=subdiff)
        xgrad[:] = x - xgrad

        # ygrad = y - Prox[sigma*fc](y + sigma * A * x)
        self.linop(x, ygrad)
        ygrad[:] = y + c['sigma']*ygrad
        H = self.fconjprox(ygrad, jacobian=subdiff)
        ygrad[:] = y - ygrad

        if subdiff:
            self.M.K = K
            self.M.H = H

        if norm:
            return 0.5*np.einsum('i,i->', xyres, xyres)

    def iteration_step(self, _iter, use_scipy=False):
        i = self.itervars
        c = self.constvars

        # Set up Newton system
        res_normsq = self.res(i['xyk'], i['xyres'], subdiff=True)

        # Solve modified Newton system
        logging.info('Start linsolve')
        if use_scipy:
            scipy_solve(self.M, i['xyres'], i['dk'], lbd=self.lbd,
                        tmpvars=(i['cg_rk'], i['cg_rkp1'], i['xytmp'], i['cg_dk']),
                        tol=res_normsq, solver=scipy.sparse.linalg.lsqr)
        else:
            CG_solve(self.M, i['xyres'], i['dk'], lbd=self.lbd,
                     tmpvars=(i['cg_rk'], i['cg_rkp1'], i['xytmp'], i['cg_dk']),
                     tol=res_normsq)
        logging.info('Stop linsolve')

        # Armijo backtracking
        self.M(i['dk'], i['xytmp'])
        p_gradf = np.einsum('i,i->', i['xyres'], i['xytmp'])
        armijo_fun = lambda xy: self.res(xy, i['xyres'], subdiff=False)
        new_normsq, alpha = armijo(armijo_fun, i['xyk'], i['xykp1'], i['dk'], \
                                   p_gradf, res_normsq)

        # Update, taken from:
        #   "Distributed Newton Methods for Deep Neural Networks"
        #   by C.-C. Wang et al. (arxiv: https://arxiv.org/abs/1802.00130)
        Md_normsq = 0.5*np.einsum('i,i->', i['xytmp'], i['xytmp'])
        rho = (new_normsq - res_normsq)/(alpha*p_gradf + alpha**2*Md_normsq)
        if rho > 0.75:
            self.lbd *= self.lbd_drop
        elif rho < 0.25:
            self.lbd *= self.lbd_boost
        logging.debug("#{:6d}: alpha = {: 9.6g}, "\
                     "rho = {: 9.6g}, lbd = {: 9.6g}, normsq = {: 9.6g}"\
                     .format(_iter, alpha, rho, self.lbd, res_normsq))
        i['xyk'][:] = i['xykp1']

    def prepare_stepsizes(self):
        i = self.itervars
        c = self.constvars
        step_factor = 1.0

        logging.info("Estimating optimal step bound...")
        op_norm, itn = normest(self.linop)
        # round (floor) to 3 significant digits
        bnd = truncate(1.0/op_norm**2, 3) # < 1/|K|^2
        bnd *= 1.25 # boost
        fact = step_factor # tau/sigma
        c['sigma'] = np.sqrt(bnd/fact)
        c['tau'] = bnd/c['sigma']
        logging.info("Constant steps: %f (%f | %f)" % (bnd,c['sigma'],c['tau']))
        self.gprox = self.g.prox(c['tau'])
        self.fconjprox = self.f.conj.prox(c['sigma'])
        self.M = SemismoothNewtonSystem(self.linop, c['tau'], c['sigma'])
        self.lbd = 1.0
        self.lbd_drop = 0.1
        self.lbd_boost = 5.0

    def solve(self, continue_at=None, granularity=50,
                    term_relgap=1e-5, term_infeas=None, term_maxiter=int(5e2)):
        i = self.itervars
        c = self.constvars

        if continue_at is not None:
            i['xyk'][:] = continue_at

        i['xykp1'] = i['xyk'].copy()
        i['xyres'] = i['xyk'].copy()
        i['dk'] = i['xyk'].copy()
        i['xytmp'] = i['xyk'].copy()
        i['cg_dk'] = i['xyk'].copy()
        i['cg_rk'] = i['xyk'].copy()
        i['cg_rkp1'] = i['xyk'].copy()

        xk, yk = self.xy.vars(i['xyk'])
        xkp1, ykp1 = self.xy.vars(i['xykp1'])

        self.prepare_stepsizes()

        if term_infeas is None:
            term_infeas = term_relgap

        obj_p = obj_d = infeas_p = infeas_d = relgap = 0.

        logging.info("Solving (steps<%d)..." % term_maxiter)

        with GracefulInterruptHandler() as interrupt_hdl:
            _iter = 0
            while _iter < term_maxiter:
                self.iteration_step(_iter)
                _iter += 1

                if interrupt_hdl.interrupted or _iter % granularity == 0:
                    if interrupt_hdl.interrupted:
                        print("Interrupt (SIGINT) at iter=%d" % _iter)

                    self.linop(xk, ykp1)
                    self.linop.adjoint(yk, xkp1)
                    obj_p, infeas_p = self.obj_primal(xk, ykp1)
                    obj_d, infeas_d = self.obj_dual(xkp1, yk)

                    # compute relative primal-dual gap
                    relgap = (obj_p - obj_d) / max(np.spacing(1), obj_d)

                    logging.info("#{:6d}: objp = {: 9.6g} ({: 9.6g}), " \
                        "objd = {: 9.6g} ({: 9.6g}), " \
                        "gap = {: 9.6g}, " \
                        "relgap = {: 9.6g} ".format(
                        _iter, obj_p, infeas_p,
                        obj_d, infeas_d,
                        obj_p - obj_d,
                        relgap
                    ))

                    if np.abs(relgap) < term_relgap \
                       and max(infeas_p, infeas_d) < term_infeas:
                        break

                    if interrupt_hdl.interrupted:
                        break

        return {
            'objp': obj_p,
            'objd': obj_d,
            'infeasp': infeas_p,
            'infeasd': infeas_d,
            'relgap': relgap
        }

    @property
    def state(self):
        return self.xy.vars(self.itervars['xyk'])
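
The quantity computed in res is the PDHG fixed-point residual; the semismooth Newton iteration drives it to zero. With explicit prox callables and a dense A the same residual reads (a sketch under those assumptions):

import numpy as np

def pd_residual(x, y, A, prox_g, prox_fc, tau, sigma):
    # zero exactly at a primal-dual solution of min_x g(x) + f(A x)
    rx = x - prox_g(x - tau * (A.T @ y))
    ry = y - prox_fc(y + sigma * (A @ x))
    return rx, ry
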
Example 13
class Model(ModelHARDI_SHM):
    name = "sh_l_tvw2"

    def __init__(self, *args, gradnorm="frobenius", **kwargs):
        ModelHARDI_SHM.__init__(self, *args, **kwargs)
        self.gradnorm = 'nuclear' if gradnorm == "spectral" else "frobenius"

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        self.x = Variable(
            ('u1', (n_image, l_labels)),
            ('u2', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('w', (m_gradients, n_image, d_image, s_manifold)),
        )

        self.y = Variable(
            ('p', (n_image, d_image, l_shm)),
            ('g', (m_gradients, n_image, d_image, s_manifold)),
            ('q0', (n_image, )),
            ('q1', (n_image, l_labels)),
            ('q2', (n_image, l_labels)),
        )

        # start with a uniform distribution in each voxel
        self.state = (self.x.new(), self.y.new())
        u1k, u2k, vk, wk = self.x.vars(self.state[0])
        u1k[:] = 1.0 / np.einsum('k->', c['b'])
        vk[:, 0] = .5 / np.sqrt(np.pi)

        c['G'] = np.zeros((m_gradients, s_manifold, l_shm), order='C')
        c['G'][:] = sym_shm_sample_grad(c['Y'], self.data.b_sph.v_grad)

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        dataterm = SSD(c['f'], vol=c['b'], mask=c['inpaint_nloc'])

        self.pdhg_G = SplitSum([
            PositivityFct(x['u1']['size']),  # \delta_{u1 >= 0}
            dataterm,  # 0.5*<u2-f,u2-f>
            ZeroFct(x['v']['size']),  # 0
            ZeroFct(x['w']['size'])  # 0
        ])

        GradOp = GradientOp(imagedims, l_shm)

        GMult = TangledMatrixMultR(n_image * d_image, c['G'][:, :, None, :])

        bMult = MatrixMultR(n_image, c['b_precond'] * c['b'][:, None])
        YMult = MatrixMultR(n_image, c['Y'], trans=True)
        YMMult = MatrixMultR(n_image, c['YM'], trans=True)

        m_u = ScaleOp(x['u1']['size'], -1)
        m_w = ScaleOp(x['w']['size'], -1)

        self.pdhg_linop = BlockOp([
            [0, 0, GradOp, GMult],  # p = Dv + G'w
            [0, 0, 0, m_w],  # g = -w
            [bMult, 0, 0, 0],  # q0 = <b,u1>
            [m_u, 0, YMult, 0],  # q1 = Yv - u1
            [0, m_u, YMMult, 0]  # q2 = YMv - u2
        ])

        l1norms = L1Norms(m_gradients * n_image, (d_image, s_manifold),
                          c['lbd'], self.gradnorm)

        self.pdhg_F = SplitSum([
            IndicatorFct(y['p']['size']),  # \delta_{p = 0}
            l1norms,  # lbd*\sum_ji |g[j,i,:,:]|_nuc
            IndicatorFct(y['q0']['size'],
                         c1=c['b_precond']),  # \delta_{q0 = 1}
            IndicatorFct(x['u1']['size']),  # \delta_{q1 = 0}
            IndicatorFct(x['u1']['size'])  # \delta_{q2 = 0}
        ])

    def setup_solver_cvx(self):
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        self.cvx_x = Variable(
            ('p', (l_shm, d_image, n_image)),
            ('g', (n_image, m_gradients, s_manifold, d_image)),
            ('q0', (n_image, )),
            ('q1', (l_labels, n_image)),
            ('q2', (l_labels, n_image)),
        )

        self.cvx_y = Variable(
            ('u1', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('w', (m_gradients, n_image, d_image, s_manifold)),
            ('misc', (n_image * m_gradients, )),
        )

        p, g, q0, q1, q2 = [
            cvxVariable(*a['shape']) for a in self.cvx_x.vars()
        ]
        self.cvx_vars = p + sum(g, []) + [q0, q1, q2]

        self.cvx_obj = cvx.Maximize(
            0.5 * cvx.sum(cvx.diag(c['b']) * cvx.square(c['f'].T)) -
            0.5 * cvx.sum(
                cvx.diag(1.0 / c['b']) *
                cvx.square(q2 + cvx.diag(c['b']) * c['f'].T)) - cvx.sum(q0))

        div_op = sparse_div_op(imagedims)

        self.cvx_dual = True
        self.cvx_constr = []

        # u1_constr
        for i in range(n_image):
            self.cvx_constr.append(c['b'] * q0[i] - q1[:, i] >= 0)

        # v_constr
        for i in range(n_image):
            for k in range(l_shm):
                Yk = cvx.vec(c['Y'][:, k])
                self.cvx_constr.append(
                    Yk.T*(c['M'][k]*q2[:,i] + q1[:,i]) \
                        - cvxOp(div_op, p[k], i) == 0)

        # w_constr
        for j in range(m_gradients):
            Gj = c['G'][j, :, :]
            for i in range(n_image):
                for t in range(d_image):
                    for l in range(s_manifold):
                        self.cvx_constr.append(
                            g[i][j][l,t] == sum([Gj[l,k]*p[k][t,i] \
                                                 for k in range(l_shm)]))

        # additional inequality constraints
        for i in range(n_image):
            for j in range(m_gradients):
                self.cvx_constr.append(cvx.norm(g[i][j], 2) <= c['lbd'])
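
All setup_solver_pdhg methods in these models assemble the same three ingredients: G = pdhg_G, F = pdhg_F and K = pdhg_linop of the saddle-point problem min_x G(x) + F(Kx). A generic Chambolle-Pock step over such ingredients, sketched with a dense K and explicit prox callables:

import numpy as np

def pdhg_step(x, y, K, prox_g, prox_fc, tau, sigma, theta=1.0):
    # one primal-dual hybrid gradient iteration for min_x G(x) + F(K x)
    x_new = prox_g(x - tau * (K.T @ y))
    x_bar = x_new + theta * (x_new - x)      # extrapolation
    y_new = prox_fc(y + sigma * (K @ x_bar))
    return x_new, y_new
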
Example 14
class Model(ModelHARDI):
    name = "n_w_tvw"

    def __init__(self, *args, dataterm="W1", gradnorm="frobenius", **kwargs):
        ModelHARDI.__init__(self, *args, **kwargs)
        self.gradnorm = 'nuclear' if gradnorm == "spectral" else "frobenius"

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']

        xvars = [('u', (n_image, l_labels)),
                 ('w', (m_gradients, n_image, d_image, s_manifold))]
        yvars = [('p', (n_image, d_image, l_labels)),
                 ('g', (m_gradients, n_image, d_image, s_manifold)),
                 ('q', (n_image, ))]

        self.dataterm = dataterm
        if self.dataterm == "W1":
            xvars.append(('w0', (m_gradients, n_image, s_manifold)))
            yvars.append(('p0', (n_image, l_labels)))
            yvars.append(('g0', (m_gradients, n_image, s_manifold)))
        elif self.dataterm != "quadratic":
            raise Exception("Dataterm unknown: %s" % self.dataterm)

        self.x = Variable(*xvars)
        self.y = Variable(*yvars)

        # start with a uniform distribution in each voxel
        self.state = (self.x.new(), self.y.new())
        x = self.x.vars(self.state[0], named=True)
        x['u'][:] = 1.0 / np.einsum('k->', c['b'])

        f = self.data.odf
        f_flat = f.reshape(-1, l_labels).T
        f = np.array(f_flat.reshape((l_labels, ) + imagedims), order='C')
        normalize_odf(f, c['b'])
        self.f = np.array(f.reshape(l_labels, -1).T, order='C')

        logging.info("HARDI setup ({l_labels} labels; " \
                     "img: {imagedims}; lambda={lbd:.3g}) ready.".format(
                         lbd=c['lbd'],
                         l_labels=c['l_labels'],
                         imagedims="x".join(map(str,c['imagedims']))))

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']

        GradOp = GradientOp(imagedims, l_labels, weights=c['b'])

        PBLinOp = IndexedMultAdj(l_labels, d_image * n_image, c['P'], c['B'])
        AMult = MatrixMultRBatched(n_image * d_image, c['A'])

        bMult = MatrixMultR(n_image, c['b_precond'] * c['b'][:, None])

        l1norms = L1Norms(m_gradients * n_image, (d_image, s_manifold),
                          c['lbd'], self.gradnorm)

        if self.dataterm == "W1":
            self.pdhg_G = SplitSum([
                PositivityFct(x['u']['size']),  # \delta_{u >= 0}
                ZeroFct(x['w']['size']),  # 0
                ZeroFct(x['w0']['size']),  # 0
            ])

            PBLinOp0 = IndexedMultAdj(l_labels, n_image, c['P'], c['B'])
            AMult0 = MatrixMultRBatched(n_image, c['A'])
            bMult0 = DiagMatrixMultR(n_image, c['b'])

            self.pdhg_linop = BlockOp([
                [GradOp, PBLinOp, 0],  # p = diag(b)Du - P'B'w
                [0, AMult, 0],  # g = A'w
                [bMult, 0, 0],  # q = <b,u>
                [bMult0, 0, PBLinOp0],  # p0 = diag(b) u - P'B'w0
                [0, 0, AMult0]  # g0 = A'w0
            ])

            diag_b_f = np.einsum('ik,k->ik', self.f, c['b'])
            dataterm = ConstrainFct(c['inpaint_nloc'], diag_b_f)

            l1norms0 = L1Norms(m_gradients * n_image, (1, s_manifold), 1.0,
                               "frobenius")

            self.pdhg_F = SplitSum([
                IndicatorFct(y['p']['size']),  # \delta_{p = 0}
                l1norms,  # lbd*\sum_ji |g[j,i,:,:]|_nuc
                IndicatorFct(y['q']['size'],
                             c1=c['b_precond']),  # \delta_{q = 1}
                dataterm,  # \delta_{p0 = diag(b)f}
                l1norms0,  # \sum_ji |g0[j,i,:]|_2
            ])
        elif self.dataterm == "quadratic":
            dataterm = PosSSD(self.f, vol=c['b'], mask=c['inpaint_nloc'])
            self.pdhg_G = SplitSum([
                dataterm,  # 0.5*<u-f,u-f>_b + \delta_{u >= 0}
                ZeroFct(x['w']['size']),  # 0
            ])

            self.pdhg_linop = BlockOp([
                [GradOp, PBLinOp],  # p = diag(b)Du - P'B'w
                [0, AMult],  # g = A'w
                [bMult, 0],  # q = <b,u>
            ])

            self.pdhg_F = SplitSum([
                IndicatorFct(y['p']['size']),  # \delta_{p = 0}
                l1norms,  # lbd*\sum_ji |g[j,i,:,:]|_nuc
                IndicatorFct(y['q']['size'],
                             c1=c['b_precond']),  # \delta_{q = 1}
            ])
Example 15
class Model(ModelHARDI_SHM):
    name = "sh_bndl2_tvw"

    def __init__(self, *args, gradnorm="frobenius", conf_lvl=0.9, **kwargs):
        ModelHARDI_SHM.__init__(self, *args, **kwargs)
        self.gradnorm = 'nuclear' if gradnorm == "spectral" else "frobenius"

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        self.data.init_bounds(conf_lvl)
        _, f1, f2 = self.data.bounds
        c['f1'], c['f2'] = [np.array(a.T, order='C') for a in [f1, f2]]

        self.x = Variable(
            ('u1', (n_image, l_labels)),
            ('u2', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('w', (m_gradients, n_image, d_image, s_manifold)),
        )

        self.y = Variable(
            ('p', (n_image, d_image, l_labels)),
            ('g', (m_gradients, n_image, d_image, s_manifold)),
            ('q0', (n_image, )),
            ('q1', (n_image, l_labels)),
            ('q2', (n_image, l_labels)),
        )

        # start with a uniform distribution in each voxel
        self.state = (self.x.new(), self.y.new())
        u1k, u2k, vk, wk = self.x.vars(self.state[0])
        u1k[:] = 1.0 / np.einsum('k->', c['b'])
        vk[:, 0] = .5 / np.sqrt(np.pi)

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        dataterm = BndSSD(c['f1'], c['f2'], vol=c['b'], mask=c['inpaint_nloc'])

        self.pdhg_G = SplitSum([
            PositivityFct(x['u1']['size']),  # \delta_{u1 >= 0}
            dataterm,  # 0.5*|max(0, f1 - u2)|^2 + 0.5*|max(0, u2 - f2)|^2
            ZeroFct(x['v']['size']),  # 0
            ZeroFct(x['w']['size'])  # 0
        ])

        GradOp = GradientOp(imagedims, l_labels, weights=c['b'])

        PBLinOp = IndexedMultAdj(l_labels, d_image * n_image, c['P'], c['B'])
        AMult = MatrixMultRBatched(n_image * d_image, c['A'])

        bMult = MatrixMultR(n_image, c['b_precond'] * c['b'][:, None])
        YMult = MatrixMultR(n_image, c['Y'], trans=True)
        YMMult = MatrixMultR(n_image, c['YM'], trans=True)

        m_u = ScaleOp(x['u1']['size'], -1)

        self.pdhg_linop = BlockOp([
            [GradOp, 0, 0, PBLinOp],  # p = diag(b)Du1 - P'B'w
            [0, 0, 0, AMult],  # g = A'w
            [bMult, 0, 0, 0],  # q0 = <b,u1>
            [m_u, 0, YMult, 0],  # q1 = Yv - u1
            [0, m_u, YMMult, 0]  # q2 = YMv - u2
        ])

        l1norms = L1Norms(m_gradients * n_image, (d_image, s_manifold),
                          c['lbd'], self.gradnorm)

        self.pdhg_F = SplitSum([
            IndicatorFct(y['p']['size']),  # \delta_{p = 0}
            l1norms,  # lbd*\sum_ji |g[j,i,:,:]|_nuc
            IndicatorFct(y['q0']['size'],
                         c1=c['b_precond']),  # \delta_{q0 = 1}
            IndicatorFct(x['u1']['size']),  # \delta_{q1 = 0}
            IndicatorFct(x['u1']['size'])  # \delta_{q2 = 0}
        ])

    def setup_solver_cvx(self):
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        self.cvx_x = Variable(
            ('p', (l_labels, d_image, n_image)),
            ('g', (n_image, m_gradients, s_manifold, d_image)),
            ('q0', (n_image, )),
            ('q1', (l_labels, n_image)),
            ('q2', (l_labels, n_image)),
        )

        self.cvx_y = Variable(
            ('u1', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('w', (m_gradients, n_image, d_image, s_manifold)),
            ('misc', (n_image * m_gradients, )),
        )

        p, g, q0, q1, q2 = [
            cvxVariable(*a['shape']) for a in self.cvx_x.vars()
        ]
        self.cvx_vars = p + sum(g, []) + [q0, q1, q2]

        fid_fun_dual = 0
        for i in range(n_image):
            for k in range(l_labels):
                fid_fun_dual += -cvx.power(q2[k,i],2)/2 \
                             - cvx.maximum(q2[k,i]*c['f1'][i,k],
                                 q2[k,i]*c['f2'][i,k])

        self.cvx_obj = cvx.Maximize(fid_fun_dual - cvx.sum(q0))

        div_op = sparse_div_op(imagedims)

        self.cvx_dual = True
        self.cvx_constr = []

        # u1_constr
        for i in range(n_image):
            for k in range(l_labels):
                self.cvx_constr.append(
                    c['b'][k] *
                    (q0[i] - cvxOp(div_op, p[k], i)) - q1[k, i] >= 0)

        # v_constr
        for i in range(n_image):
            for k in range(l_shm):
                Yk = cvx.vec(c['Y'][:, k])
                self.cvx_constr.append(Yk.T *
                                       (c['M'][k] * q2[:, i] + q1[:, i]) == 0)

        # w_constr
        for j in range(m_gradients):
            Aj = c['A'][j, :, :]
            Bj = c['B'][j, :, :]
            Pj = c['P'][j, :]
            for i in range(n_image):
                for t in range(d_image):
                    for l in range(s_manifold):
                        self.cvx_constr.append(Aj * g[i][j][l, t] == sum(
                            [Bj[l, m] * p[Pj[m]][t, i] for m in range(3)]))

        # additional inequality constraints
        for i in range(n_image):
            for j in range(m_gradients):
                self.cvx_constr.append(cvx.norm(g[i][j], 2) <= c['lbd'])
Example 16
class Model(ModelHARDI_SHM):
    name = "sh_bndl1_tvc"

    def __init__(self, *args, conf_lvl=0.9, **kwargs):
        ModelHARDI_SHM.__init__(self, *args, **kwargs)

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        self.data.init_bounds(conf_lvl)
        _, f1, f2 = self.data.bounds
        c['f1'], c['f2'] = [np.array(a.T, order='C') for a in [f1, f2]]

        self.x = Variable(
            ('u1', (n_image, l_labels)),
            ('u2', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
        )

        self.y = Variable(
            ('p', (n_image, d_image, l_shm)),
            ('q0', (n_image, )),
            ('q1', (n_image, l_labels)),
            ('q2', (n_image, l_labels)),
            ('q3', (n_image, l_labels)),
            ('q4', (n_image, l_labels)),
        )

        # start with a uniform distribution in each voxel
        self.state = (self.x.new(), self.y.new())
        u1k, u2k, vk = self.x.vars(self.state[0])
        u1k[:] = 1.0 / np.einsum('k->', c['b'])
        vk[:, 0] = .5 / np.sqrt(np.pi)

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        self.pdhg_G = SplitSum([
            PositivityFct(x['u1']['size']),  # \delta_{u1 >= 0}
            ZeroFct(x['u1']['size']),  # 0
            ZeroFct(x['v']['size'])  # 0
        ])

        GradOp = GradientOp(imagedims, l_shm)

        bMult = MatrixMultR(n_image, c['b_precond'] * c['b'][:, None])
        YMult = MatrixMultR(n_image, c['Y'], trans=True)
        YMMult = MatrixMultR(n_image, c['YM'], trans=True)

        m_u = ScaleOp(x['u1']['size'], -1)

        dbMult = DiagMatrixMultR(n_image, c['b'])
        mdbMult = DiagMatrixMultR(n_image, -c['b'])

        self.pdhg_linop = BlockOp([
            [0, 0, GradOp],  # p  = Dv
            [bMult, 0, 0],  # q0 = <b,u1>
            [m_u, 0, YMult],  # q1 = Yv - u1
            [0, m_u, YMMult],  # q2 = YMv - u2
            [0, mdbMult, 0],  # q3 = -diag(b) u2
            [0, dbMult, 0]  # q4 = diag(b) u2
        ])

        l1norms = L1Norms(n_image, (d_image, l_shm), c['lbd'], "frobenius")
        LowerBoundFct = MaxFct(np.einsum('ik,k->ik', c['f1'], -c['b']))
        UpperBoundFct = MaxFct(np.einsum('ik,k->ik', c['f2'], c['b']))

        self.pdhg_F = SplitSum([
            l1norms,  # lbd*\sum_i |p[i,:,:]|_2
            IndicatorFct(y['q0']['size'],
                         c1=c['b_precond']),  # \delta_{q0 = 1}
            IndicatorFct(x['u1']['size']),  # \delta_{q1 = 0}
            IndicatorFct(x['u1']['size']),  # \delta_{q2 = 0}
            LowerBoundFct,  # |max(0, q3 + diag(b)f1)|_1
            UpperBoundFct  # |max(0, q4 - diag(b)f2)|_1
        ])

    def setup_solver_cvx(self):
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        self.cvx_x = Variable(
            ('p', (l_shm, d_image, n_image)),
            ('q0', (n_image, )),
            ('q1', (l_labels, n_image)),
            ('q2', (l_labels, n_image)),
            ('q3', (l_labels, n_image)),
            ('q4', (l_labels, n_image)),
        )

        self.cvx_y = Variable(
            ('u1', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('misc', (n_image * l_labels * 5 + n_image, )),
        )

        p, q0, q1, q2, q3, q4 = [
            cvxVariable(*a['shape']) for a in self.cvx_x.vars()
        ]
        self.cvx_vars = p + [q0, q1, q2, q3, q4]

        self.cvx_obj = cvx.Maximize(
            cvx.vec(q3).T * cvx.vec(cvx.diag(c['b']) * c['f1'].T) -
            cvx.vec(q4).T * cvx.vec(cvx.diag(c['b']) * c['f2'].T) -
            cvx.sum(q0))

        div_op = sparse_div_op(imagedims)

        self.cvx_dual = True
        self.cvx_constr = []

        # u1_constr
        for i in range(n_image):
            self.cvx_constr.append(c['b'][:] * q0[i] - q1[:, i] >= 0)

        # v_constr
        for i in range(n_image):
            for k in range(l_shm):
                Yk = cvx.vec(c['Y'][:, k])
                self.cvx_constr.append(
                    Yk.T*(c['M'][k]*q2[:,i] + q1[:,i]) \
                        - cvxOp(div_op, p[k], i) == 0)

        # additional inequality constraints
        for i in range(n_image):
            for k in range(l_labels):
                self.cvx_constr.append(0 <= q3[k, i])
                self.cvx_constr.append(q3[k, i] <= 1)
                self.cvx_constr.append(0 <= q4[k, i])
                self.cvx_constr.append(q4[k, i] <= 1)
                self.cvx_constr.append(q4[k, i] - q3[k, i] - q2[k, i] == 0)

        for i in range(n_image):
            self.cvx_constr.append(sum(cvx.sum_squares(p[k][:,i]) \
                                       for k in range(l_shm)) <= c['lbd']**2)
Esempio n. 17
0
class Model(ModelHARDI_SHM):
    name = "sh_w_tvw"

    def __init__(self, *args, dataterm="W1", gradnorm="frobenius", **kwargs):
        ModelHARDI_SHM.__init__(self, *args, **kwargs)
        self.gradnorm = 'nuclear' if gradnorm == "spectral" else "frobenius"

        c = self.constvars
        b_sph = self.data.b_sph
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        xvars = [('u', (n_image, l_labels)), ('v', (n_image, l_shm)),
                 ('w', (m_gradients, n_image, d_image, s_manifold))]
        yvars = [('p', (n_image, d_image, l_labels)),
                 ('g', (m_gradients, n_image, d_image, s_manifold)),
                 ('q0', (n_image, )), ('q1', (n_image, l_labels))]

        self.dataterm = dataterm
        if self.dataterm == "W1":
            xvars.append(('w0', (m_gradients, n_image, s_manifold)))
            yvars.append(('p0', (n_image, l_labels)))
            yvars.append(('g0', (m_gradients, n_image, s_manifold)))
        elif self.dataterm != "quadratic":
            raise Exception("Dataterm unknown: %s" % self.dataterm)

        self.x = Variable(*xvars)
        self.y = Variable(*yvars)

        # start with a uniform distribution in each voxel
        self.state = (self.x.new(), self.y.new())
        x = self.x.vars(self.state[0], named=True)
        x['u'][:] = 1.0 / np.einsum('k->', c['b'])
        x['v'][:, 0] = .5 / np.sqrt(np.pi)

        f = self.data.odf
        f_flat = f.reshape(-1, l_labels).T
        f = np.array(f_flat.reshape((l_labels, ) + imagedims), order='C')
        normalize_odf(f, c['b'])
        self.f = np.array(f.reshape(l_labels, -1).T, order='C')

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        GradOp = GradientOp(imagedims, l_labels, weights=c['b'])

        PBLinOp = IndexedMultAdj(l_labels, d_image * n_image, c['P'], c['B'])
        AMult = MatrixMultRBatched(n_image * d_image, c['A'])

        bMult = MatrixMultR(n_image, c['b_precond'] * c['b'][:, None])

        YMult = MatrixMultR(n_image, c['Y'], trans=True)
        m_u = ScaleOp(x['u']['size'], -1)

        l1norms = L1Norms(m_gradients * n_image, (d_image, s_manifold),
                          c['lbd'], self.gradnorm)

        if self.dataterm == "W1":
            self.pdhg_G = SplitSum([
                PositivityFct(x['u']['size']),  # \delta_{u >= 0}
                ZeroFct(x['v']['size']),  # 0
                ZeroFct(x['w']['size']),  # 0
                ZeroFct(x['w0']['size']),  # 0
            ])

            PBLinOp0 = IndexedMultAdj(l_labels, n_image, c['P'], c['B'])
            AMult0 = MatrixMultRBatched(n_image, c['A'])
            bMult0 = DiagMatrixMultR(n_image, c['b'])

            self.pdhg_linop = BlockOp([
                [GradOp, 0, PBLinOp, 0],  # p = diag(b)Du - P'B'w
                [0, 0, AMult, 0],  # g = A'w
                [bMult, 0, 0, 0],  # q0 = <b,u>
                [m_u, YMult, 0, 0],  # q1 = Yv - u
                [bMult0, 0, 0, PBLinOp0],  # p0 = diag(b) u - P'B'w0
                [0, 0, 0, AMult0]  # g0 = A'w0
            ])

            diag_b_f = np.einsum('ik,k->ik', self.f, c['b'])
            dataterm = ConstrainFct(c['inpaint_nloc'], diag_b_f)

            l1norms0 = L1Norms(m_gradients * n_image, (1, s_manifold), 1.0,
                               "frobenius")

            self.pdhg_F = SplitSum([
                IndicatorFct(y['p']['size']),  # \delta_{p = 0}
                l1norms,  # lbd*\sum_ji |g[j,i,:,:]|_nuc
                IndicatorFct(y['q0']['size'],
                             c1=c['b_precond']),  # \delta_{q0 = 1}
                IndicatorFct(x['u']['size']),  # \delta_{q1 = 0}
                dataterm,  # \delta_{p0 = diag(b)f}
                l1norms0,  # \sum_ji |g0[j,i,:]|_2
            ])
        elif self.dataterm == "quadratic":
            dataterm = PosSSD(self.f, vol=c['b'], mask=c['inpaint_nloc'])
            self.pdhg_G = SplitSum([
                dataterm,  # 0.5*<u-f,u-f>_b + \delta_{u >= 0}
                ZeroFct(x['v']['size']),  # 0
                ZeroFct(x['w']['size']),  # 0
            ])

            self.pdhg_linop = BlockOp([
                [GradOp, 0, PBLinOp],  # p = diag(b)Du - P'B'w
                [0, 0, AMult],  # g = A'w
                [bMult, 0, 0],  # q0 = <b,u>
                [m_u, YMult, 0],  # q1 = Yv - u
            ])

            self.pdhg_F = SplitSum([
                IndicatorFct(y['p']['size']),  # \delta_{p = 0}
                l1norms,  # lbd*\sum_ji |g[j,i,:,:]|_nuc
                IndicatorFct(y['q0']['size'],
                             c1=c['b_precond']),  # \delta_{q0 = 1}
                IndicatorFct(x['u']['size']),  # \delta_{q1 = 0}
            ])

    def setup_solver_cvx(self):
        if self.dataterm != "W1":
            raise Exception("Only W1 dataterm is implemented in CVX")

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        f_flat = self.data.odf.reshape(-1, l_labels).T
        f = np.array(f_flat.reshape((l_labels, ) + imagedims), order='C')
        normalize_odf(f, c['b'])
        f_flat = f.reshape(l_labels, n_image)

        self.cvx_x = Variable(
            ('p', (l_labels, d_image, n_image)),
            ('g', (n_image, m_gradients, s_manifold, d_image)),
            ('q0', (n_image, )),
            ('q1', (l_labels, n_image)),
            ('p0', (l_labels, n_image)),
            ('g0', (n_image, m_gradients, s_manifold)),
        )

        self.cvx_y = Variable(
            ('u', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('w', (m_gradients, n_image, d_image, s_manifold)),
            ('w0', (m_gradients, n_image, s_manifold)),
            ('misc', (n_image * m_gradients, )),
        )

        p, g, q0, q1, p0, g0 = [
            cvxVariable(*a['shape']) for a in self.cvx_x.vars()
        ]
        self.cvx_vars = p + sum(g, []) + [q0, q1, p0] + g0

        self.cvx_obj = cvx.Maximize(-cvx.vec(f_flat).T *
                                    cvx.vec(cvx.diag(c['b']) * p0) -
                                    cvx.sum(q0))

        div_op = sparse_div_op(imagedims)

        self.cvx_dual = True
        self.cvx_constr = []

        # u_constr
        for i in range(n_image):
            for k in range(l_labels):
                self.cvx_constr.append(
                    c['b'][k]*(q0[i] + p0[k,i] \
                        - cvxOp(div_op, p[k], i)) - q1[k,i] >= 0)

        # v_constr
        for i in range(n_image):
            for k in range(l_shm):
                Yk = cvx.vec(c['Y'][:, k])
                self.cvx_constr.append(-Yk.T * q1[:, i] == 0)

        # w_constr
        for j in range(m_gradients):
            Aj = c['A'][j, :, :]
            Bj = c['B'][j, :, :]
            Pj = c['P'][j, :]
            for i in range(n_image):
                for t in range(d_image):
                    for l in range(s_manifold):
                        self.cvx_constr.append(
                            Aj*g[i][j][l,t] == sum([Bj[l,m]*p[Pj[m]][t,i] \
                                                    for m in range(3)]))

        # w0_constr
        for j in range(m_gradients):
            Aj = c['A'][j, :, :]
            Bj = c['B'][j, :, :]
            Pj = c['P'][j, :]
            for i in range(n_image):
                self.cvx_constr.append(Aj * g0[i][j, :].T == Bj * p0[Pj, i])

        # additional inequality constraints
        for i in range(n_image):
            for j in range(m_gradients):
                self.cvx_constr.append(cvx.norm(g[i][j], 2) <= c['lbd'])
                self.cvx_constr.append(cvx.norm(g0[i][j, :], 2) <= 1.0)
Example 18
class Model(ModelHARDI_SHM):
    name = "sh_bndl2_tvc"

    def __init__(self, *args, conf_lvl=0.9, **kwargs):
        ModelHARDI_SHM.__init__(self, *args, **kwargs)

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        self.data.init_bounds(conf_lvl)
        _, f1, f2 = self.data.bounds
        c['f1'], c['f2'] = [np.array(a.T, order='C') for a in [f1, f2]]

        self.x = Variable(
            ('u1', (n_image, l_labels)),
            ('u2', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
        )

        self.y = Variable(
            ('p', (n_image, d_image, l_shm)),
            ('q0', (n_image, )),
            ('q1', (n_image, l_labels)),
            ('q2', (n_image, l_labels)),
        )

        # start with a uniform distribution in each voxel
        self.state = (self.x.new(), self.y.new())
        u1k, u2k, vk = self.x.vars(self.state[0])
        u1k[:] = 1.0 / np.einsum('k->', c['b'])
        vk[:, 0] = .5 / np.sqrt(np.pi)

    def setup_solver_pdhg(self):
        x, y = self.x.vars(named=True), self.y.vars(named=True)
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        dataterm = BndSSD(c['f1'], c['f2'], vol=c['b'], mask=c['inpaint_nloc'])

        self.pdhg_G = SplitSum([
            PositivityFct(x['u1']['size']),  # \delta_{u1 >= 0}
            dataterm,  # 0.5*|max(0, f1 - u2)|^2 + 0.5*|max(0, u2 - f2)|^2
            ZeroFct(x['v']['size'])  # 0
        ])

        GradOp = GradientOp(imagedims, l_shm)

        bMult = MatrixMultR(n_image, c['b_precond'] * c['b'][:, None])
        YMult = MatrixMultR(n_image, c['Y'], trans=True)
        YMMult = MatrixMultR(n_image, c['YM'], trans=True)

        m_u = ScaleOp(x['u1']['size'], -1)

        self.pdhg_linop = BlockOp([
            [0, 0, GradOp],  # p  = Dv
            [bMult, 0, 0],  # q0 = <b,u1>
            [m_u, 0, YMult],  # q1 = Yv - u1
            [0, m_u, YMMult]  # q2 = YMv - u2
        ])

        l1norms = L1Norms(n_image, (d_image, l_shm), c['lbd'], "frobenius")

        self.pdhg_F = SplitSum([
            l1norms,  # lbd*\sum_i |p[i,:,:]|_2
            IndicatorFct(y['q0']['size'],
                         c1=c['b_precond']),  # \delta_{q0 = 1}
            IndicatorFct(x['u1']['size']),  # \delta_{q1 = 0}
            IndicatorFct(x['u1']['size'])  # \delta_{q2 = 0}
        ])

    def setup_solver_cvx(self):
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        self.cvx_x = Variable(
            ('p', (l_shm, d_image, n_image)),
            ('q0', (n_image, )),
            ('q1', (l_labels, n_image)),
            ('q2', (l_labels, n_image)),
        )

        self.cvx_y = Variable(
            ('u1', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('misc', (n_image, )),
        )

        p, q0, q1, q2 = [cvxVariable(*a['shape']) for a in self.cvx_x.vars()]
        self.cvx_vars = p + [q0, q1, q2]

        fid_fun_dual = 0
        for i in range(n_image):
            for k in range(l_labels):
                fid_fun_dual += -1.0/c['b'][k]*(cvx.power(q2[k,i],2)/2 \
                             + cvx.maximum(q2[k,i]*c['b'][k]*c['f1'][i,k],
                                 q2[k,i]*c['b'][k]*c['f2'][i,k]))

        self.cvx_obj = cvx.Maximize(fid_fun_dual - cvx.sum(q0))

        div_op = sparse_div_op(imagedims)

        self.cvx_dual = True
        self.cvx_constr = []

        # u1_constr
        for i in range(n_image):
            self.cvx_constr.append(c['b'] * q0[i] - q1[:, i] >= 0)

        # v_constr
        for i in range(n_image):
            for k in range(l_shm):
                Yk = cvx.vec(c['Y'][:, k])
                self.cvx_constr.append(
                    Yk.T*(c['M'][k]*q2[:,i] + q1[:,i]) \
                        - cvxOp(div_op, p[k], i) == 0)

        # additional inequality constraints
        for i in range(n_image):
            self.cvx_constr.append(sum(cvx.sum_squares(p[k][:,i]) \
                                       for k in range(l_shm)) <= c['lbd']**2)
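
The final cvx constraint encodes the dual norm ball: summing sum_squares over the l_shm slices is the squared Frobenius norm of the stacked matrix, so the constraint is |P_i|_F <= lbd, matching the L1Norms term in pdhg_F. A quick numpy check of the equivalence:

import numpy as np

lbd = 0.7
P = np.random.randn(5, 3)   # stands in for the stacked slices p[k][:, i]
assert (np.sum(P**2) <= lbd**2) == (np.linalg.norm(P, 'fro') <= lbd)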