Example #1
class ConstrainFct(Functional):
    """ F(x) = 0 if x[mask,:]==c[mask,:] else infty
        The mask is only applied to the first component of x
    """
    def __init__(self, mask, c, conj=None):
        Functional.__init__(self)
        self.x = Variable(c.shape)
        self.mask = mask
        self.c = c
        if conj is None:
            from opymize.functionals import MaskedAffineFct
            self.conj = MaskedAffineFct(mask, c, conj=self)
        else:
            self.conj = conj
        self._prox = ConstrainOp(mask, c)

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        val = 0
        infeas = norm(x[self.mask, :] - self.c[self.mask, :], ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau
        return self._prox
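ConstrainFct is the indicator function of the constraint x[mask,:] == c[mask,:], and its prox is the projection that overwrites the masked rows with c. A minimal usage sketch (hypothetical shapes; assumes numpy and the opymize classes imported by ConstrainFct, e.g. ConstrainOp and MaskedAffineFct, are available):

import numpy as np

c = np.zeros((5, 3))                 # target values, shape (npoints, nchannels)
mask = np.zeros(5, dtype=bool)
mask[:2] = True                      # constrain only the first two rows

F = ConstrainFct(mask, c)
x = F.x.new()                        # flat, zero-initialized variable (feasible)
val, infeas = F(x)                   # val == 0, infeas == 0

F.x.vars(x)[0][0, :] = 1.0           # violate the constraint in the first row
val, infeas = F(x)                   # infeas > 0 signals the violation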
Example #2
    def __init__(self, *args, gradnorm="frobenius", **kwargs):
        ModelHARDI_SHM.__init__(self, *args, **kwargs)
        self.gradnorm = 'nuclear' if gradnorm == "spectral" else "frobenius"

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        m_gradients = c['m_gradients']
        s_manifold = c['s_manifold']
        l_shm = c['l_shm']

        self.x = Variable(
            ('u1', (n_image, l_labels)),
            ('u2', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('w', (m_gradients, n_image, d_image, s_manifold)),
        )

        self.y = Variable(
            ('p', (n_image, d_image, l_labels)),
            ('g', (m_gradients, n_image, d_image, s_manifold)),
            ('q0', (n_image,)),
            ('q1', (n_image, l_labels)),
            ('q2', (n_image, l_labels)),
        )

        # start with a uniform distribution in each voxel
        self.state = (self.x.new(), self.y.new())
        u1k, u2k, vk, wk = self.x.vars(self.state[0])
        u1k[:] = 1.0/np.einsum('k->', c['b'])
        vk[:,0] = .5 / np.sqrt(np.pi)
Example #3
 def __init__(self, g, f, A):
     self.g = g
     self.f = f
     self.linop = A
     self.xy = Variable((self.g.x.size,), (self.f.x.size,))
     self.itervars = { 'xyk': self.xy.new() }
     self.constvars = { 'tau': 1.0, 'sigma': 1.0 }
Example #4
 def __init__(self, N, shift, a, b):
     Operator.__init__(self)
     self.shift = shift
     self.a = a
     self.b = b
     self.x = Variable(N)
     self.y = Variable(N)
Example #5
class MaskedAffineFct(Functional):
    """ F(x) = sum(c[mask,:]*x[mask,:]) + \delta_{x[not(mask),:] == 0} """
    def __init__(self, mask, c, conj=None):
        Functional.__init__(self)
        self.x = Variable(c.shape)
        self.mask = mask.astype(bool)
        self.nmask = ~self.mask
        self.c = c
        if conj is None:
            from opymize.functionals import MaskedIndicatorFct
            self.conj = MaskedIndicatorFct(mask, c, conj=self)
        else:
            self.conj = conj
        scale = self.x.vars(self.x.new())[0]
        scale[self.mask, :] = 1.0
        self._prox = ShiftScaleOp(self.x.size, self.c.ravel(), scale.ravel(),
                                  -1)

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        val = np.einsum('ik,ik->', x[self.mask, :], self.c[self.mask, :])
        infeas = 0.0 if np.all(self.mask) else norm(x[self.nmask, :],
                                                    ord=np.inf)
        result = (val, infeas)
        if grad:
            dF = self.x.new()
            # fill the flat gradient through the shaped view of the variable
            self.x.vars(dF)[0][self.mask, :] = self.c[self.mask, :]
            result = (result, dF)
        return result

    def prox(self, tau):
        self._prox.b = -tau
        if hasattr(self._prox, 'gpuvars'):
            self._prox.gpuvars['b'][:] = np.atleast_1d(self._prox.b)
        return self._prox
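MaskedAffineFct combines a linear term over the masked rows with the indicator that all unmasked rows vanish. A hypothetical usage sketch (assumes numpy and the opymize imports used above, e.g. MaskedIndicatorFct and ShiftScaleOp):

import numpy as np

c = np.arange(6, dtype=np.float64).reshape(3, 2)
mask = np.array([True, True, False])    # the last row of x must be zero

F = MaskedAffineFct(mask, c)
x = F.x.new()
xv = F.x.vars(x)[0]
xv[:] = 1.0
val, infeas = F(x)                      # val == c[:2,:].sum(), infeas > 0

xv[2, :] = 0.0                          # zero out the unmasked row
val, infeas = F(x)                      # now infeas == 0
P = F.prox(0.5)                         # ShiftScaleOp performing the prox step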
Example #6
    def __init__(self,
                 *args,
                 lbd=5.0,
                 alph=np.inf,
                 fdscheme="centered",
                 **kwargs):
        SublabelModel.__init__(self, *args, **kwargs)
        self.lbd = lbd
        self.alph = alph
        self.fdscheme = fdscheme
        logging.info("Init model '%s' (lambda=%.2e, alpha=%.2e, fdscheme=%s)" \
                     % (self.name, self.lbd, self.alph, self.fdscheme))

        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma
        d_image = self.data.d_image

        xvars = [('u', (N_image, L_labels)),
                 ('w12', (M_tris, N_image, s_gamma + 1)),
                 ('w', (M_tris, N_image, d_image, s_gamma))]
        yvars = [
            ('p', (N_image, d_image, L_labels)),
            ('q', (N_image, L_labels)),
            ('v12', (M_tris, N_image, s_gamma + 1)),
            ('v3', (N_image, )),
            ('g12', (M_tris, N_image, d_image * s_gamma + 1)),
        ]

        self.x = Variable(*xvars)
        self.y = Variable(*yvars)
Example #7
    def __init__(self, *args, lbd=1.0, fdscheme="centered", **kwargs):
        SublabelModel.__init__(self, *args, **kwargs)
        self.lbd = lbd
        self.fdscheme = fdscheme
        logging.info("Init model '%s' (lambda=%.2e,fdscheme=%s)" \
                     % (self.name, self.lbd, self.fdscheme))

        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma
        d_image = self.data.d_image

        xvars = [('u', (N_image, L_labels)),
                 ('w12', (M_tris, N_image, s_gamma + 1)),
                 ('w', (M_tris, N_image, d_image, s_gamma))]
        yvars = [('p', (N_image, d_image, L_labels)),
                 ('q', (N_image, L_labels)),
                 ('v12', (M_tris, N_image, s_gamma + 1)), ('v3', (N_image, )),
                 ('g', (M_tris, N_image, d_image, s_gamma))]

        if self.data.R.shape[-1] == s_gamma + 1:
            del xvars[1]
            del yvars[2]

        self.x = Variable(*xvars)
        self.y = Variable(*yvars)
Example #8
    def __init__(self, *args, conf_lvl=0.9, **kwargs):
        ModelHARDI_SHM.__init__(self, *args, **kwargs)

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        self.data.init_bounds(conf_lvl)
        _, f1, f2 = self.data.bounds
        c['f1'], c['f2'] = [np.array(a.T, order='C') for a in [f1, f2]]

        self.x = Variable(
            ('u1', (n_image, l_labels)),
            ('u2', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
        )

        self.y = Variable(
            ('p', (n_image, d_image, l_shm)),
            ('q0', (n_image, )),
            ('q1', (n_image, l_labels)),
            ('q2', (n_image, l_labels)),
            ('q3', (n_image, l_labels)),
            ('q4', (n_image, l_labels)),
        )

        # start with a uniform distribution in each voxel
        self.state = (self.x.new(), self.y.new())
        u1k, u2k, vk = self.x.vars(self.state[0])
        u1k[:] = 1.0 / np.einsum('k->', c['b'])
        vk[:, 0] = .5 / np.sqrt(np.pi)
Example #9
 def __init__(self, data, vol=None, shift=0, mask=None, conj=None):
     Functional.__init__(self)
     self.x = Variable(data.shape)
     self.f = np.atleast_2d(data)
     self.shift = shift
     self.vol = np.ones(data.shape[1]) if vol is None else vol
     self.mask = np.ones(data.shape[0],
                         dtype=bool) if mask is None else mask
     if conj is None:
         cj_vol = 1.0 / self.vol
         cj_data = np.zeros_like(self.f)
         cj_data[self.mask, :] = np.einsum('ik,k->ik', self.f[self.mask, :],
                                           -self.vol)
         cj_shift = -0.5 * np.einsum('ik,k->', cj_data**2, cj_vol)
         cj_shift -= self.shift
         self.conj = SSD(cj_data,
                         shift=cj_shift,
                         vol=cj_vol,
                         mask=mask,
                         conj=self)
     else:
         self.conj = conj
     prox_shift = np.zeros_like(self.f)
     prox_shift[self.mask, :] = self.f[self.mask, :]
     prox_shift = prox_shift.ravel()
     self._prox = ShiftScaleOp(self.x.size, prox_shift, 0.5, 1.0)
Example #10
    def __init__(self, imagedims, nchannels, imageh=None,
                       boundary="neumann", adjoint=None):
        LinOp.__init__(self)
        npoints = np.prod(imagedims)
        ndims = len(imagedims)
        self.imagedims = imagedims
        self.imageh = np.ones(ndims) if imageh is None else imageh
        self.bc = boundary
        self.nchannels = nchannels
        self.x = Variable((npoints, nchannels))
        self.y = Variable((npoints, nchannels))
        self._kernel = None

        if self.bc[-4:] != "_adj":
            self.spmat = lplcnopn(self.imagedims, components=self.nchannels,
                                  steps=self.imageh, boundaries=self.bc)

        if self.bc == "neumann":
            self.adjoint = self
        elif self.bc in self.supported_bc:
            if adjoint is None:
                adj_bc = self.bc[:-4]
                if self.bc[-4:] != "_adj":
                    adj_bc = "%s_adj" % self.bc
                self.adjoint = LaplacianOp(imagedims, nchannels, imageh=imageh,
                                           boundary=adj_bc, adjoint=self)
            else:
                self.adjoint = adjoint
        else:
            raise Exception("Unknown boundary conditions: %s" % self.bc)

        if self.bc[-4:] == "_adj":
            self.spmat = self.adjoint.spmat.T
Example #11
    def __init__(self, *args, lbd=1.0, regularizer="tv", alph=np.inf,
                              fdscheme="centered", **kwargs):
        SublabelModel.__init__(self, *args, **kwargs)
        self.lbd = lbd
        self.regularizer = regularizer
        self.alph = alph
        self.fdscheme = fdscheme
        logging.info("Init model '%s' (%s regularizer, lambda=%.2e, "
                                      "alpha=%.2e, fdscheme=%s)" \
                     % (self.name, self.regularizer, self.lbd,
                        self.alph, self.fdscheme))

        imagedims = self.data.imagedims
        N_image = self.data.N_image
        L_labels = self.data.L_labels
        M_tris = self.data.M_tris
        s_gamma = self.data.s_gamma
        d_image = self.data.d_image

        xvars = [('u', (N_image, L_labels)),
                 ('w12', (M_tris, N_image, s_gamma+1)),
                 ('w', (M_tris, N_image, d_image, s_gamma))]
        yvars = [('p', (N_image, d_image, L_labels)),
                 ('q', (N_image, L_labels)),
                 ('v12a', (M_tris, N_image, s_gamma+1)),
                 ('v12b', (M_tris, N_image, s_gamma+1)),
                 ('v3', (N_image,)),]

        if self.regularizer == "tv":
            yvars.append(('g', (M_tris, N_image, d_image, s_gamma)))
        elif self.regularizer == "quadratic":
            yvars.append(('g12', (M_tris, N_image, d_image*s_gamma+1)))

        self.x = Variable(*xvars)
        self.y = Variable(*yvars)
Example #12
class L1NormsConj(Functional):
    """ F(x) = \sum_i \delta_{|x[i,:,:]| \leq lbd}
    Supported norms are 'frobenius' and 'spectral'
    """
    def __init__(self, N, M, lbd, matrixnorm="frobenius", conj=None):
        Functional.__init__(self)
        assert matrixnorm in ['frobenius', 'spectral']
        self.x = Variable((N,) + M)
        self.lbd = lbd
        self.matrixnorm = matrixnorm
        conjnorm = 'nuclear' if matrixnorm == 'spectral' else 'frobenius'
        self.conj = L1Norms(N, M, lbd, conjnorm, conj=self) if conj is None else conj
        self._prox = L1NormsProj(N, M, self.lbd, matrixnorm)
        self._xnorms = np.zeros((N,), order='C')

    def __call__(self, x, grad=False):
        x = self.x.vars(x)[0]
        norms(x, self._xnorms, self.matrixnorm)
        val = 0
        infeas = norm(np.fmax(0, self._xnorms - self.lbd), ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau!
        return self._prox
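L1NormsConj is the indicator of pointwise matrix-norm balls of radius lbd; its convex conjugate is the corresponding group-L1 norm (L1Norms). A hypothetical sketch (assumes numpy and the opymize helpers norms/L1NormsProj used above):

import numpy as np

N, M = 4, (2, 3)                    # four matrix-valued points of shape 2x3
G = L1NormsConj(N, M, lbd=1.0, matrixnorm="frobenius")

y = G.x.new()
val, infeas = G(y)                  # zero matrices lie inside every lbd-ball

G.x.vars(y)[0][0] = 10.0            # push the first matrix outside its ball
val, infeas = G(y)                  # infeas > 0
P = G.prox(1.0)                     # projection onto the balls (tau-independent)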
Example #13
    def __init__(self, *args):
        ModelHARDI_SHM.__init__(self, *args)

        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        self.x = Variable(
            ('u1', (n_image, l_labels)),
            ('u2', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
        )

        self.y = Variable(
            ('p', (n_image, d_image, l_shm)),
            ('q0', (n_image, )),
            ('q1', (n_image, l_labels)),
            ('q2', (n_image, l_labels)),
        )

        # start with a uniform distribution in each voxel
        self.state = (self.x.new(), self.y.new())
        u1k, u2k, vk = self.x.vars(self.state[0])
        u1k[:] = 1.0 / np.einsum('k->', c['b'])
        vk[:, 0] = .5 / np.sqrt(np.pi)
Example #14
class SplitSum(Functional):
    """ F(x1,x2,...) = F1(x1) + F2(x2) + ... """
    def __init__(self, fcts, conj=None):
        Functional.__init__(self)
        self.x = Variable(*[(F.x.size, ) for F in fcts])
        self.fcts = fcts
        self.conj = SplitSum([F.conj for F in fcts],
                             conj=self) if conj is None else conj

    def __call__(self, x, grad=False):
        X = self.x.vars(x)
        results = [F(xi, grad=grad) for F, xi in zip(self.fcts, X)]
        if grad:
            val = sum([res[0][0] for res in results])
            infeas = sum([res[0][1] for res in results])
            dF = np.concatenate([res[1] for res in results])
            return (val, infeas), dF
        else:
            val = sum([res[0] for res in results])
            infeas = max([res[1] for res in results])
            return (val, infeas)

    def prox(self, tau):
        if type(tau) is np.ndarray:
            tau = self.x.vars(tau)
            prox_ops = []
            for F, Ftau in zip(self.fcts, tau):
                prox_ops.append(F.prox(Ftau))
            return SplitOp(prox_ops)
        else:
            return SplitOp([F.prox(tau) for F in self.fcts])
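SplitSum evaluates a separate functional on each block of a split variable and applies the corresponding prox operators block-wise through SplitOp. A hypothetical sketch combining two functionals from the examples above (assumes numpy, SplitOp and the other opymize dependencies of those classes are importable):

import numpy as np

c1 = np.ones((3, 2)); m1 = np.array([True, False, True])
c2 = np.zeros((2, 2)); m2 = np.array([True, True])

F = SplitSum([MaskedAffineFct(m1, c1), ConstrainFct(m2, c2)])
x = F.x.new()                        # one flat vector holding both blocks
val, infeas = F(x)                   # sum of values, max of infeasibilities
P = F.prox(0.5)                      # SplitOp of the individual prox operators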
Example #15
class QuadEpiInd(Functional):
    """ \sum_i \delta_{f_i(x[i,:-1]) \leq x[i,-1]}
        f_i(x) := 0.5*a*|x|^2 + <b[i],x> + c[i]
     """
    def __init__(self, N, M, a=1.0, b=None, c=None, conj=None):
        Functional.__init__(self)
        assert a > 0
        self.x = Variable((N, M + 1))
        self.a = a
        self.b = np.zeros((N, M)) if b is None else b
        self.c = np.zeros((N, )) if c is None else c
        if conj is None:
            da, db, dc = quad_dual_coefficients(self.a, self.b, self.c)
            self.conj = QuadEpiSupp(N, M, a=da, b=db, c=dc, conj=self)
        else:
            self.conj = conj
        self._prox = QuadEpiProj(N, M, alph=a, b=b, c=c)

    def __call__(self, x, grad=False):
        assert not grad
        x = self.x.vars(x)[0]
        fx = (0.5 * self.a * x[:, :-1]**2 +
              self.b * x[:, :-1]).sum(axis=1) + self.c
        dif = fx - x[:, -1]
        val = 0
        infeas = np.linalg.norm(np.fmax(0, dif), ord=np.inf)
        result = (val, infeas)
        if grad:
            result = result, self.x.new()
        return result

    def prox(self, tau):
        # independent of tau!
        return self._prox
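QuadEpiInd is the indicator of the pointwise epigraph of the quadratics f_i; its conjugate is the corresponding support function QuadEpiSupp. A hypothetical sketch with b = c = 0 (assumes numpy and the opymize helpers quad_dual_coefficients, QuadEpiSupp and QuadEpiProj):

import numpy as np

N, M = 3, 2
F = QuadEpiInd(N, M, a=1.0)          # f_i(x) = 0.5*|x|^2
x = F.x.new()
xv = F.x.vars(x)[0]                  # shape (N, M+1): point and epigraph value
xv[:, :M] = 1.0                      # f_i(x) = 0.5*(1 + 1) = 1.0
xv[:, -1] = 2.0                      # epigraph variable above f_i
val, infeas = F(x)                   # feasible: infeas == 0.0

xv[:, -1] = 0.5                      # below the parabola
val, infeas = F(x)                   # infeas == 0.5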
Example #16
    def __init__(self, I, If, J, v, b, conj=None):
        """
        Args:
            I : ndarray of bools, shape (nfuns, npoints)
            If : nfuns lists of nregions arrays, shape (nfaces,ndim+1) each
            J : ndarray of ints, shape (nregions, nsubpoints)
            v : ndarray of floats, shape (npoints, ndim)
            b : ndarray of floats, shape (nfuns, npoints)
        """
        Functional.__init__(self)

        nfuns, npoints = I.shape
        nregions, nsubpoints = J.shape
        ndim = v.shape[1]
        self.I, self.J, self.v, self.b = I, J, v, b

        self.x = Variable((nregions, nfuns, ndim + 1))

        if conj is None:
            self.conj = EpigraphSupp(I, If, J, v, b, conj=self)
        else:
            self.conj = conj

        self.A = self.conj.A
        self.b = self.conj.b
        self._prox = EpigraphProj(I, J, v, b, Ab=(self.A, self.b))
Example #17
 def __init__(self, f1, f2, alpha=2.0, tau=1.0):
     Operator.__init__(self)
     self.x = Variable(f1.shape)
     self.y = Variable(f1.shape)
     self.f1 = f1
     self.f2 = f2
     self.alpha = alpha
     self.tau = tau
Example #18
 def __init__(self, M, N=None, adjoint=None):
     LinOp.__init__(self)
     N = M if N is None else N
     self.x = Variable(N)
     self.y = Variable(M)
     self.adjoint = ZeroOp(N, M,
                           adjoint=self) if adjoint is None else adjoint
     self._call_cpu = self._call_gpu = self._call
Example #19
 def __init__(self, N, shift, a, b):
     Operator.__init__(self)
     self.shift = shift
     self.a = a
     self.b = b
     self.x = Variable(N)
     self.y = Variable(N)
     self._jacobian = ScaleOp(N, self.a)
Example #20
 def __init__(self, N, c1=0, c2=0, conj=None):
     Functional.__init__(self)
     self.c1 = c1
     self.c2 = c2
     self.x = Variable(N)
     from opymize.functionals import AffineFct
     self.conj = AffineFct(N, b=-c2, c=c1,
                           conj=self) if conj is None else conj
     self._prox = ConstOp(N, self.x.new() + c1)
Example #21
 def __init__(self, N, A):
     LinOp.__init__(self)
     self.x = Variable((N, A.size))
     self.y = Variable((N, A.size))
     self.A = A
     self.adjoint = self
     self.spmat = einsumop('k,ik->ik', self.A, self.x[0]['shape'])
     self._kernel = None
     self.A_gpu = None
Example #22
 def __init__(self, mask, const):
     Operator.__init__(self)
     self.x = Variable(const.shape)
     self.y = self.x
     self.mask = mask
     self.const = const
     scale = np.ones_like(const)
     scale[mask, :] = 0.0
     self._jacobian = ScaleOp(self.x.size, scale.ravel())
Example #23
 def __init__(self, N, M, lbd, matrixnorm="frobenius", conj=None):
     Functional.__init__(self)
     assert matrixnorm in ['frobenius', 'nuclear']
     self.x = Variable((N,) + M)
     self.lbd = lbd
     self.matrixnorm = matrixnorm
     conjnorm = 'spectral' if matrixnorm == 'nuclear' else 'frobenius'
     self.conj = L1NormsConj(N, M, lbd, conjnorm, conj=self) if conj is None else conj
     self._xnorms = np.zeros((N,), order='C')
Example #24
 def __init__(self, b, shift, a=1, mask=None):
     Operator.__init__(self)
     assert b.size == shift.shape[1]
     self.x = Variable(shift.shape)
     self.y = Variable(shift.shape)
     self.b = b
     self.a = a
     self.shift = shift
     self.mask = np.ones(shift.shape[0],
                         dtype=bool) if mask is None else mask
Example #25
 def __init__(self, mask, c, conj=None):
     Functional.__init__(self)
     self.x = Variable(c.shape)
     self.mask = mask
     self.c = c
     if conj is None:
         from opymize.functionals import MaskedAffineFct
         self.conj = MaskedAffineFct(mask, c, conj=self)
     else:
         self.conj = conj
     self._prox = ConstrainOp(mask, c)
Example #26
 def __init__(self, A, tau, sigma):
     LinOp.__init__(self)
     self.A = A
     self.tau = tau
     self.sigma = sigma
     self.x = Variable((A.x.size,), (A.y.size,))
     self.y = self.x
     self.xtmp = self.x.new()
     self.K = None
     self.H = None
     self.adjoint = SemismoothNewtonSystemAdjoint(self)
Example #27
    def setup_solver_cvx(self):
        c = self.constvars
        imagedims = c['imagedims']
        n_image = c['n_image']
        d_image = c['d_image']
        l_labels = c['l_labels']
        l_shm = c['l_shm']

        self.cvx_x = Variable(
            ('p', (l_shm, d_image, n_image)),
            ('q0', (n_image, )),
            ('q1', (l_labels, n_image)),
            ('q2', (l_labels, n_image)),
        )

        self.cvx_y = Variable(
            ('u1', (n_image, l_labels)),
            ('v', (n_image, l_shm)),
            ('misc', (n_image, )),
        )

        p, q0, q1, q2 = [cvxVariable(*a['shape']) for a in self.cvx_x.vars()]
        self.cvx_vars = p + [q0, q1, q2]

        fid_fun_dual = 0
        for i in range(n_image):
            for k in range(l_labels):
                fid_fun_dual += -1.0/c['b'][k]*(cvx.power(q2[k,i],2)/2 \
                             + cvx.maximum(q2[k,i]*c['b'][k]*c['f1'][i,k],
                                 q2[k,i]*c['b'][k]*c['f2'][i,k]))

        self.cvx_obj = cvx.Maximize(fid_fun_dual - cvx.sum(q0))

        div_op = sparse_div_op(imagedims)

        self.cvx_dual = True
        self.cvx_constr = []

        # u1_constr
        for i in range(n_image):
            self.cvx_constr.append(c['b'] * q0[i] - q1[:, i] >= 0)

        # v_constr
        for i in range(n_image):
            for k in range(l_shm):
                Yk = cvx.vec(c['Y'][:, k])
                self.cvx_constr.append(
                    Yk.T*(c['M'][k]*q2[:,i] + q1[:,i]) \
                        - cvxOp(div_op, p[k], i) == 0)

        # additional inequality constraints
        for i in range(n_image):
            self.cvx_constr.append(sum(cvx.sum_squares(p[k][:,i]) \
                                       for k in range(l_shm)) <= c['lbd']**2)
Example #28
 def __init__(self, data, vol=None, mask=None, conj=None):
     Functional.__init__(self)
     self.f = np.atleast_2d(data)
     self.x = Variable(self.f.shape)
     self.vol = np.ones(data.shape[1]) if vol is None else vol
     self.mask = np.ones(data.shape[0],
                         dtype=bool) if mask is None else mask
     if conj is None:
         self.conj = MaxFctConj(data, weights=vol, mask=mask, conj=self)
     else:
         self.conj = conj
Example #29
 def __init__(self, data, weights=None, mask=None, conj=None):
     Functional.__init__(self)
     self.f = np.atleast_2d(data)
     self.x = Variable(self.f.shape)
     self.weights = np.ones(data.shape[1]) if weights is None else weights
     self.mask = np.ones(data.shape[0],
                         dtype=bool) if mask is None else mask
     if conj is None:
         self.conj = MaxFct(data, vol=self.weights, mask=mask, conj=self)
     else:
         self.conj = conj
     self._prox = IntervProj(self.weights, self.f, mask=mask)
Example #30
 def __init__(self, f1, f2, vol=None, mask=None, conj=None):
     Functional.__init__(self)
     self.x = Variable(f1.shape)
     self.a = 0.5 * (f1 + f2)
     self.b = 0.5 * (f2 - f1)
     self.vol = np.ones(f1.shape[1]) if vol is None else vol
     self.mask = np.ones(f1.shape[0], dtype=bool) if mask is None else mask
     if conj is None:
         cj_vol = 1.0 / self.vol
         self.conj = BndSSD(f1, f2, vol=cj_vol, mask=mask, conj=self)
     else:
         self.conj = conj