def getDualProduct(self, m, r):
    """
    Returns the dual product of a gradient represented by X=r[1] and Y=r[0]
    with a level set function m:

         *Y_i*m_i + X_ij*m_{i,j}*

    :type m: `Data`
    :type r: `ArithmeticTuple`
    :rtype: ``float``
    """
    y_part = r[0]
    x_part = r[1]
    result = 0
    # Y contribution: integral of Y_i * m_i (skipped when Y carries no data)
    if not y_part.isEmpty():
        result = result + integrate(inner(y_part, m))
    # X contribution: integral of X_ij * m_{i,j}
    if not x_part.isEmpty():
        result = result + integrate(inner(x_part, grad(m)))
    return result
    def getDualProduct(self, m, r):
        """
        Returns the dual product of a gradient represented by X=r[1] and
        Y=r[0] with a level set function m:

             *Y_i*m_i + X_ij*m_{i,j}*

        :type m: `Data`
        :type r: `ArithmeticTuple`
        :rtype: ``float``
        """
        y_part = r[0]
        x_part = r[1]
        total = 0
        # integral of Y_i * m_i, only when the Y component carries data
        if not y_part.isEmpty():
            total += integrate(inner(y_part, m))
        # integral of X_ij * m_{i,j}, only when the X component carries data
        if not x_part.isEmpty():
            total += integrate(inner(x_part, grad(m)))
        return total
    def _setCoefficients(self, pde, system):
        """
        Sets the coefficients of *pde* for a manufactured-solution test.

        :param pde: PDE object to configure in place (coefficients A, D, Y,
                    y plus the constraint pair r/q are set)
        :param system: if True a coupled system of ``dim`` equations is set
                       up, otherwise a single scalar equation
        """
        FAC_DIAG = 1.
        FAC_OFFDIAG = -0.4
        x = Solution(self.domain).getX()
        # constrain the solution on the x[0] == 0 face of the domain
        mask = whereZero(x[0])
        dim = self.domain.getDim()
        # reference solution and its gradient (from sibling helpers)
        u_ex = self._getSolution(system)
        g_ex = self._getGrad(system)

        if system:
            # block-diagonal second-order coefficient:
            # A[i,j,i,l] = delta_{jl}, i.e. an uncoupled Laplacian per component
            A = Tensor4(0., Function(self.domain))
            for i in range(dim):
                A[i,:,i,:] = kronecker(dim)

            # right-hand side chosen so that u_ex satisfies the PDE with
            # the coupling matrix D defined below
            Y = Vector(0., Function(self.domain))
            if dim == 2:
                Y[0] = u_ex[0]*FAC_DIAG+u_ex[1]*FAC_OFFDIAG
                Y[1] = u_ex[1]*FAC_DIAG+u_ex[0]*FAC_OFFDIAG
            else:
                Y[0] = u_ex[0]*FAC_DIAG+u_ex[2]*FAC_OFFDIAG+u_ex[1]*FAC_OFFDIAG
                Y[1] = u_ex[1]*FAC_DIAG+u_ex[0]*FAC_OFFDIAG+u_ex[2]*FAC_OFFDIAG
                Y[2] = u_ex[2]*FAC_DIAG+u_ex[1]*FAC_OFFDIAG+u_ex[0]*FAC_OFFDIAG
            # D mixes components: FAC_DIAG on the diagonal, FAC_OFFDIAG off it;
            # y is the natural boundary term matching grad(u_ex)
            pde.setValue(r=u_ex, q=mask*numpy.ones(dim,),
                         A=A,
                         D=kronecker(dim)*(FAC_DIAG-FAC_OFFDIAG)+numpy.ones((dim,dim))*FAC_OFFDIAG,
                         Y=Y,
                         y=matrixmult(g_ex,self.domain.getNormal()))
        else:
            # scalar case: plain Laplacian with matching Neumann data
            pde.setValue(r=u_ex, q=mask, A=kronecker(dim),
                         y=inner(g_ex, self.domain.getNormal()))
    def setCoefficients(self, pde, system):
        """
        Sets the coefficients of *pde* for a manufactured-solution test,
        using the class-level coupling factors and constant forcing offsets.

        :param pde: PDE object to configure in place
        :param system: if True a coupled system of ``dim`` equations is set
                       up, otherwise a single scalar equation
        """
        FAC_DIAG = self.FAC_DIAG
        FAC_OFFDIAG =self.FAC_OFFDIAG
        x = Solution(self.domain).getX()
        # constrain the solution on the x[0] == 0 face of the domain
        mask = whereZero(x[0])
        dim = self.domain.getDim()
        u_ex = self.getSolution(system)   # reference (exact) solution
        g_ex = self.getGrad(system)       # gradient of the exact solution

        if system:
            # block-diagonal second-order coefficient: A[i,j,i,l] = delta_{jl}
            A = Tensor4(0., Function(self.domain))
            for i in range(dim):
                A[i,:,i,:] = kronecker(dim)

            # forcing matching u_ex; the trailing constants (-20, -10, ...)
            # are fixed source offsets belonging to this particular test case
            Y = Vector(0., Function(self.domain))
            if dim == 2:
                Y[0] = u_ex[0]*FAC_DIAG+u_ex[1]*FAC_OFFDIAG-20
                Y[1] = u_ex[1]*FAC_DIAG+u_ex[0]*FAC_OFFDIAG-10
            else:
                Y[0] = u_ex[0]*FAC_DIAG+u_ex[2]*FAC_OFFDIAG+u_ex[1]*FAC_OFFDIAG-60
                Y[1] = u_ex[1]*FAC_DIAG+u_ex[0]*FAC_OFFDIAG+u_ex[2]*FAC_OFFDIAG-20
                Y[2] = u_ex[2]*FAC_DIAG+u_ex[1]*FAC_OFFDIAG+u_ex[0]*FAC_OFFDIAG-22
            pde.setValue(r=u_ex, q=mask*numpy.ones(dim,),
                         A=A,
                         D=kronecker(dim)*(FAC_DIAG-FAC_OFFDIAG)+numpy.ones((dim,dim))*FAC_OFFDIAG,
                         Y=Y,
                         y=matrixmult(g_ex,self.domain.getNormal()))
        else:
            # scalar case: Laplacian plus a constant source per dimension
            pde.setValue(r=u_ex, q=mask, A=kronecker(dim),
                         y=inner(g_ex, self.domain.getNormal()))
            if dim == 2:
                pde.setValue(Y=-20.)
            else:
                pde.setValue(Y=-60.)
    def getValue(self, m, grad_m):
        """
        returns the value of the cost function J with respect to m.
        This equation is specified in the inversion cookbook.

        :note: this variant requires *m* to be the point previously supplied
               to the caching mechanism (``__pre_input``); the passed
               arguments are replaced by the cached values.
        :rtype: ``float``
        """

        # guard: evaluation is only valid at the cached point
        if m != self.__pre_input:
            raise RuntimeError("Attempt to change point using getValue")
        # substituting cached values
        m = self.__pre_input
        grad_m = self.__pre_args

        mu = self.__mu
        mu_c = self.__mu_c
        DIM = self.getDomain().getDim()
        numLS = self.getNumLevelSets()

        A = 0
        # zeroth-order regularization term: w0 * m^2
        if self.__w0 is not None:
            r = inner(integrate(m ** 2 * self.__w0), mu)
            self.logger.debug("J_R[m^2] = %e" % r)
            A += r

        # first-order regularization term: w1 * grad(m)^2
        if self.__w1 is not None:
            if numLS == 1:
                r = integrate(inner(grad_m ** 2, self.__w1)) * mu
                self.logger.debug("J_R[grad(m)] = %e" % r)
                A += r
            else:
                for k in range(numLS):
                    r = mu[k] * integrate(inner(grad_m[k, :] ** 2, self.__w1[k, :]))
                    self.logger.debug("J_R[grad(m)][%d] = %e" % (k, r))
                    A += r

        # cross-gradient coupling between distinct level-set components:
        # |g_k|^2 |g_l|^2 - (g_k . g_l)^2 vanishes when gradients are parallel
        if numLS > 1:
            for k in range(numLS):
                gk = grad_m[k, :]
                len_gk = length(gk)
                for l in range(k):
                    gl = grad_m[l, :]
                    r = mu_c[l, k] * integrate(self.__wc[l, k] * ((len_gk * length(gl)) ** 2 - inner(gk, gl) ** 2))
                    self.logger.debug("J_R[cross][%d,%d] = %e" % (l, k, r))
                    A += r
        return A / 2
# Example #6
# 0
 def G(self, T, alpha):
     """
     Tangential operator for the Taylor-Galerkin scheme.

     Sets up and solves the advection-diffusion PDE for temperature *T*
     and returns the PDE solution.

     :note: ``thermal_permabilty`` is spelled as the attribute is defined
            on this class (sic); do not rename it here alone.
     """
     g = grad(T)
     self.__pde.setValue(X=-self.thermal_permabilty*g, \
                         Y=self.thermal_source-self.__rhocp*inner(self.velocity,g), \
                         r=(self.__fixed_T-self.temperature)*alpha,\
                         q=self.location_fixed_temperature)
     return self.__pde.getSolution()
# Example #7
# 0
 def G(self, T, alpha):
     """
     Tangential operator for the Taylor-Galerkin time integrator: assembles
     the advection-diffusion right-hand side for temperature *T* and returns
     the PDE solution.
     """
     gradT = grad(T)
     diffusive_flux = -self.thermal_permabilty * gradT
     source = self.thermal_source - self.__rhocp * inner(self.velocity, gradT)
     constraint_value = (self.__fixed_T - self.temperature) * alpha
     self.__pde.setValue(X=diffusive_flux,
                         Y=source,
                         r=constraint_value,
                         q=self.location_fixed_temperature)
     return self.__pde.getSolution()
    def getValue(self, m, grad_m):
        """
        Returns the value of the cost function J with respect to m.
        This equation is specified in the inversion cookbook.

        :rtype: ``float``
        """
        mu = self.__mu
        mu_c = self.__mu_c
        DIM = self.getDomain().getDim()
        numLS = self.getNumLevelSets()

        total = 0
        # zeroth-order term: w0 * m^2
        if self.__w0 is not None:
            contrib = inner(integrate(m**2 * self.__w0), mu)
            self.logger.debug("J_R[m^2] = %e" % contrib)
            total += contrib

        # first-order term: w1 * grad(m)^2, per level-set component
        if self.__w1 is not None:
            if numLS == 1:
                contrib = integrate(inner(grad_m**2, self.__w1)) * mu
                self.logger.debug("J_R[grad(m)] = %e" % contrib)
                total += contrib
            else:
                for k in range(numLS):
                    contrib = mu[k] * integrate(
                        inner(grad_m[k, :]**2, self.__w1[k, :]))
                    self.logger.debug("J_R[grad(m)][%d] = %e" % (k, contrib))
                    total += contrib

        # cross-gradient coupling between pairs of level-set components
        if numLS > 1:
            for k in range(numLS):
                gk = grad_m[k, :]
                len_gk = length(gk)
                for l in range(k):
                    gl = grad_m[l, :]
                    cross = (len_gk * length(gl))**2 - inner(gk, gl)**2
                    contrib = mu_c[l, k] * integrate(self.__wc[l, k] * cross)
                    self.logger.debug("J_R[cross][%d,%d] = %e" % (l, k, contrib))
                    total += contrib
        return total / 2
    def getGradientAtPoint(self):
        """
        returns the gradient of the cost function J with respect to m.

        :note: This implementation returns Y_k=dPsi/dm_k and X_kj=dPsi/dm_kj
        """

        # Using cached values
        m = self.__pre_input
        grad_m = self.__pre_args

        mu = self.__mu
        mu_c = self.__mu_c
        DIM = self.getDomain().getDim()
        numLS = self.getNumLevelSets()

        # NOTE(review): the cached grad_m assigned above is immediately
        # overwritten by this recomputation -- presumably intentional to force
        # the Function space, but verify against the caching contract.
        grad_m = grad(m, Function(m.getDomain()))
        # Y component (dPsi/dm_k): zeroth-order term or a zero placeholder
        if self.__w0 is not None:
            Y = m * self.__w0 * mu
        else:
            if numLS == 1:
                Y = Scalar(0, grad_m.getFunctionSpace())
            else:
                Y = Data(0, (numLS,), grad_m.getFunctionSpace())

        # X component (dPsi/dm_kj): first-order term scaled per component
        if self.__w1 is not None:

            if numLS == 1:
                X = grad_m * self.__w1 * mu
            else:
                X = grad_m * self.__w1
                for k in range(numLS):
                    X[k, :] *= mu[k]
        else:
            X = Data(0, grad_m.getShape(), grad_m.getFunctionSpace())

        # cross gradient terms:
        if numLS > 1:
            for k in range(numLS):
                grad_m_k = grad_m[k, :]
                l2_grad_m_k = length(grad_m_k) ** 2
                for l in range(k):
                    grad_m_l = grad_m[l, :]
                    l2_grad_m_l = length(grad_m_l) ** 2
                    grad_m_lk = inner(grad_m_l, grad_m_k)
                    f = mu_c[l, k] * self.__wc[l, k]
                    # derivative of |g_k|^2|g_l|^2 - (g_k.g_l)^2 w.r.t. each gradient
                    X[l, :] += f * (l2_grad_m_k * grad_m_l - grad_m_lk * grad_m_k)
                    X[k, :] += f * (l2_grad_m_l * grad_m_k - grad_m_lk * grad_m_l)

        return ArithmeticTuple(Y, X)
    def getGradient(self, m, grad_m):
        """
        returns the gradient of the cost function J with respect to m.

        :note: This implementation returns Y_k=dPsi/dm_k and X_kj=dPsi/dm_kj
        :note: the *grad_m* argument is not used -- the gradient is recomputed
               from *m* below (visible rebinding); verify callers do not rely
               on the passed value.
        """

        mu = self.__mu
        mu_c = self.__mu_c
        DIM = self.getDomain().getDim()
        numLS = self.getNumLevelSets()

        # recompute the gradient of m on the Function space (ignores argument)
        grad_m = grad(m, Function(m.getDomain()))
        # Y component (dPsi/dm_k): zeroth-order term or a zero placeholder
        if self.__w0 is not None:
            Y = m * self.__w0 * mu
        else:
            if numLS == 1:
                Y = Scalar(0, grad_m.getFunctionSpace())
            else:
                Y = Data(0, (numLS, ), grad_m.getFunctionSpace())

        # X component (dPsi/dm_kj): first-order term scaled per component
        if self.__w1 is not None:

            if numLS == 1:
                X = grad_m * self.__w1 * mu
            else:
                X = grad_m * self.__w1
                for k in range(numLS):
                    X[k, :] *= mu[k]
        else:
            X = Data(0, grad_m.getShape(), grad_m.getFunctionSpace())

        # cross gradient terms:
        if numLS > 1:
            for k in range(numLS):
                grad_m_k = grad_m[k, :]
                l2_grad_m_k = length(grad_m_k)**2
                for l in range(k):
                    grad_m_l = grad_m[l, :]
                    l2_grad_m_l = length(grad_m_l)**2
                    grad_m_lk = inner(grad_m_l, grad_m_k)
                    f = mu_c[l, k] * self.__wc[l, k]
                    # derivative of |g_k|^2|g_l|^2 - (g_k.g_l)^2 w.r.t. each gradient
                    X[l, :] += f * (l2_grad_m_k * grad_m_l -
                                    grad_m_lk * grad_m_k)
                    X[k, :] += f * (l2_grad_m_l * grad_m_k -
                                    grad_m_lk * grad_m_l)

        return ArithmeticTuple(Y, X)
# Example #11
# 0
    def getGradient(self, sigma, u, uTar, uTai, uTu):
        """
        Returns the gradient of the defect with respect to density.

        :param sigma: a suggestion for complex 1/V**2
        :type sigma: ``escript.Data`` of shape (2,)
        :param u: a u vector
        :type u: ``escript.Data`` of shape (2,)
        :param uTar: equals `integrate( w  * (data[0]*u[0]+data[1]*u[1]))`
        :type uTar: `float`
        :param uTai: equals `integrate( w  * (data[1]*u[0]-data[0]*u[1]))`
        :type uTai: `float`
        :param uTu: equals `integrate( w  * (u,u))`
        :type uTu: `float`
        """
        pde = self.setUpPDE()

        # scaled-defect variant: Z is the derivative of the normalized misfit
        if self.scaleF and abs(uTu) > 0:
            Z = ((uTar**2 + uTai**2) / uTu**2) * escript.interpolate(
                u, self.__data.getFunctionSpace())
            Z[0] += (-uTar / uTu) * self.__data[0] + (-uTai /
                                                      uTu) * self.__data[1]
            Z[1] += (-uTar /
                     uTu) * self.__data[1] + uTai / uTu * self.__data[0]

        else:
            # unscaled variant: plain residual
            Z = u - self.__data
        # point data gets the Dirac-delta coefficient, field data the regular one
        if Z.getFunctionSpace() == escript.DiracDeltaFunctions(
                self.getDomain()):
            pde.setValue(y_dirac=self.__weight * Z)
        else:
            pde.setValue(y=self.__weight * Z)
        # D encodes the complex factor -omega^2 * sigma as a real 2x2 block
        # ([[re, im], [-im, re]] with signs as below)
        D = pde.getCoefficient('D')
        D[0, 0] = -self.__omega**2 * sigma[0]
        D[0, 1] = -self.__omega**2 * sigma[1]
        D[1, 0] = self.__omega**2 * sigma[1]
        D[1, 1] = -self.__omega**2 * sigma[0]
        pde.setValue(D=D)
        # adjoint solution scaled by omega^2; assemble the complex product
        # (ZTo2 * conj(u)) as a shape-(2,) Data via the unit-vector lists
        ZTo2 = pde.getSolution() * self.__omega**2
        return escript.inner(
            ZTo2, u) * [1, 0] + (ZTo2[1] * u[0] - ZTo2[0] * u[1]) * [0, 1]
 def calculateGradientWorker(self, **args):
    """
    Worker computing the split-inversion cost-function gradient.

    vnames1 gives the names to store the first component of the gradient in
    vnames2 gives the names to store the second component of the gradient in
    """
    vnames1=args['vnames1']
    vnames2=args['vnames2']
    # pull the shared inversion state from the subworld
    props=self.importValue("props")
    mods=self.importValue("fwdmodels")
    reg=self.importValue("regularization")
    mu_model=self.importValue("mu_model")
    mappings=self.importValue("mappings")
    m=self.importValue("current_point")
    
    model_args=SplitInversionCostFunction.getModelArgs(self, mods)
    
    # regularization gradient g_J == (Y, X)
    g_J = reg.getGradientAtPoint()
    p_diffs=[]
    # Find the derivative for each mapping
    # If a mapping has a list of components (idx), then make a new Data object with only those
    # components, pass it to the mapping and get the derivative.
    for i in range(len(mappings)):
        mm, idx=mappings[i]
        # NOTE(review): `numLevelSets` is not defined in this function --
        # presumably a module-level name or a missing local; verify, as this
        # branch raises NameError otherwise when idx is truthy.
        if idx and numLevelSets > 1:
            if len(idx)>1:
                m2=Data(0,(len(idx),),m.getFunctionSpace())
                for k in range(len(idx)): m2[k]=m[idx[k]]
                dpdm = mm.getDerivative(m2)
            else:
                dpdm = mm.getDerivative(m[idx[0]])
        else:
            dpdm = mm.getDerivative(m)
        p_diffs.append(dpdm)
    #Since we are going to be merging Y with other worlds, we need to make sure the the regularization
    #component is only added once.  However most of the ops below are in terms of += so we need to
    #create a zero object to use as a starting point
    if self.swid==0:
       Y=g_J[0]    # Because g_J==(Y,X)  Y_k=dKer/dm_k
    else:
       Y=Data(0, g_J[0].getShape(), g_J[0].getFunctionSpace())
    for i in range(len(mods)):
        mu=mu_model[i]
        f, idx_f=mods[i]
        # NOTE(review): this rebinds `args`, shadowing the **args kwargs dict
        # received above -- intentional reuse, but easy to misread.
        args=tuple( [ props[k] for k in idx_f]  + list( model_args[i] ) )
        Ys = f.getGradient(*args) # this d Jf/d props
        # in this case f depends on one parameter props only but this can
        # still depend on several level set components
        if Ys.getRank() == 0:
            # run through all level sets k prop j is depending on:
            idx_m=mappings[idx_f[0]][1]
            # tmp[k] = dJ_f/d_prop * d prop/d m[idx_m[k]]
            tmp=Ys * p_diffs[idx_f[0]] * mu
            if idx_m:
                if tmp.getRank()== 0:
                    for k in range(len(idx_m)):
                        Y[idx_m[k]]+=tmp # dJ_f /d m[idx_m[k]] = tmp
                else:
                    for k in range(len(idx_m)):
                        Y[idx_m[k]]+=tmp[k] # dJ_f /d m[idx_m[k]] = tmp[k]
            else:
                Y+=tmp # dJ_f /d m[idx_m[k]] = tmp
        else:
            s=0
            # run through all props j forward model f is depending on:
            for j in range(len(idx_f)):
                # run through all level sets k prop j is depending on:
                idx_m=mappings[j][1]
                if p_diffs[idx_f[j]].getRank() == 0 :
                    if idx_m: # this case is not needed (really?)
                        raise RuntimeError("something wrong A")
                        # tmp[k] = dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                        tmp=Ys[s]*p_diffs[idx_f[j]] * mu
                        for k in range(len(idx_m)):
                            Y[idx_m[k]]+=tmp[k] # dJ_f /d m[idx_m[k]] = tmp[k]
                    else:
                        Y+=Ys[s]*p_diffs[idx_f[j]] * mu
                    s+=1
                elif p_diffs[idx_f[j]].getRank() == 1 :
                    l=p_diffs[idx_f[j]].getShape()[0]
                    # tmp[k]=sum_j dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                    tmp=inner(Ys[s:s+l], p_diffs[idx_f[j]]) * mu
                    if idx_m:
                        for k in range(len(idx_m)):
                            Y[idx_m[k]]+=tmp # dJ_f /d m[idx_m[k]] = tmp[k]
                    else:
                        Y+=tmp
                    s+=l
                else: # rank 2 case
                    l=p_diffs[idx_f[j]].getShape()[0]
                    Yss=Ys[s:s+l]
                    if idx_m:
                        for k in range(len(idx_m)):
                            # dJ_f /d m[idx_m[k]] = tmp[k]
                            # NOTE(review): unlike the else branch, this path
                            # does not multiply by mu -- confirm intended.
                            Y[idx_m[k]]+=inner(Yss, p_diffs[idx_f[j]][:,k])
                    else:
                        Y+=inner(Yss, p_diffs[idx_f[j]]) * mu
                    s+=l    
    # export the first gradient component under one or several names
    if isinstance(vnames1, str):
      self.exportValue(vnames1, Y)
    else:
      for n in vnames1:
        self.exportValue(n, Y)
    if isinstance(vnames2, str):          #The second component should be strictly local 
      self.exportValue(vnames2, g_J[1])
    else:
      for n in vnames2:
        self.exportValue(n, g_J[1])              
    def __init__(self,
                 domain,
                 numLevelSets=1,
                 w0=None,
                 w1=None,
                 wc=None,
                 location_of_set_m=Data(),
                 useDiagonalHessianApproximation=False,
                 tol=1e-8,
                 coordinates=None,
                 scale=None,
                 scale_c=None):
        """
        initialization.

        :param domain: domain
        :type domain: `Domain`
        :param numLevelSets: number of level sets
        :type numLevelSets: ``int``
        :param w0: weighting factor for the m**2 term. If not set zero is assumed.
        :type w0: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of shape
                  (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param w1: weighting factor for the grad(m_i) terms. If not set zero is assumed
        :type w1: ``Vector`` if ``numLevelSets`` == 1 or `Data` object of shape
                  (``numLevelSets`` , DIM) if ``numLevelSets`` > 1
        :param wc: weighting factor for the cross gradient terms. If not set
                   zero is assumed. Used for the case if ``numLevelSets`` > 1
                   only. Only values ``wc[l,k]`` in the lower triangle (l<k)
                   are used.
        :type wc: `Data` object of shape (``numLevelSets`` , ``numLevelSets``)
        :param location_of_set_m: marks location of zero values of the level
                                  set function ``m`` by a positive entry.
        :type location_of_set_m: ``Scalar`` if ``numLevelSets`` == 1 or `Data`
                object of shape (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param useDiagonalHessianApproximation: if True cross gradient terms
                    between level set components are ignored when calculating
                    approximations of the inverse of the Hessian Operator.
                    This can speed-up the calculation of the inverse but may
                    lead to an increase of the number of iteration steps in the
                    inversion.
        :type useDiagonalHessianApproximation: ``bool``
        :param tol: tolerance when solving the PDE for the inverse of the
                    Hessian Operator
        :type tol: positive ``float``

        :param coordinates: defines coordinate system to be used
        :type coordinates: ReferenceSystem` or `SpatialCoordinateTransformation`
        :param scale: weighting factor for level set function variation terms.
                      If not set one is used.
        :type scale: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of
                     shape (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param scale_c: scale for the cross gradient terms. If not set
                   one is assumed. Used for the case if ``numLevelSets`` > 1
                   only. Only values ``scale_c[l,k]`` in the lower triangle
                   (l<k) are used.
        :type scale_c: `Data` object of shape (``numLevelSets``,``numLevelSets``)

        """
        if w0 is None and w1 is None:
            raise ValueError("Values for w0 or for w1 must be given.")
        if wc is None and numLevelSets > 1:
            raise ValueError("Values for wc must be given.")

        self.logger = logging.getLogger('inv.%s' % self.__class__.__name__)
        self.__domain = domain
        DIM = self.__domain.getDim()
        self.__numLevelSets = numLevelSets
        self.__trafo = makeTransformation(domain, coordinates)
        self.__pde = LinearPDE(self.__domain,
                               numEquations=self.__numLevelSets,
                               numSolutions=self.__numLevelSets)
        self.__pde.getSolverOptions().setTolerance(tol)
        self.__pde.setSymmetryOn()
        self.__pde.setValue(
            A=self.__pde.createCoefficient('A'),
            D=self.__pde.createCoefficient('D'),
        )
        try:
            self.__pde.setValue(q=location_of_set_m)
        except IllegalCoefficientValue:
            raise ValueError(
                "Unable to set location of fixed level set function.")

        # =========== check the shape of the scales: ========================
        if scale is None:
            if numLevelSets == 1:
                scale = 1.
            else:
                scale = np.ones((numLevelSets, ))
        else:
            scale = np.asarray(scale)
            if numLevelSets == 1:
                if scale.shape == ():
                    if not scale > 0:
                        raise ValueError("Value for scale must be positive.")
                else:
                    raise ValueError("Unexpected shape %s for scale." %
                                     scale.shape)
            else:
                if scale.shape is (numLevelSets, ):
                    if not min(scale) > 0:
                        raise ValueError(
                            "All values for scale must be positive.")
                else:
                    raise ValueError("Unexpected shape %s for scale." %
                                     scale.shape)

        if scale_c is None or numLevelSets < 2:
            scale_c = np.ones((numLevelSets, numLevelSets))
        else:
            scale_c = np.asarray(scale_c)
            if scale_c.shape == (numLevelSets, numLevelSets):
                if not all([[scale_c[l, k] > 0. for l in range(k)]
                            for k in range(1, numLevelSets)]):
                    raise ValueError(
                        "All values in the lower triangle of scale_c must be positive."
                    )
            else:
                raise ValueError("Unexpected shape %s for scale." %
                                 scale_c.shape)
        # ===== check the shape of the weights: =============================
        if w0 is not None:
            w0 = interpolate(w0,
                             self.__pde.getFunctionSpaceForCoefficient('D'))
            s0 = w0.getShape()
            if numLevelSets == 1:
                if not s0 == ():
                    raise ValueError("Unexpected shape %s for weight w0." %
                                     (s0, ))
            else:
                if not s0 == (numLevelSets, ):
                    raise ValueError("Unexpected shape %s for weight w0." %
                                     (s0, ))
            if not self.__trafo.isCartesian():
                w0 *= self.__trafo.getVolumeFactor()
        if not w1 is None:
            w1 = interpolate(w1,
                             self.__pde.getFunctionSpaceForCoefficient('A'))
            s1 = w1.getShape()
            if numLevelSets == 1:
                if not s1 == (DIM, ):
                    raise ValueError("Unexpected shape %s for weight w1." %
                                     (s1, ))
            else:
                if not s1 == (numLevelSets, DIM):
                    raise ValueError("Unexpected shape %s for weight w1." %
                                     (s1, ))
            if not self.__trafo.isCartesian():
                f = self.__trafo.getScalingFactors(
                )**2 * self.__trafo.getVolumeFactor()
                if numLevelSets == 1:
                    w1 *= f
                else:
                    for i in range(numLevelSets):
                        w1[i, :] *= f

        if numLevelSets == 1:
            wc = None
        else:
            wc = interpolate(wc,
                             self.__pde.getFunctionSpaceForCoefficient('A'))
            sc = wc.getShape()
            if not sc == (numLevelSets, numLevelSets):
                raise ValueError("Unexpected shape %s for weight wc." % (sc, ))
            if not self.__trafo.isCartesian():
                raise ValueError(
                    "Non-cartesian coordinates for cross-gradient term is not supported yet."
                )
        # ============= now we rescale weights: =============================
        L2s = np.asarray(boundingBoxEdgeLengths(domain))**2
        L4 = 1 / np.sum(1 / L2s)**2
        if numLevelSets == 1:
            A = 0
            if w0 is not None:
                A = integrate(w0)
            if w1 is not None:
                A += integrate(inner(w1, 1 / L2s))
            if A > 0:
                f = scale / A
                if w0 is not None:
                    w0 *= f
                if w1 is not None:
                    w1 *= f
            else:
                raise ValueError("Non-positive weighting factor detected.")
        else:  # numLevelSets > 1
            for k in range(numLevelSets):
                A = 0
                if w0 is not None:
                    A = integrate(w0[k])
                if w1 is not None:
                    A += integrate(inner(w1[k, :], 1 / L2s))
                if A > 0:
                    f = scale[k] / A
                    if w0 is not None:
                        w0[k] *= f
                    if w1 is not None:
                        w1[k, :] *= f
                else:
                    raise ValueError(
                        "Non-positive weighting factor for level set component %d detected."
                        % k)

                # and now the cross-gradient:
                if wc is not None:
                    for l in range(k):
                        A = integrate(wc[l, k]) / L4
                        if A > 0:
                            f = scale_c[l, k] / A
                            wc[l, k] *= f
#                       else:
#                           raise ValueError("Non-positive weighting factor for cross-gradient level set components %d and %d detected."%(l,k))

        self.__w0 = w0
        self.__w1 = w1
        self.__wc = wc

        self.__pde_is_set = False
        if self.__numLevelSets > 1:
            self.__useDiagonalHessianApproximation = useDiagonalHessianApproximation
        else:
            self.__useDiagonalHessianApproximation = True
        self._update_Hessian = True

        self.__num_tradeoff_factors = numLevelSets + (
            (numLevelSets - 1) * numLevelSets) // 2
        self.setTradeOffFactors()
        self.__vol_d = vol(self.__domain)
# Example #14
# 0
    def getInverseHessianApproximationAtPoint(self, r, solve=True):
        """
        Applies (an approximation of) the inverse Hessian to the gradient
        *r* at the cached point.

        :param r: gradient as an ``ArithmeticTuple`` (Y, X): Y goes into the
                  PDE right-hand side Y, X into the flux term X
        :param solve: if True the PDE is solved and the solution returned,
                      otherwise the configured PDE object itself is returned
        """

        # substituting cached values
        m = self.__pre_input
        grad_m = self.__pre_args

        # rebuild the Hessian PDE coefficients only when the trade-off
        # factors changed or an update was explicitly requested
        if self._new_mu or self._update_Hessian:
            self._new_mu = False
            self._update_Hessian = False
            mu = self.__mu
            mu_c = self.__mu_c

            DIM = self.getDomain().getDim()
            numLS = self.getNumLevelSets()
            # zeroth-order contribution: D = w0 * mu (diagonal in the LS index)
            if self.__w0 is not None:
                if numLS == 1:
                    D = self.__w0 * mu
                else:
                    D = self.getPDE().getCoefficient("D")
                    D.setToZero()
                    for k in range(numLS):
                        D[k, k] = self.__w0[k] * mu[k]
                self.getPDE().setValue(D=D)

            # first-order contribution: A diagonal in both LS and space indices
            A = self.getPDE().getCoefficient("A")
            A.setToZero()
            if self.__w1 is not None:
                if numLS == 1:
                    for i in range(DIM):
                        A[i, i] = self.__w1[i] * mu
                else:
                    for k in range(numLS):
                        for i in range(DIM):
                            A[k, i, k, i] = self.__w1[k, i] * mu[k]

            if numLS > 1:
                # this could be make faster by creating caches for grad_m_k, l2_grad_m_k  and o_kk
                # second derivative of the cross-gradient term
                # |g_k|^2|g_l|^2 - (g_k.g_l)^2 w.r.t. the gradients
                for k in range(numLS):
                    grad_m_k = grad_m[k, :]
                    l2_grad_m_k = escript.length(grad_m_k)**2
                    o_kk = escript.outer(grad_m_k, grad_m_k)
                    for l in range(k):
                        grad_m_l = grad_m[l, :]
                        l2_grad_m_l = escript.length(grad_m_l)**2
                        i_lk = escript.inner(grad_m_l, grad_m_k)
                        o_lk = escript.outer(grad_m_l, grad_m_k)
                        o_kl = escript.outer(grad_m_k, grad_m_l)
                        o_ll = escript.outer(grad_m_l, grad_m_l)
                        f = mu_c[l, k] * self.__wc[l, k]
                        Z = f * (2 * o_lk - o_kl -
                                 i_lk * escript.kronecker(DIM))
                        A[l, :,
                          l, :] += f * (l2_grad_m_k * escript.kronecker(DIM) -
                                        o_kk)
                        A[l, :, k, :] += Z
                        # symmetry of the Hessian: A[k,:,l,:] = A[l,:,k,:]^T
                        A[k, :, l, :] += escript.transpose(Z)
                        A[k, :,
                          k, :] += f * (l2_grad_m_l * escript.kronecker(DIM) -
                                        o_ll)
            self.getPDE().setValue(A=A)
        #self.getPDE().resetRightHandSideCoefficients()
        #self.getPDE().setValue(X=r[1])
        #print "X only: ",self.getPDE().getSolution()
        #self.getPDE().resetRightHandSideCoefficients()
        #self.getPDE().setValue(Y=r[0])
        #print "Y only: ",self.getPDE().getSolution()

        # feed the gradient (Y, X) into the right-hand side and solve
        self.getPDE().resetRightHandSideCoefficients()
        self.getPDE().setValue(X=r[1], Y=r[0])
        if not solve:
            return self.getPDE()
        return self.getPDE().getSolution()
    def __init__(
        self,
        domain,
        numLevelSets=1,
        w0=None,
        w1=None,
        wc=None,
        location_of_set_m=None,
        useDiagonalHessianApproximation=False,
        tol=1e-8,
        coordinates=None,
        scale=None,
        scale_c=None,
    ):
        """
        initialization.

        :param domain: domain
        :type domain: `Domain`
        :param numLevelSets: number of level sets
        :type numLevelSets: ``int``
        :param w0: weighting factor for the m**2 term. If not set zero is assumed.
        :type w0: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of shape
                  (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param w1: weighting factor for the grad(m_i) terms. If not set zero is assumed
        :type w1: ``Vector`` if ``numLevelSets`` == 1 or `Data` object of shape
                  (``numLevelSets`` , DIM) if ``numLevelSets`` > 1
        :param wc: weighting factor for the cross gradient terms. If not set
                   zero is assumed. Used for the case if ``numLevelSets`` > 1
                   only. Only values ``wc[l,k]`` in the lower triangle (l<k)
                   are used.
        :type wc: `Data` object of shape (``numLevelSets`` , ``numLevelSets``)
        :param location_of_set_m: marks location of zero values of the level
                                  set function ``m`` by a positive entry. If
                                  not set an empty `Data` object is used.
        :type location_of_set_m: ``Scalar`` if ``numLevelSets`` == 1 or `Data`
                object of shape (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param useDiagonalHessianApproximation: if True cross gradient terms
                    between level set components are ignored when calculating
                    approximations of the inverse of the Hessian Operator.
                    This can speed-up the calculation of the inverse but may
                    lead to an increase of the number of iteration steps in the
                    inversion.
        :type useDiagonalHessianApproximation: ``bool``
        :param tol: tolerance when solving the PDE for the inverse of the
                    Hessian Operator
        :type tol: positive ``float``

        :param coordinates: defines coordinate system to be used
        :type coordinates: `ReferenceSystem` or `SpatialCoordinateTransformation`
        :param scale: weighting factor for level set function variation terms.
                      If not set one is used.
        :type scale: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of
                     shape (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param scale_c: scale for the cross gradient terms. If not set
                   one is assumed. Used for the case if ``numLevelSets`` > 1
                   only. Only values ``scale_c[l,k]`` in the lower triangle
                   (l<k) are used.
        :type scale_c: `Data` object of shape (``numLevelSets``,``numLevelSets``)

        :raise ValueError: if neither w0 nor w1 is given, if wc is missing for
                    ``numLevelSets`` > 1, or if a weight/scale has an
                    unexpected shape or a non-positive value.
        """
        # BUGFIX: avoid a mutable default argument (previously
        # ``location_of_set_m=Data()`` shared one instance across calls);
        # an empty Data() created here behaves identically for callers.
        if location_of_set_m is None:
            location_of_set_m = Data()
        if w0 is None and w1 is None:
            raise ValueError("Values for w0 or for w1 must be given.")
        if wc is None and numLevelSets > 1:
            raise ValueError("Values for wc must be given.")

        self.__pre_input = None
        self.__pre_args = None
        self.logger = logging.getLogger("inv.%s" % self.__class__.__name__)
        self.__domain = domain
        DIM = self.__domain.getDim()
        self.__numLevelSets = numLevelSets
        self.__trafo = makeTransformation(domain, coordinates)
        self.__pde = LinearPDE(self.__domain, numEquations=self.__numLevelSets, numSolutions=self.__numLevelSets)
        self.__pde.getSolverOptions().setTolerance(tol)
        self.__pde.setSymmetryOn()
        self.__pde.setValue(A=self.__pde.createCoefficient("A"), D=self.__pde.createCoefficient("D"))
        try:
            self.__pde.setValue(q=location_of_set_m)
        except IllegalCoefficientValue:
            raise ValueError("Unable to set location of fixed level set function.")

        # =========== check the shape of the scales: ========================
        if scale is None:
            if numLevelSets == 1:
                scale = 1.0
            else:
                scale = np.ones((numLevelSets,))
        else:
            scale = np.asarray(scale)
            if numLevelSets == 1:
                if scale.shape == ():
                    if not scale > 0:
                        raise ValueError("Value for scale must be positive.")
                else:
                    raise ValueError("Unexpected shape %s for scale." % scale.shape)
            else:
                # BUGFIX: was ``scale.shape is (numLevelSets,)`` -- identity
                # comparison with a tuple literal is always False, so every
                # correctly shaped scale was rejected.
                if scale.shape == (numLevelSets,):
                    if not min(scale) > 0:
                        raise ValueError("All values for scale must be positive.")
                else:
                    raise ValueError("Unexpected shape %s for scale." % scale.shape)

        if scale_c is None or numLevelSets < 2:
            scale_c = np.ones((numLevelSets, numLevelSets))
        else:
            scale_c = np.asarray(scale_c)
            if scale_c.shape == (numLevelSets, numLevelSets):
                # BUGFIX: previously ``all()`` was applied to a list of
                # (non-empty, hence truthy) inner lists, so the positivity
                # check never failed. Flatten into a single generator.
                if not all(scale_c[l, k] > 0.0 for k in range(1, numLevelSets) for l in range(k)):
                    raise ValueError("All values in the lower triangle of scale_c must be positive.")
            else:
                # BUGFIX: error message previously said "scale".
                raise ValueError("Unexpected shape %s for scale_c." % (scale_c.shape,))
        # ===== check the shape of the weights: =============================
        if w0 is not None:
            w0 = interpolate(w0, self.__pde.getFunctionSpaceForCoefficient("D"))
            s0 = w0.getShape()
            if numLevelSets == 1:
                if not s0 == ():
                    raise ValueError("Unexpected shape %s for weight w0." % (s0,))
            else:
                if not s0 == (numLevelSets,):
                    raise ValueError("Unexpected shape %s for weight w0." % (s0,))
            if not self.__trafo.isCartesian():
                w0 *= self.__trafo.getVolumeFactor()
        if w1 is not None:
            w1 = interpolate(w1, self.__pde.getFunctionSpaceForCoefficient("A"))
            s1 = w1.getShape()
            if numLevelSets == 1:
                if not s1 == (DIM,):
                    raise ValueError("Unexpected shape %s for weight w1." % (s1,))
            else:
                if not s1 == (numLevelSets, DIM):
                    raise ValueError("Unexpected shape %s for weight w1." % (s1,))
            if not self.__trafo.isCartesian():
                f = self.__trafo.getScalingFactors() ** 2 * self.__trafo.getVolumeFactor()
                if numLevelSets == 1:
                    w1 *= f
                else:
                    for i in range(numLevelSets):
                        w1[i, :] *= f

        if numLevelSets == 1:
            wc = None
        else:
            wc = interpolate(wc, self.__pde.getFunctionSpaceForCoefficient("A"))
            sc = wc.getShape()
            if not sc == (numLevelSets, numLevelSets):
                raise ValueError("Unexpected shape %s for weight wc." % (sc,))
            if not self.__trafo.isCartesian():
                raise ValueError("Non-cartesian coordinates for cross-gradient term is not supported yet.")
        # ============= now we rescale weights: =============================
        L2s = np.asarray(boundingBoxEdgeLengths(domain)) ** 2
        L4 = 1 / np.sum(1 / L2s) ** 2
        if numLevelSets == 1:
            A = 0
            if w0 is not None:
                A = integrate(w0)
            if w1 is not None:
                A += integrate(inner(w1, 1 / L2s))
            if A > 0:
                f = scale / A
                if w0 is not None:
                    w0 *= f
                if w1 is not None:
                    w1 *= f
            else:
                raise ValueError("Non-positive weighting factor detected.")
        else:  # numLevelSets > 1
            for k in range(numLevelSets):
                A = 0
                if w0 is not None:
                    A = integrate(w0[k])
                if w1 is not None:
                    A += integrate(inner(w1[k, :], 1 / L2s))
                if A > 0:
                    f = scale[k] / A
                    if w0 is not None:
                        w0[k] *= f
                    if w1 is not None:
                        w1[k, :] *= f
                else:
                    raise ValueError("Non-positive weighting factor for level set component %d detected." % k)

                # and now the cross-gradient:
                if wc is not None:
                    for l in range(k):
                        A = integrate(wc[l, k]) / L4
                        if A > 0:
                            f = scale_c[l, k] / A
                            wc[l, k] *= f
        #                       else:
        #                           raise ValueError("Non-positive weighting factor for cross-gradient level set components %d and %d detected."%(l,k))

        self.__w0 = w0
        self.__w1 = w1
        self.__wc = wc

        self.__pde_is_set = False
        # cross-gradient coupling only exists for more than one level set, so
        # the diagonal approximation is exact in the single-level-set case
        if self.__numLevelSets > 1:
            self.__useDiagonalHessianApproximation = useDiagonalHessianApproximation
        else:
            self.__useDiagonalHessianApproximation = True
        self._update_Hessian = True

        # one factor per level set plus one per (unordered) pair of level sets
        self.__num_tradeoff_factors = numLevelSets + ((numLevelSets - 1) * numLevelSets) // 2
        self.setTradeOffFactors()
        self.__vol_d = vol(self.__domain)
    def getInverseHessianApproximationAtPoint(self, r, solve=True):
        """
        Applies an approximation of the inverse Hessian operator of the
        regularization to the gradient ``r`` by assembling and solving a
        linear PDE whose right hand side is built from ``r``.

        :param r: gradient to which the inverse Hessian approximation is
                  applied; ``r[0]`` is used as PDE coefficient ``Y`` and
                  ``r[1]`` as coefficient ``X``
        :param solve: if ``True`` the PDE solution is returned, otherwise the
                      configured PDE object itself
        :type solve: ``bool``
        :return: the PDE solution, or the PDE object when ``solve`` is False
        """

        # substituting cached values
        m = self.__pre_input
        grad_m = self.__pre_args

        # The PDE coefficients A and D only need to be rebuilt when the
        # trade-off factors changed or a Hessian update was requested.
        if self._new_mu or self._update_Hessian:
            self._new_mu = False
            self._update_Hessian = False
            mu = self.__mu
            mu_c = self.__mu_c

            DIM = self.getDomain().getDim()
            numLS = self.getNumLevelSets()
            if self.__w0 is not None:
                # zero-order term: D = w0 * mu, diagonal over level set
                # components in the multi-level-set case
                if numLS == 1:
                    D = self.__w0 * mu
                else:
                    D = self.getPDE().getCoefficient("D")
                    D.setToZero()
                    for k in range(numLS):
                        D[k, k] = self.__w0[k] * mu[k]
                self.getPDE().setValue(D=D)

            A = self.getPDE().getCoefficient("A")
            A.setToZero()
            if self.__w1 is not None:
                # gradient term: fill only the diagonal entries of A
                if numLS == 1:
                    for i in range(DIM):
                        A[i, i] = self.__w1[i] * mu
                else:
                    for k in range(numLS):
                        for i in range(DIM):
                            A[k, i, k, i] = self.__w1[k, i] * mu[k]

            if numLS > 1:
                # cross-gradient coupling between level set components l < k
                # this could be make faster by creating caches for grad_m_k, l2_grad_m_k  and o_kk
                for k in range(numLS):
                    grad_m_k = grad_m[k, :]
                    l2_grad_m_k = length(grad_m_k) ** 2
                    o_kk = outer(grad_m_k, grad_m_k)
                    for l in range(k):
                        grad_m_l = grad_m[l, :]
                        l2_grad_m_l = length(grad_m_l) ** 2
                        i_lk = inner(grad_m_l, grad_m_k)
                        o_lk = outer(grad_m_l, grad_m_k)
                        o_kl = outer(grad_m_k, grad_m_l)
                        o_ll = outer(grad_m_l, grad_m_l)
                        f = mu_c[l, k] * self.__wc[l, k]
                        Z = f * (2 * o_lk - o_kl - i_lk * kronecker(DIM))
                        # symmetric fill of the (l,k) coupling blocks
                        A[l, :, l, :] += f * (l2_grad_m_k * kronecker(DIM) - o_kk)
                        A[l, :, k, :] += Z
                        A[k, :, l, :] += transpose(Z)
                        A[k, :, k, :] += f * (l2_grad_m_l * kronecker(DIM) - o_ll)
            self.getPDE().setValue(A=A)
        # self.getPDE().resetRightHandSideCoefficients()
        # self.getPDE().setValue(X=r[1])
        # print "X only: ",self.getPDE().getSolution()
        # self.getPDE().resetRightHandSideCoefficients()
        # self.getPDE().setValue(Y=r[0])
        # print "Y only: ",self.getPDE().getSolution()

        self.getPDE().resetRightHandSideCoefficients()
        self.getPDE().setValue(X=r[1], Y=r[0])
        if not solve:
            return self.getPDE()
        return self.getPDE().getSolution()
    def _getGradient(self, m, *args):
        """
        returns the gradient of the cost function at *m*.
        If the pre-computed values are not supplied `getArguments()` is called.

        :param m: current approximation of the level set function
        :type m: `Data`
        :param args: tuple of values of the parameters, pre-computed values
                     for the forward model and pre-computed values for the
                     regularization

        :rtype: `ArithmeticTuple`

        :note: returns (Y^,X) where Y^ is the gradient from regularization plus
               gradients of fwd models. X is the gradient of the regularization
               w.r.t. gradient of m.
        """
        if not self.configured:
          raise ValueError("This inversion function has not been configured yet")
        # NOTE(review): deliberate temporary guard -- everything below this
        # raise is currently unreachable; remove the raise to re-enable.
        raise RuntimeError("Call to getGradient -- temporary block to see where this is used")
        if len(args)==0:
            args = self.getArguments(m)

        props=args[0]
        args_f=args[1]
        args_reg=args[2]

        g_J = self.regularization.getGradient(m, *args_reg)
        p_diffs=[]
        # Find the derivative for each mapping
        # If a mapping has a list of components (idx), then make a new Data object with only those
        # components, pass it to the mapping and get the derivative.
        for i in range(self.numMappings):
            mm, idx=self.mappings[i]
            if idx and self.numLevelSets > 1:
                if len(idx)>1:
                    m2=Data(0,(len(idx),),m.getFunctionSpace())
                    for k in range(len(idx)): m2[k]=m[idx[k]]
                    dpdm = mm.getDerivative(m2)
                else:
                    dpdm = mm.getDerivative(m[idx[0]])
            else:
                dpdm = mm.getDerivative(m)
            p_diffs.append(dpdm)

        Y=g_J[0] # Because g_J==(Y,X)  Y_k=dKer/dm_k
        for i in range(self.numModels):
            mu=self.mu_model[i]
            f, idx_f=self.forward_models[i]
            args=tuple( [ props[k] for k in idx_f]  + list( args_f[i] ) )
            Ys = f.getGradient(*args) # this d Jf/d props
            # in this case f depends on one parameter props only but this can
            # still depend on several level set components
            if Ys.getRank() == 0:
                # run through all level sets k prop j is depending on:
                idx_m=self.mappings[idx_f[0]][1]
                # tmp[k] = dJ_f/d_prop * d prop/d m[idx_m[k]]
                tmp=Ys * p_diffs[idx_f[0]] * mu
                if idx_m:
                    if tmp.getRank()== 0:
                        for k in range(len(idx_m)):
                            Y[idx_m[k]]+=tmp # dJ_f /d m[idx_m[k]] = tmp
                    else:
                        for k in range(len(idx_m)):
                            Y[idx_m[k]]+=tmp[k] # dJ_f /d m[idx_m[k]] = tmp[k]
                else:
                    Y+=tmp # dJ_f /d m[idx_m[k]] = tmp
            else:
                s=0
                # run through all props j forward model f is depending on:
                for j in range(len(idx_f)):
                    # run through all level sets k prop j is depending on:
                    # NOTE(review): the rank-0 branch above indexes mappings by
                    # idx_f[...] while this line uses j directly -- confirm
                    # whether this should be self.mappings[idx_f[j]][1].
                    idx_m=self.mappings[j][1]
                    if p_diffs[idx_f[j]].getRank() == 0 :
                        if idx_m: # this case is not needed (really?)
                            self.logger.error("something wrong A")
                            # tmp[k] = dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                            tmp=Ys[s]*p_diffs[idx_f[j]] * mu
                            for k in range(len(idx_m)):
                                Y[idx_m[k]]+=tmp[k] # dJ_f /d m[idx_m[k]] = tmp[k]
                        else:
                            Y+=Ys[s]*p_diffs[idx_f[j]] * mu
                        s+=1
                    elif p_diffs[idx_f[j]].getRank() == 1 :
                        l=p_diffs[idx_f[j]].getShape()[0]
                        # tmp[k]=sum_j dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                        tmp=inner(Ys[s:s+l], p_diffs[idx_f[j]]) * mu
                        if idx_m:
                            for k in range(len(idx_m)):
                                Y[idx_m[k]]+=tmp # dJ_f /d m[idx_m[k]] = tmp[k]
                        else:
                            Y+=tmp
                        s+=l
                    else: # rank 2 case
                        l=p_diffs[idx_f[j]].getShape()[0]
                        Yss=Ys[s:s+l]
                        if idx_m:
                            for k in range(len(idx_m)):
                                # dJ_f /d m[idx_m[k]] = tmp[k]
                                # NOTE(review): unlike the else branch below,
                                # this accumulation is not scaled by mu --
                                # confirm whether * mu is missing here.
                                Y[idx_m[k]]+=inner(Yss, p_diffs[idx_f[j]][:,k])
                        else:
                            Y+=inner(Yss, p_diffs[idx_f[j]]) * mu
                        s+=l
        return g_J
    def _getGradient(self, m, *args):
        """
        returns the gradient of the cost function at *m*.
        If the pre-computed values are not supplied `getArguments()` is called.

        :param m: current approximation of the level set function
        :type m: `Data`
        :param args: tuple of values of the parameters, pre-computed values
                     for the forward model and pre-computed values for the
                     regularization

        :rtype: `ArithmeticTuple`

        :note: returns (Y^,X) where Y^ is the gradient from regularization plus
               gradients of fwd models. X is the gradient of the regularization
               w.r.t. gradient of m.
        """
        if len(args)==0:
            args = self.getArguments(m)

        props=args[0]
        args_f=args[1]
        args_reg=args[2]

        g_J = self.regularization.getGradient(m, *args_reg)
        p_diffs=[]
        # Find the derivative for each mapping. If a mapping has a list of
        # components (idx), build a new Data object holding only those
        # components, pass it to the mapping and take its derivative.
        for i in range(self.numMappings):
            mm, idx=self.mappings[i]
            if idx and self.numLevelSets > 1:
                if len(idx)>1:
                    m2=Data(0,(len(idx),),m.getFunctionSpace())
                    for k in range(len(idx)): m2[k]=m[idx[k]]
                    dpdm = mm.getDerivative(m2)
                else:
                    dpdm = mm.getDerivative(m[idx[0]])
            else:
                dpdm = mm.getDerivative(m)
            p_diffs.append(dpdm)

        Y=g_J[0] # Because g_J==(Y,X)  Y_k=dKer/dm_k
        # accumulate the forward-model contributions into Y in place
        for i in range(self.numModels):
            mu=self.mu_model[i]
            f, idx_f=self.forward_models[i]
            args=tuple( [ props[k] for k in idx_f]  + list( args_f[i] ) )
            Ys = f.getGradient(*args) # this d Jf/d props
            # in this case f depends on one parameter props only but this can
            # still depend on several level set components
            if Ys.getRank() == 0:
                # run through all level sets k prop j is depending on:
                idx_m=self.mappings[idx_f[0]][1]
                # tmp[k] = dJ_f/d_prop * d prop/d m[idx_m[k]]
                tmp=Ys * p_diffs[idx_f[0]] * mu
                if idx_m:
                    if tmp.getRank()== 0:
                        for k in range(len(idx_m)):
                            Y[idx_m[k]]+=tmp # dJ_f /d m[idx_m[k]] = tmp
                    else:
                        for k in range(len(idx_m)):
                            Y[idx_m[k]]+=tmp[k] # dJ_f /d m[idx_m[k]] = tmp[k]
                else:
                    Y+=tmp # dJ_f /d m[idx_m[k]] = tmp
            else:
                s=0
                # run through all props j forward model f is depending on:
                for j in range(len(idx_f)):
                    # run through all level sets k prop j is depending on:
                    # NOTE(review): the rank-0 branch above indexes mappings by
                    # idx_f[...] while this line uses j directly -- confirm
                    # whether this should be self.mappings[idx_f[j]][1].
                    idx_m=self.mappings[j][1]
                    if p_diffs[idx_f[j]].getRank() == 0 :
                        if idx_m: # this case is not needed (really?)
                            self.logger.error("something wrong A")
                            # tmp[k] = dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                            tmp=Ys[s]*p_diffs[idx_f[j]] * mu
                            for k in range(len(idx_m)):
                                Y[idx_m[k]]+=tmp[k] # dJ_f /d m[idx_m[k]] = tmp[k]
                        else:
                            Y+=Ys[s]*p_diffs[idx_f[j]] * mu
                        s+=1
                    elif p_diffs[idx_f[j]].getRank() == 1 :
                        l=p_diffs[idx_f[j]].getShape()[0]
                        # tmp[k]=sum_j dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                        tmp=inner(Ys[s:s+l], p_diffs[idx_f[j]]) * mu
                        if idx_m:
                            for k in range(len(idx_m)):
                                Y[idx_m[k]]+=tmp # dJ_f /d m[idx_m[k]] = tmp[k]
                        else:
                            Y+=tmp
                        s+=l
                    else: # rank 2 case
                        l=p_diffs[idx_f[j]].getShape()[0]
                        Yss=Ys[s:s+l]
                        if idx_m:
                            for k in range(len(idx_m)):
                                # dJ_f /d m[idx_m[k]] = tmp[k]
                                # NOTE(review): unlike the else branch below,
                                # this accumulation is not scaled by mu --
                                # confirm whether * mu is missing here.
                                Y[idx_m[k]]+=inner(Yss, p_diffs[idx_f[j]][:,k])
                        else:
                            Y+=inner(Yss, p_diffs[idx_f[j]]) * mu
                        s+=l
        return g_J
 def calculateGradientWorker(self, **args):
    """
    Worker that computes the gradient on this split world and exports its
    components under the requested variable names.

    vnames1 gives the names to store the first component of the gradient in
    vnames2 gives the names to store the second component of the gradient in
    """
    vnames1=args['vnames1']
    vnames2=args['vnames2']
    # pull the shared inversion state published to this subworld
    props=self.importValue("props")
    mods=self.importValue("fwdmodels")
    reg=self.importValue("regularization")
    mu_model=self.importValue("mu_model")
    mappings=self.importValue("mappings")
    m=self.importValue("current_point")

    model_args=SplitInversionCostFunction.getModelArgs(self, mods)

    g_J = reg.getGradientAtPoint()
    p_diffs=[]
    # Find the derivative for each mapping
    # If a mapping has a list of components (idx), then make a new Data object with only those
    # components, pass it to the mapping and get the derivative.
    for i in range(len(mappings)):
        mm, idx=mappings[i]
        # NOTE(review): `numLevelSets` is not defined in this scope -- this
        # branch raises NameError if reached; confirm intended source
        # (e.g. reg.getNumLevelSets()).
        if idx and numLevelSets > 1:
            if len(idx)>1:
                m2=Data(0,(len(idx),),m.getFunctionSpace())
                for k in range(len(idx)): m2[k]=m[idx[k]]
                dpdm = mm.getDerivative(m2)
            else:
                dpdm = mm.getDerivative(m[idx[0]])
        else:
            dpdm = mm.getDerivative(m)
        p_diffs.append(dpdm)
    #Since we are going to be merging Y with other worlds, we need to make sure the the regularization
    #component is only added once.  However most of the ops below are in terms of += so we need to
    #create a zero object to use as a starting point
    if self.swid==0:
       Y=g_J[0]    # Because g_J==(Y,X)  Y_k=dKer/dm_k
    else:
       Y=Data(0, g_J[0].getShape(), g_J[0].getFunctionSpace())
    for i in range(len(mods)):
        mu=mu_model[i]
        f, idx_f=mods[i]
        args=tuple( [ props[k] for k in idx_f]  + list( model_args[i] ) )
        Ys = f.getGradient(*args) # this d Jf/d props
        # in this case f depends on one parameter props only but this can
        # still depend on several level set components
        if Ys.getRank() == 0:
            # run through all level sets k prop j is depending on:
            idx_m=mappings[idx_f[0]][1]
            # tmp[k] = dJ_f/d_prop * d prop/d m[idx_m[k]]
            tmp=Ys * p_diffs[idx_f[0]] * mu
            if idx_m:
                if tmp.getRank()== 0:
                    for k in range(len(idx_m)):
                        Y[idx_m[k]]+=tmp # dJ_f /d m[idx_m[k]] = tmp
                else:
                    for k in range(len(idx_m)):
                        Y[idx_m[k]]+=tmp[k] # dJ_f /d m[idx_m[k]] = tmp[k]
            else:
                Y+=tmp # dJ_f /d m[idx_m[k]] = tmp
        else:
            s=0
            # run through all props j forward model f is depending on:
            for j in range(len(idx_f)):
                # run through all level sets k prop j is depending on:
                # NOTE(review): the rank-0 branch above indexes mappings by
                # idx_f[...] while this line uses j directly -- confirm
                # whether this should be mappings[idx_f[j]][1].
                idx_m=mappings[j][1]
                if p_diffs[idx_f[j]].getRank() == 0 :
                    if idx_m: # this case is not needed (really?)
                        raise RuntimeError("something wrong A")
                        # NOTE(review): the three lines below are unreachable
                        # after the raise above.
                        # tmp[k] = dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                        tmp=Ys[s]*p_diffs[idx_f[j]] * mu
                        for k in range(len(idx_m)):
                            Y[idx_m[k]]+=tmp[k] # dJ_f /d m[idx_m[k]] = tmp[k]
                    else:
                        Y+=Ys[s]*p_diffs[idx_f[j]] * mu
                    s+=1
                elif p_diffs[idx_f[j]].getRank() == 1 :
                    l=p_diffs[idx_f[j]].getShape()[0]
                    # tmp[k]=sum_j dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                    tmp=inner(Ys[s:s+l], p_diffs[idx_f[j]]) * mu
                    if idx_m:
                        for k in range(len(idx_m)):
                            Y[idx_m[k]]+=tmp # dJ_f /d m[idx_m[k]] = tmp[k]
                    else:
                        Y+=tmp
                    s+=l
                else: # rank 2 case
                    l=p_diffs[idx_f[j]].getShape()[0]
                    Yss=Ys[s:s+l]
                    if idx_m:
                        for k in range(len(idx_m)):
                            # dJ_f /d m[idx_m[k]] = tmp[k]
                            # NOTE(review): not scaled by mu, unlike the else
                            # branch below -- confirm whether * mu is missing.
                            Y[idx_m[k]]+=inner(Yss, p_diffs[idx_f[j]][:,k])
                    else:
                        Y+=inner(Yss, p_diffs[idx_f[j]]) * mu
                    s+=l
    if isinstance(vnames1, str):
      self.exportValue(vnames1, Y)
    else:
      for n in vnames1:
        self.exportValue(n, Y)
    if isinstance(vnames2, str):          #The second component should be strictly local
      self.exportValue(vnames2, g_J[1])
    else:
      for n in vnames2:
        self.exportValue(n, g_J[1])