def getGrad(self, system):
    """returns exact gradient"""
    dim = self.domain.getDim()
    if system:
        g_ex = Data(0., (dim, dim), Solution(self.domain))
        if dim == 2:
            g_ex[0, 0] = 2.
            g_ex[0, 1] = 3.
            g_ex[1, 0] = 3.
            g_ex[1, 1] = 2.
        else:
            g_ex[0, 0] = 2.
            g_ex[0, 1] = 3.
            g_ex[0, 2] = 4.
            g_ex[1, 0] = 4.
            g_ex[1, 1] = 1.
            g_ex[1, 2] = -2.
            g_ex[2, 0] = 8.
            g_ex[2, 1] = 4.
            g_ex[2, 2] = 5.
    else:
        g_ex = Data(0., (dim,), Solution(self.domain))
        if dim == 2:
            g_ex[0] = 2.
            g_ex[1] = 3.
        else:
            g_ex[0] = 2.
            g_ex[1] = 3.
            g_ex[2] = 4.
    return g_ex
def createLevelSetFunction(self, *props):
    """
    returns an instance of an object used to represent a level set function
    initialized with zeros. Components can be overwritten by physical
    properties `props`. If present, entries must correspond to the
    `mappings` argument of the constructor. Use ``None`` for properties
    for which no value is given.
    """
    m = self.regularization.getPDE().createSolution()
    if len(props) > 0:
        for i in range(self.numMappings):
            if props[i]:
                mp, idx = self.mappings[i]
                m2 = mp.getInverse(props[i])
                if idx:
                    if len(idx) == 1:
                        m[idx[0]] = m2
                    else:
                        for k in range(len(idx)):
                            m[idx[k]] = m2[k]
                else:
                    if isinstance(m2, Data):
                        m = interpolate(m2, m.getFunctionSpace())
                    else:
                        m = Data(m2, m.getFunctionSpace())
    return m
def _getGrad(self, system):
    """returns exact gradient"""
    dim = self.domain.getDim()
    x = Solution(self.domain).getX()
    if system:
        g_ex = Data(0., (dim, dim), Solution(self.domain))
        if dim == 2:
            g_ex[0, 0] = 2. + 8. * x[0] + 5. * x[1]
            g_ex[0, 1] = 3. + 5. * x[0] + 12. * x[1]
            g_ex[1, 0] = 4. + 2. * x[0] + 6. * x[1]
            g_ex[1, 1] = 2. + 6. * x[0] + 8. * x[1]
        else:
            g_ex[0, 0] = 2. + 6. * x[1] + 8. * x[2] + 18. * x[0]
            g_ex[0, 1] = 3. + 6. * x[0] + 7. * x[2] + 20. * x[1]
            g_ex[0, 2] = 4. + 7. * x[1] + 8. * x[0] + 22. * x[2]
            g_ex[1, 0] = 4. + 3. * x[1] - 8. * x[2] - 4. * x[0]
            g_ex[1, 1] = 1. + 3. * x[0] + 2. * x[2] + 14. * x[1]
            g_ex[1, 2] = -6. + 2. * x[1] - 8. * x[0] + 10. * x[2]
            g_ex[2, 0] = 7. - 6. * x[1] + 2. * x[2] + 4. * x[0]
            g_ex[2, 1] = 9. - 6. * x[0] + 8. * x[2] + 16. * x[1]
            g_ex[2, 2] = 2. + 8. * x[1] + 2. * x[0] + 2. * x[2]
    else:
        g_ex = Data(0., (dim,), Solution(self.domain))
        if dim == 2:
            g_ex[0] = 2. + 8. * x[0] + 5. * x[1]
            g_ex[1] = 3. + 5. * x[0] + 12. * x[1]
        else:
            g_ex[0] = 2. + 6. * x[1] + 8. * x[2] + 18. * x[0]
            g_ex[1] = 3. + 6. * x[0] + 7. * x[2] + 20. * x[1]
            g_ex[2] = 4. + 7. * x[1] + 8. * x[0] + 22. * x[2]
    return g_ex
def getGradient(self, m, grad_m):
    """
    returns the gradient of the cost function J with respect to m.

    :note: This implementation returns Y_k=dPsi/dm_k and X_kj=dPsi/dm_kj
    """
    mu = self.__mu
    mu_c = self.__mu_c
    DIM = self.getDomain().getDim()
    numLS = self.getNumLevelSets()

    grad_m = grad(m, Function(m.getDomain()))
    if self.__w0 is not None:
        Y = m * self.__w0 * mu
    else:
        if numLS == 1:
            Y = Scalar(0, grad_m.getFunctionSpace())
        else:
            Y = Data(0, (numLS,), grad_m.getFunctionSpace())

    if self.__w1 is not None:
        if numLS == 1:
            X = grad_m * self.__w1 * mu
        else:
            X = grad_m * self.__w1
            for k in range(numLS):
                X[k, :] *= mu[k]
    else:
        X = Data(0, grad_m.getShape(), grad_m.getFunctionSpace())

    # cross gradient terms:
    if numLS > 1:
        for k in range(numLS):
            grad_m_k = grad_m[k, :]
            l2_grad_m_k = length(grad_m_k)**2
            for l in range(k):
                grad_m_l = grad_m[l, :]
                l2_grad_m_l = length(grad_m_l)**2
                grad_m_lk = inner(grad_m_l, grad_m_k)
                f = mu_c[l, k] * self.__wc[l, k]
                X[l, :] += f * (l2_grad_m_k * grad_m_l - grad_m_lk * grad_m_k)
                X[k, :] += f * (l2_grad_m_l * grad_m_k - grad_m_lk * grad_m_l)

    return ArithmeticTuple(Y, X)
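# A minimal numpy sketch of the cross-gradient contribution assembled in the
# double loop above, evaluated point-wise for two level set components.
# g_k and g_l stand in for grad_m[k,:] and grad_m[l,:], and f for
# mu_c[l,k]*wc[l,k]; the values are illustrative only.
import numpy as np

g_k = np.array([1.0, 2.0, 0.5])
g_l = np.array([0.0, 1.0, -1.0])
f = 0.3

# the same expressions as X[l,:] += ... and X[k,:] += ... above
X_l = f * (np.dot(g_k, g_k) * g_l - np.dot(g_l, g_k) * g_k)
X_k = f * (np.dot(g_l, g_l) * g_k - np.dot(g_l, g_k) * g_l)
# both contributions vanish when g_k and g_l are parallel, which is what the
# cross-gradient term rewards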
def getProperties(self, m, return_list=False):
    """
    returns a list of the physical properties from a given level set
    function *m* using the mappings of the cost function.

    :param m: level set function
    :type m: `Data`
    :param return_list: if ``True`` a list is returned.
    :type return_list: ``bool``
    :rtype: ``list`` of `Data`
    """
    if not self.configured:
        raise ValueError("This inversion function has not been configured yet")
    # Since this involves solving a PDE (and therefore a domain), it must be
    # kept local to each subworld.
    raise RuntimeError("This needs to run inside the subworld --- create a function for it")
    # the code below is unreachable until the block above is removed
    props = []
    for i in range(self.numMappings):
        mp, idx = self.mappings[i]
        if idx:
            if len(idx) == 1:
                p = mp.getValue(m[idx[0]])
            else:
                m2 = Data(0., (len(idx),), m.getFunctionSpace())
                for k in range(len(idx)):
                    m2[k] = m[idx[k]]
                p = mp.getValue(m2)
        else:
            p = mp.getValue(m)
        props.append(p)
    return props
def calculatePropertiesHelper(self, m, mappings):
    """
    returns a list of the physical properties from a given level set
    function *m* using the mappings of the cost function.

    :param m: level set function
    :type m: `Data`
    :rtype: ``list`` of `Data`
    """
    if not isinstance(self, Job):
        raise RuntimeError("This function is designed to be run inside a Job.")
    props = []
    for i in range(len(mappings)):
        mp, idx = mappings[i]
        if idx:
            if len(idx) == 1:
                p = mp.getValue(m[idx[0]])
            else:
                m2 = Data(0., (len(idx),), m.getFunctionSpace())
                for k in range(len(idx)):
                    m2[k] = m[idx[k]]
                p = mp.getValue(m2)
        else:
            p = mp.getValue(m)
        props.append(p)
    return props
def getProperties(self, m, return_list=False):
    """
    returns a list of the physical properties from a given level set
    function *m* using the mappings of the cost function.

    :param m: level set function
    :type m: `Data`
    :param return_list: if ``True`` a list is returned.
    :type return_list: ``bool``
    :rtype: ``list`` of `Data`
    """
    props = []
    for i in range(self.numMappings):
        mp, idx = self.mappings[i]
        if idx:
            if len(idx) == 1:
                p = mp.getValue(m[idx[0]])
            else:
                m2 = Data(0., (len(idx),), m.getFunctionSpace())
                for k in range(len(idx)):
                    m2[k] = m[idx[k]]
                p = mp.getValue(m2)
        else:
            p = mp.getValue(m)
        props.append(p)
    if self.numMappings > 1 or return_list:
        return props
    else:
        return props[0]
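# A plain-Python sketch of the mapping bookkeeping in getProperties(): each
# entry of 'mappings' is a pair (mapping, idx), where idx selects the level
# set components the mapping acts on. The class and values below are
# hypothetical stand-ins, not part of the escript API.
import math

class ExpMapping:
    """hypothetical mapping p = exp(m)"""
    def getValue(self, m):
        return math.exp(m)

mappings = [(ExpMapping(), [0]), (ExpMapping(), [1])]
m = [0.5, 1.5]                  # stands in for a two-component level set function
props = []
for mp, idx in mappings:
    if len(idx) == 1:
        props.append(mp.getValue(m[idx[0]]))
    else:
        props.append(mp.getValue([m[k] for k in idx]))
# props == [exp(0.5), exp(1.5)]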
def out(self):
    """evaluates the expression(s) using the domain coordinates ``x`` and the current time ``t``"""
    x = self.domain.getX()
    t = self.t
    if isinstance(self.expression, str):
        out = eval(self.expression)
    else:
        out = Data(0, (len(self.expression),), x.getFunctionSpace())
        for i in range(len(self.expression)):
            out[i] = eval(self.expression[i])
    return out
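# A plain numpy sketch of the eval-based evaluation above: the expression
# strings may reference the names 'x' (coordinates) and 't' (current time),
# and a list of expressions produces one component per entry. The arrays here
# are illustrative stand-ins for escript Data objects.
import numpy as np

x = np.linspace(0., 1., 5)      # stands in for self.domain.getX()
t = 2.0                         # stands in for self.t
expression = ["x * t", "x**2 + t"]
out = np.zeros((len(expression), len(x)))
for i, e in enumerate(expression):
    out[i] = eval(e)            # each entry evaluated with x and t in scope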
def getArguments(self, P):
    """
    Returns precomputed values shared by `getDefect()` and `getGradient()`.

    :param P: pressure
    :type P: ``Scalar``
    :return: displacement u
    :rtype: ``Vector``
    """
    DIM = self.__pde.getDim()
    self.__pde.setValue(y=Data(), X=P * kronecker(DIM))
    u = self.__pde.getSolution()
    return u,
def __init__(self, **kwargs):
    super(TemperatureAdvection, self).__init__(**kwargs)
    self.declareParameter(domain=None,
                          temperature=1.,
                          velocity=numpy.zeros([3]),
                          density=1.,
                          heat_capacity=1.,
                          thermal_permabilty=1.,
                          # reference_temperature=0.,
                          # radiation_coefficient=0.,
                          thermal_source=0.,
                          fixed_temperature=0.,
                          location_fixed_temperature=Data(),
                          safety_factor=0.1)
def getGradient(self, P, u):
    """
    Returns the gradient of the defect with respect to the pressure P.

    :param P: pressure
    :type P: ``Scalar``
    :param u: corresponding displacement
    :type u: ``Vector``
    :rtype: ``Scalar``
    """
    d = inner(u, self.__w) - self.__d
    self.__pde.setValue(y=d * self.__w, X=Data())
    ustar = self.__pde.getSolution()
    return div(ustar)
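# A small dense linear-algebra sketch (numpy only) of the adjoint pattern used
# by getArguments()/getGradient() above: a forward solve gives u from the
# property p, and an adjoint solve turns the weighted misfit back into a
# gradient with respect to p. K, B and W are illustrative stand-ins for the
# PDE operator, the source coupling and the measurement weights.
import numpy as np

rng = np.random.default_rng(0)
n, m = 5, 3
K = np.eye(n) + 0.1 * rng.random((n, n))
K = K + K.T                              # symmetric "stiffness" operator
B = rng.random((n, m))                   # coupling of the property p into the PDE
W = rng.random((2, n))                   # measurement operator
d = rng.random(2)                        # data
p = rng.random(m)

u = np.linalg.solve(K, B @ p)            # forward solve (getArguments)
r = W @ u - d                            # misfit, as in d = inner(u, w) - data
ustar = np.linalg.solve(K, W.T @ r)      # adjoint solve (K is symmetric)
grad = B.T @ ustar                       # gradient of 0.5*|W u - d|^2 w.r.t. p

# finite-difference check of one component
eps = 1e-6
J = lambda q: 0.5 * np.sum((W @ np.linalg.solve(K, B @ q) - d)**2)
fd = (J(p + eps * np.eye(m)[0]) - J(p)) / eps
assert abs(fd - grad[0]) < 1e-3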
def combineData(array, shape):
    """
    Combines a nested structure of numbers and `Data` objects into a single
    object of the given shape (a `Data` object if any entry is a `Data`
    object, otherwise a numpy array).
    """
    # array could just be a single value
    if not hasattr(array, '__len__') and shape == ():
        return array

    from esys.escript import Data
    n = numpy.array(array)  # for indexing

    # find function space if any
    dom = set()
    fs = set()
    for idx in numpy.ndindex(shape):
        if isinstance(n[idx], Data):
            fs.add(n[idx].getFunctionSpace())
            dom.add(n[idx].getDomain())

    if len(dom) > 1:
        domain = dom.pop()
        while len(dom) > 0:
            if domain != dom.pop():
                raise ValueError("Mixing of domains not supported")

    if len(fs) > 0:
        d = Data(0., shape, fs.pop())  # maybe interpolate instead of using first?
    else:
        d = numpy.zeros(shape)

    for idx in numpy.ndindex(shape):
        #z=numpy.zeros(shape)
        #z[idx]=1.
        #d+=n[idx]*z # much slower!
        if hasattr(n[idx], "ndim") and n[idx].ndim == 0:
            d[idx] = float(n[idx])
        else:
            d[idx] = n[idx]
    return d
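# Usage sketch for combineData() above with plain numbers only: when no
# escript Data objects appear in the nested input, the result falls back to a
# numpy array of the requested shape.
vals = [[1., 2.], [3., 4.]]
d = combineData(vals, (2, 2))   # numpy array [[1., 2.], [3., 4.]]
s = combineData(5., ())         # a single plain value is returned unchanged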
def __init__(self, domain, omega, w, data, F, coordinates=None,
             fixAtBottom=False, tol=1e-8, saveMemory=True, scaleF=True):
    """
    initializes a new forward model with acoustic waveform inversion.

    :param domain: domain of the model
    :type domain: `Domain`
    :param omega: frequency
    :param w: weighting factors
    :type w: ``Scalar``
    :param data: real and imaginary part of data
    :type data: ``Data`` of shape (2,)
    :param F: real and imaginary part of source given at Dirac points,
              on surface or at volume.
    :type F: ``Data`` of shape (2,)
    :param coordinates: defines coordinate system to be used (not supported yet)
    :type coordinates: `ReferenceSystem` or `SpatialCoordinateTransformation`
    :param tol: tolerance of underlying PDE
    :type tol: positive ``float``
    :param saveMemory: if true stiffness matrix is deleted after solution
                       of PDE to minimize memory use. This will require more
                       compute time as the matrix needs to be reallocated.
    :type saveMemory: ``bool``
    :param scaleF: if true source F is scaled to minimize defect.
    :type scaleF: ``bool``
    :param fixAtBottom: if true pressure is fixed to zero at the bottom of
                        the domain
    :type fixAtBottom: ``bool``
    """
    super(AcousticWaveForm, self).__init__()
    self.__trafo = makeTransformation(domain, coordinates)
    if not self.getCoordinateTransformation().isCartesian():
        raise ValueError("Non-Cartesian coordinates are not supported yet.")
    if not isinstance(data, Data):
        raise ValueError("data must be an escript.Data object.")
    if not data.getFunctionSpace() == FunctionOnBoundary(domain):
        raise ValueError("data must be defined on boundary")
    if not data.getShape() == (2,):
        raise ValueError("data must have shape (2,) (real and imaginary part).")
    if w is None:
        w = 1.
    if not isinstance(w, Data):
        w = Data(w, FunctionOnBoundary(domain))
    else:
        if not w.getFunctionSpace() == FunctionOnBoundary(domain):
            raise ValueError("Weights must be defined on boundary.")
        if not w.getShape() == ():
            raise ValueError("Weights must be scalar.")

    self.__domain = domain
    self.__omega = omega
    self.__weight = w
    self.__data = data
    self.scaleF = scaleF
    if scaleF:
        A = integrate(self.__weight * length(self.__data)**2)
        if A > 0:
            self.__data *= 1./sqrt(A)

    self.__BX = boundingBox(domain)
    self.edge_lengths = np.asarray(boundingBoxEdgeLengths(domain))

    if not isinstance(F, Data):
        F = interpolate(F, DiracDeltaFunctions(domain))
    if not F.getShape() == (2,):
        raise ValueError("Source must have shape (2,) (real and imaginary part).")

    self.__F = Data()
    self.__f = Data()
    self.__f_dirac = Data()
    if F.getFunctionSpace() == DiracDeltaFunctions(domain):
        self.__f_dirac = F
    elif F.getFunctionSpace() == FunctionOnBoundary(domain):
        self.__f = F
    else:
        self.__F = F
    self.__tol = tol
    self.__fixAtBottom = fixAtBottom
    self.__pde = None
    if not saveMemory:
        self.__pde = self.setUpPDE()
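# A discrete numpy sketch of the scaleF normalisation above: the data is
# rescaled so that the weighted norm integrate(w * length(data)**2) becomes
# one; here the integral is replaced by a plain sum over sample points and
# all values are illustrative.
import numpy as np

w = np.ones(10)                             # stands in for the boundary weights
data = np.random.rand(2, 10)                # real and imaginary parts at 10 points
A = np.sum(w * np.sum(data**2, axis=0))     # discrete analogue of integrate(w*length(data)**2)
if A > 0:
    data = data / np.sqrt(A)
# now np.sum(w * np.sum(data**2, axis=0)) == 1 up to rounding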
def _getGradient(self, m, *args):
    """
    returns the gradient of the cost function at *m*.

    If the pre-computed values are not supplied `getArguments()` is called.

    :param m: current approximation of the level set function
    :type m: `Data`
    :param args: tuple of values of the parameters, pre-computed values
                 for the forward model and pre-computed values for the
                 regularization
    :rtype: `ArithmeticTuple`
    :note: returns (Y^,X) where Y^ is the gradient from regularization plus
           gradients of fwd models. X is the gradient of the regularization
           w.r.t. gradient of m.
    """
    if not self.configured:
        raise ValueError("This inversion function has not been configured yet")
    raise RuntimeError("Call to getGradient -- temporary block to see where this is used")
    # the code below is unreachable until the temporary block above is removed
    if len(args) == 0:
        args = self.getArguments(m)
    props = args[0]
    args_f = args[1]
    args_reg = args[2]

    g_J = self.regularization.getGradient(m, *args_reg)
    p_diffs = []
    # Find the derivative for each mapping.
    # If a mapping has a list of components (idx), then make a new Data
    # object with only those components, pass it to the mapping and get
    # the derivative.
    for i in range(self.numMappings):
        mm, idx = self.mappings[i]
        if idx and self.numLevelSets > 1:
            if len(idx) > 1:
                m2 = Data(0, (len(idx),), m.getFunctionSpace())
                for k in range(len(idx)):
                    m2[k] = m[idx[k]]
                dpdm = mm.getDerivative(m2)
            else:
                dpdm = mm.getDerivative(m[idx[0]])
        else:
            dpdm = mm.getDerivative(m)
        p_diffs.append(dpdm)

    Y = g_J[0]  # because g_J == (Y,X), Y_k = dKer/dm_k
    for i in range(self.numModels):
        mu = self.mu_model[i]
        f, idx_f = self.forward_models[i]
        args = tuple([props[k] for k in idx_f] + list(args_f[i]))
        Ys = f.getGradient(*args)  # this is dJ_f/d props
        # in this case f depends on one parameter props only but this can
        # still depend on several level set components
        if Ys.getRank() == 0:
            # run through all level sets k prop j is depending on:
            idx_m = self.mappings[idx_f[0]][1]
            # tmp[k] = dJ_f/d_prop * d prop/d m[idx_m[k]]
            tmp = Ys * p_diffs[idx_f[0]] * mu
            if idx_m:
                if tmp.getRank() == 0:
                    for k in range(len(idx_m)):
                        Y[idx_m[k]] += tmp  # dJ_f/d m[idx_m[k]] = tmp
                else:
                    for k in range(len(idx_m)):
                        Y[idx_m[k]] += tmp[k]  # dJ_f/d m[idx_m[k]] = tmp[k]
            else:
                Y += tmp  # dJ_f/d m = tmp
        else:
            s = 0
            # run through all props j forward model f is depending on:
            for j in range(len(idx_f)):
                # run through all level sets k prop j is depending on:
                idx_m = self.mappings[j][1]
                if p_diffs[idx_f[j]].getRank() == 0:
                    if idx_m:  # this case is not needed (really?)
                        self.logger.error("something wrong A")
                        # tmp[k] = dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                        tmp = Ys[s] * p_diffs[idx_f[j]] * mu
                        for k in range(len(idx_m)):
                            Y[idx_m[k]] += tmp[k]  # dJ_f/d m[idx_m[k]] = tmp[k]
                    else:
                        Y += Ys[s] * p_diffs[idx_f[j]] * mu
                    s += 1
                elif p_diffs[idx_f[j]].getRank() == 1:
                    l = p_diffs[idx_f[j]].getShape()[0]
                    # tmp[k] = sum_j dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                    tmp = inner(Ys[s:s+l], p_diffs[idx_f[j]]) * mu
                    if idx_m:
                        for k in range(len(idx_m)):
                            Y[idx_m[k]] += tmp  # dJ_f/d m[idx_m[k]] = tmp[k]
                    else:
                        Y += tmp
                    s += l
                else:  # rank 2 case
                    l = p_diffs[idx_f[j]].getShape()[0]
                    Yss = Ys[s:s+l]
                    if idx_m:
                        for k in range(len(idx_m)):
                            # dJ_f/d m[idx_m[k]] = tmp[k]
                            Y[idx_m[k]] += inner(Yss, p_diffs[idx_f[j]][:, k])
                    else:
                        Y += inner(Yss, p_diffs[idx_f[j]]) * mu
                    s += l
    return g_J
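# A scalar numpy sketch of the chain rule assembled in the loops above:
# dJ/dm_k = dKer/dm_k + sum_f mu_f * sum_j dJ_f/dp_j * dp_j/dm_k. Shown for
# one forward model depending on a single property mapped from two level set
# components; all values are illustrative.
import numpy as np

Y = np.array([0.4, -0.1])       # regularization part dKer/dm_k
mu = 0.5                        # trade-off factor of the forward model
dJf_dp = 1.2                    # dJ_f/d prop (rank 0 case above)
dp_dm = np.array([2.0, 0.0])    # d prop/d m_k for the two level set components
Y = Y + mu * dJf_dp * dp_dm     # total gradient dJ/dm_k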
def calculateGradientWorker(self, **args):
    """
    vnames1 gives the names to store the first component of the gradient in
    vnames2 gives the names to store the second component of the gradient in
    """
    vnames1 = args['vnames1']
    vnames2 = args['vnames2']
    props = self.importValue("props")
    mods = self.importValue("fwdmodels")
    reg = self.importValue("regularization")
    mu_model = self.importValue("mu_model")
    mappings = self.importValue("mappings")
    m = self.importValue("current_point")

    model_args = SplitInversionCostFunction.getModelArgs(self, mods)

    g_J = reg.getGradientAtPoint()
    p_diffs = []
    # Find the derivative for each mapping.
    # If a mapping has a list of components (idx), then make a new Data
    # object with only those components, pass it to the mapping and get
    # the derivative.
    for i in range(len(mappings)):
        mm, idx = mappings[i]
        if idx and numLevelSets > 1:
            if len(idx) > 1:
                m2 = Data(0, (len(idx),), m.getFunctionSpace())
                for k in range(len(idx)):
                    m2[k] = m[idx[k]]
                dpdm = mm.getDerivative(m2)
            else:
                dpdm = mm.getDerivative(m[idx[0]])
        else:
            dpdm = mm.getDerivative(m)
        p_diffs.append(dpdm)

    # Since we are going to be merging Y with other worlds, we need to make
    # sure the regularization component is only added once. However most of
    # the ops below are in terms of += so we need to create a zero object to
    # use as a starting point.
    if self.swid == 0:
        Y = g_J[0]  # because g_J == (Y,X), Y_k = dKer/dm_k
    else:
        Y = Data(0, g_J[0].getShape(), g_J[0].getFunctionSpace())

    for i in range(len(mods)):
        mu = mu_model[i]
        f, idx_f = mods[i]
        args = tuple([props[k] for k in idx_f] + list(model_args[i]))
        Ys = f.getGradient(*args)  # this is dJ_f/d props
        # in this case f depends on one parameter props only but this can
        # still depend on several level set components
        if Ys.getRank() == 0:
            # run through all level sets k prop j is depending on:
            idx_m = mappings[idx_f[0]][1]
            # tmp[k] = dJ_f/d_prop * d prop/d m[idx_m[k]]
            tmp = Ys * p_diffs[idx_f[0]] * mu
            if idx_m:
                if tmp.getRank() == 0:
                    for k in range(len(idx_m)):
                        Y[idx_m[k]] += tmp  # dJ_f/d m[idx_m[k]] = tmp
                else:
                    for k in range(len(idx_m)):
                        Y[idx_m[k]] += tmp[k]  # dJ_f/d m[idx_m[k]] = tmp[k]
            else:
                Y += tmp  # dJ_f/d m = tmp
        else:
            s = 0
            # run through all props j forward model f is depending on:
            for j in range(len(idx_f)):
                # run through all level sets k prop j is depending on:
                idx_m = mappings[j][1]
                if p_diffs[idx_f[j]].getRank() == 0:
                    if idx_m:  # this case is not needed (really?)
                        raise RuntimeError("something wrong A")
                        # tmp[k] = dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                        tmp = Ys[s] * p_diffs[idx_f[j]] * mu
                        for k in range(len(idx_m)):
                            Y[idx_m[k]] += tmp[k]  # dJ_f/d m[idx_m[k]] = tmp[k]
                    else:
                        Y += Ys[s] * p_diffs[idx_f[j]] * mu
                    s += 1
                elif p_diffs[idx_f[j]].getRank() == 1:
                    l = p_diffs[idx_f[j]].getShape()[0]
                    # tmp[k] = sum_j dJ_f/d_prop[j] * d prop[j]/d m[idx_m[k]]
                    tmp = inner(Ys[s:s+l], p_diffs[idx_f[j]]) * mu
                    if idx_m:
                        for k in range(len(idx_m)):
                            Y[idx_m[k]] += tmp  # dJ_f/d m[idx_m[k]] = tmp[k]
                    else:
                        Y += tmp
                    s += l
                else:  # rank 2 case
                    l = p_diffs[idx_f[j]].getShape()[0]
                    Yss = Ys[s:s+l]
                    if idx_m:
                        for k in range(len(idx_m)):
                            # dJ_f/d m[idx_m[k]] = tmp[k]
                            Y[idx_m[k]] += inner(Yss, p_diffs[idx_f[j]][:, k])
                    else:
                        Y += inner(Yss, p_diffs[idx_f[j]]) * mu
                    s += l

    if isinstance(vnames1, str):
        self.exportValue(vnames1, Y)
    else:
        for n in vnames1:
            self.exportValue(n, Y)
    if isinstance(vnames2, str):
        # the second component should be strictly local
        self.exportValue(vnames2, g_J[1])
    else:
        for n in vnames2:
            self.exportValue(n, g_J[1])
def __init__(self, domain, numLevelSets=1, w0=None, w1=None, wc=None,
             location_of_set_m=Data(), useDiagonalHessianApproximation=False,
             tol=1e-8, coordinates=None, scale=None, scale_c=None):
    """
    initialization.

    :param domain: domain
    :type domain: `Domain`
    :param numLevelSets: number of level sets
    :type numLevelSets: ``int``
    :param w0: weighting factor for the m**2 term. If not set zero is assumed.
    :type w0: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of shape
              (``numLevelSets`` ,) if ``numLevelSets`` > 1
    :param w1: weighting factor for the grad(m_i) terms. If not set zero is
               assumed.
    :type w1: ``Vector`` if ``numLevelSets`` == 1 or `Data` object of shape
              (``numLevelSets`` , DIM) if ``numLevelSets`` > 1
    :param wc: weighting factor for the cross gradient terms. If not set
               zero is assumed. Used for the case if ``numLevelSets`` > 1
               only. Only values ``wc[l,k]`` in the lower triangle (l<k)
               are used.
    :type wc: `Data` object of shape (``numLevelSets`` , ``numLevelSets``)
    :param location_of_set_m: marks location of zero values of the level
                              set function ``m`` by a positive entry.
    :type location_of_set_m: ``Scalar`` if ``numLevelSets`` == 1 or `Data`
                             object of shape (``numLevelSets`` ,) if
                             ``numLevelSets`` > 1
    :param useDiagonalHessianApproximation: if True cross gradient terms
              between level set components are ignored when calculating
              approximations of the inverse of the Hessian Operator. This
              can speed up the calculation of the inverse but may lead to
              an increase of the number of iteration steps in the inversion.
    :type useDiagonalHessianApproximation: ``bool``
    :param tol: tolerance when solving the PDE for the inverse of the
                Hessian Operator
    :type tol: positive ``float``
    :param coordinates: defines coordinate system to be used
    :type coordinates: `ReferenceSystem` or `SpatialCoordinateTransformation`
    :param scale: weighting factor for level set function variation terms.
                  If not set one is used.
    :type scale: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of
                 shape (``numLevelSets`` ,) if ``numLevelSets`` > 1
    :param scale_c: scale for the cross gradient terms. If not set one is
                    assumed. Used for the case if ``numLevelSets`` > 1 only.
                    Only values ``scale_c[l,k]`` in the lower triangle (l<k)
                    are used.
    :type scale_c: `Data` object of shape (``numLevelSets``,``numLevelSets``)
    """
    if w0 is None and w1 is None:
        raise ValueError("Values for w0 or for w1 must be given.")
    if wc is None and numLevelSets > 1:
        raise ValueError("Values for wc must be given.")

    self.logger = logging.getLogger('inv.%s' % self.__class__.__name__)
    self.__domain = domain
    DIM = self.__domain.getDim()
    self.__numLevelSets = numLevelSets
    self.__trafo = makeTransformation(domain, coordinates)
    self.__pde = LinearPDE(self.__domain, numEquations=self.__numLevelSets,
                           numSolutions=self.__numLevelSets)
    self.__pde.getSolverOptions().setTolerance(tol)
    self.__pde.setSymmetryOn()
    self.__pde.setValue(A=self.__pde.createCoefficient('A'),
                        D=self.__pde.createCoefficient('D'))
    try:
        self.__pde.setValue(q=location_of_set_m)
    except IllegalCoefficientValue:
        raise ValueError("Unable to set location of fixed level set function.")

    # =========== check the shape of the scales: ========================
    if scale is None:
        if numLevelSets == 1:
            scale = 1.
        else:
            scale = np.ones((numLevelSets,))
    else:
        scale = np.asarray(scale)
        if numLevelSets == 1:
            if scale.shape == ():
                if not scale > 0:
                    raise ValueError("Value for scale must be positive.")
            else:
                raise ValueError("Unexpected shape %s for scale." % scale.shape)
        else:
            if scale.shape == (numLevelSets,):
                if not min(scale) > 0:
                    raise ValueError("All values for scale must be positive.")
            else:
                raise ValueError("Unexpected shape %s for scale." % scale.shape)

    if scale_c is None or numLevelSets < 2:
        scale_c = np.ones((numLevelSets, numLevelSets))
    else:
        scale_c = np.asarray(scale_c)
        if scale_c.shape == (numLevelSets, numLevelSets):
            if not all([[scale_c[l, k] > 0. for l in range(k)]
                        for k in range(1, numLevelSets)]):
                raise ValueError("All values in the lower triangle of scale_c must be positive.")
        else:
            raise ValueError("Unexpected shape %s for scale_c." % scale_c.shape)

    # ===== check the shape of the weights: =============================
    if w0 is not None:
        w0 = interpolate(w0, self.__pde.getFunctionSpaceForCoefficient('D'))
        s0 = w0.getShape()
        if numLevelSets == 1:
            if not s0 == ():
                raise ValueError("Unexpected shape %s for weight w0." % (s0,))
        else:
            if not s0 == (numLevelSets,):
                raise ValueError("Unexpected shape %s for weight w0." % (s0,))
        if not self.__trafo.isCartesian():
            w0 *= self.__trafo.getVolumeFactor()

    if w1 is not None:
        w1 = interpolate(w1, self.__pde.getFunctionSpaceForCoefficient('A'))
        s1 = w1.getShape()
        if numLevelSets == 1:
            if not s1 == (DIM,):
                raise ValueError("Unexpected shape %s for weight w1." % (s1,))
        else:
            if not s1 == (numLevelSets, DIM):
                raise ValueError("Unexpected shape %s for weight w1." % (s1,))
        if not self.__trafo.isCartesian():
            f = self.__trafo.getScalingFactors()**2 * self.__trafo.getVolumeFactor()
            if numLevelSets == 1:
                w1 *= f
            else:
                for i in range(numLevelSets):
                    w1[i, :] *= f

    if numLevelSets == 1:
        wc = None
    else:
        wc = interpolate(wc, self.__pde.getFunctionSpaceForCoefficient('A'))
        sc = wc.getShape()
        if not sc == (numLevelSets, numLevelSets):
            raise ValueError("Unexpected shape %s for weight wc." % (sc,))
        if not self.__trafo.isCartesian():
            raise ValueError("Non-Cartesian coordinates for the cross-gradient term are not supported yet.")

    # ============= now we rescale weights: =============================
    L2s = np.asarray(boundingBoxEdgeLengths(domain))**2
    L4 = 1 / np.sum(1 / L2s)**2
    if numLevelSets == 1:
        A = 0
        if w0 is not None:
            A = integrate(w0)
        if w1 is not None:
            A += integrate(inner(w1, 1 / L2s))
        if A > 0:
            f = scale / A
            if w0 is not None:
                w0 *= f
            if w1 is not None:
                w1 *= f
        else:
            raise ValueError("Non-positive weighting factor detected.")
    else:  # numLevelSets > 1
        for k in range(numLevelSets):
            A = 0
            if w0 is not None:
                A = integrate(w0[k])
            if w1 is not None:
                A += integrate(inner(w1[k, :], 1 / L2s))
            if A > 0:
                f = scale[k] / A
                if w0 is not None:
                    w0[k] *= f
                if w1 is not None:
                    w1[k, :] *= f
            else:
                raise ValueError("Non-positive weighting factor for level set component %d detected." % k)

            # and now the cross-gradient:
            if wc is not None:
                for l in range(k):
                    A = integrate(wc[l, k]) / L4
                    if A > 0:
                        f = scale_c[l, k] / A
                        wc[l, k] *= f
                    # else:
                    #     raise ValueError("Non-positive weighting factor for cross-gradient level set components %d and %d detected." % (l, k))

    self.__w0 = w0
    self.__w1 = w1
    self.__wc = wc
    self.__pde_is_set = False
    if self.__numLevelSets > 1:
        self.__useDiagonalHessianApproximation = useDiagonalHessianApproximation
    else:
        self.__useDiagonalHessianApproximation = True
    self._update_Hessian = True
    self.__num_tradeoff_factors = numLevelSets + ((numLevelSets - 1) * numLevelSets) // 2
    self.setTradeOffFactors()
    self.__vol_d = vol(self.__domain)
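# A numpy sketch of the weight rescaling performed above for a single level
# set: the factor f is chosen so that integrate(w0) + integrate(inner(w1, 1/L**2))
# equals the requested scale. Integrals are replaced by sums over cells of a
# hypothetical constant volume dV; all numbers are illustrative.
import numpy as np

L2s = np.array([100., 200., 50.])**2        # squared bounding box edge lengths
dV = 1.0                                    # hypothetical cell volume
w0 = np.full(1000, 0.1)                     # hypothetical cell values of w0
w1 = np.full((1000, 3), 2.0)                # hypothetical cell values of w1
scale = 1.0

A = np.sum(w0) * dV + np.sum(w1 @ (1. / L2s)) * dV
if A > 0:
    f = scale / A
    w0, w1 = w0 * f, w1 * f
# after rescaling, np.sum(w0)*dV + np.sum(w1 @ (1./L2s))*dV == scale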