def createInv(self, nlay=3, lam=100., verbose=True, **kwargs):
    """Create inversion instance (and fop if necessary with nlay).

    Parameters
    ----------
    nlay : int [3]
        number of layers for the block model forward operator
    lam : float [100.]
        initial regularization strength (cooled by the Marquardt scheme)
    verbose : bool [True]
        verbose inversion output
    **kwargs :
        lambdaFactor : float [0.8] - lambda reduction factor per iteration
        robust : bool [False] - robust (IRLS) data reweighting

    Returns
    -------
    INV : pg.RInversion
        the created inversion instance (also stored as self.INV)
    """
    # NOTE(review): createFOP is called on the MRS class, not self —
    # presumably a static-like factory; verify against the class definition
    self.fop = MRS.createFOP(nlay, self.K, self.z, self.t)
    self.setBoundaries()
    self.INV = pg.RInversion(self.data, self.fop, verbose)
    self.INV.setLambda(lam)
    self.INV.setMarquardtScheme(kwargs.pop('lambdaFactor', 0.8))
    self.INV.stopAtChi1(False)  # now in MarquardtScheme
    self.INV.setDeltaPhiAbortPercent(0.5)
    self.INV.setAbsoluteError(np.abs(self.error))
    self.INV.setRobustData(kwargs.pop('robust', False))
    return self.INV
def DebyeDecomposition(fr, phi, maxfr=None, tv=None, verbose=False,
                       zero=False, err=0.25e-3, lam=10., blocky=False):
    """Debye decomposition of a phase spectrum.

    Parameters
    ----------
    fr, phi : array
        frequency and phase vectors
    maxfr : float [None]
        upper frequency limit; only f <= maxfr with phi >= 0 are used
    tv : array [None]
        fixed vector of relaxation times (else derived from fr range)
    verbose : bool [False]
        verbose inversion output
    zero : bool [False]
        use three-region setup with additional boundary terms
    err : float [0.25e-3]
        absolute data error
    lam : float [10.]
        regularization strength
    blocky : bool [False]
        blocky (L1-type) model constraints

    Returns
    -------
    tvec, mvec, response, idx :
        relaxation times, chargeability spectrum, model response (*1e3)
        and the boolean data-selection index.
    """
    if maxfr is not None:
        idx = (fr <= maxfr) & (phi >= 0.)
        phi1 = phi[idx]
        fr1 = fr[idx]
        # report the actually used range (was printing the unselected fr)
        print("using frequencies from ", N.min(fr1), " to ", N.max(fr1), "Hz")
    else:
        phi1 = phi
        fr1 = fr
        # bugfix: idx was undefined in this branch although returned below
        idx = N.ones(len(fr), dtype=bool)
    if tv is None:
        # default tau discretization from the frequency range (f = 1/(2 pi t))
        tmax = 1. / N.min(fr1) / 2. / N.pi * 4.
        tmin = 1. / N.max(fr1) / 2. / N.pi / 8.
        tvec = N.logspace(N.log10(tmin), N.log10(tmax), 30)
    else:
        tvec = tv
    f = DebyeModelling(fr1, tvec, zero=zero)
    tvec = f.t_
    tm = pg.RTransLog()
    start = pg.RVector(len(tvec), 1e-4)
    if zero:
        f.region(-1).setConstraintType(0)  # min length
        f.region(0).setConstraintType(1)  # smoothness
        f.region(1).setConstraintType(0)  # min length
        f.regionManager().setInterRegionConstraint(-1, 0, 1.)
        f.regionManager().setInterRegionConstraint(0, 1, 1.)
        f.region(-1).setTransModel(tm)
        f.region(0).setTransModel(tm)
        f.region(1).setTransModel(tm)
        f.region(-1).setModelControl(1000.)
        f.region(1).setModelControl(1000.)
    else:
        f.regionManager().setConstraintType(1)  # smoothness
    inv = pg.RInversion(pg.asvector(phi1 * 1e-3), f, verbose)
    inv.setAbsoluteError(pg.RVector(len(fr1), err))
    inv.setLambda(lam)
    inv.setModel(start)
    inv.setBlockyModel(blocky)
    if zero:
        inv.setReferenceModel(start)
    else:
        inv.setTransModel(tm)
    mvec = inv.run()
    resp = inv.response()
    return tvec, mvec, N.array(resp) * 1e3, idx
def createInv(self, fop, verbose=True, doSave=False):
    """Build the default traveltime inversion instance.

    Stores the linear data transform (tD) and log-LU model transform (tM)
    on the instance and attaches the given forward operator.
    base api (typically done by run)
    """
    inversion = pg.RInversion(verbose, doSave)
    self.tD = pg.RTrans()
    inversion.setTransData(self.tD)
    self.tM = pg.RTransLogLU()
    inversion.setTransModel(self.tM)
    inversion.setForwardOperator(fop)
    return inversion
def block1dInversion(self, nlay=2, lam=100., show=False, verbose=True,
                     uncertainty=False):
    """Invert all data together by a 1D model (more general solution).

    Stacks data and errors of all soundings, runs a joint Marquardt-type
    block inversion with nlay layers and fills self.WMOD/self.TMOD with
    the (identical) per-sounding models for display.
    """
    data, error = pg.RVector(), pg.RVector()
    # concatenate data and (real part of) errors of all soundings
    for mrs in self.mrs:
        data = pg.cat(data, mrs.data)
        error = pg.cat(error, np.real(mrs.error))
    # f = JointMRSModelling(self.mrs, nlay)
    f = MultiFOP(self.mrs, nlay)
    mrsobj = self.mrs[0]
    # start values and bounds for the three parameter regions are taken
    # from the first sounding
    for i in range(3):
        f.region(i).setParameters(mrsobj.startval[i], mrsobj.lowerBound[i],
                                  mrsobj.upperBound[i])
    INV = pg.RInversion(data, f, verbose)
    INV.setLambda(lam)
    INV.setMarquardtScheme(0.8)
    # INV.stopAtChi1(False)  # should be already in MarquardtScheme
    INV.setDeltaPhiAbortPercent(0.5)
    INV.setAbsoluteError(error)
    model = INV.run()
    m0 = self.mrs[0]
    m0.model = np.asarray(model)
    if uncertainty:
        from pygimli.utils import iterateBounds
        m0.modelL, m0.modelU = iterateBounds(INV, dchi2=INV.chi2() / 2,
                                             change=1.2)
    if show:
        self.show1dModel()
    # %% fill up 2D model (for display only)
    self.WMOD, self.TMOD = [], []
    # model layout: [thicknesses | water contents | relaxation times]
    thk = model[0:nlay - 1]
    wc = model[nlay - 1:2 * nlay - 1]
    t2 = model[2 * nlay - 1:3 * nlay - 1]
    for i in range(len(self.mrs)):
        self.WMOD.append(np.hstack((thk, wc)))
        self.TMOD.append(np.hstack((thk, t2)))
    return model
def createInv(self, nlay, lam=100., errVES=3, verbose=True):
    """Create Marquardt type inversion instance with data transformation.

    Combines MRS (linear) and VES (log) data in one cumulative transform;
    errVES is the relative VES error in percent.
    """
    self.createFOP(nlay)
    self.tMod = pg.RTransLog()
    self.tMRS = pg.RTrans()
    self.tVES = pg.RTransLog()
    # cumulative transform: first len(data) values are MRS, rest is VES
    self.transData = pg.RTransCumulative()
    self.transData.push_back(self.tMRS, len(self.data))
    self.transData.push_back(self.tVES, len(self.rhoa))
    data = pg.cat(self.data, self.rhoa)
    self.INV = pg.RInversion(data, self.f, self.transData, verbose)
    self.INV.setLambda(lam)
    self.INV.setMarquardtScheme(0.8)
    self.INV.stopAtChi1(False)  # now in MarquardtScheme
    self.INV.setDeltaPhiAbortPercent(0.5)
    # self.INV.setMaxIter(1)
    # absolute MRS error plus relative VES error converted to absolute
    error = pg.cat(self.error, self.rhoa * errVES / 100.)
    self.INV.setAbsoluteError(error)
def blockLCInversion(self, nlay=2, startModel=None, **kwargs):
    """Laterally constrained (piece-wise 1D) block inversion.

    kwargs: zWeight [0], cType [1], startvec, lam [100], maxIter [20].
    Fills self.WMOD/self.TMOD and per-sounding misfit statistics.
    """
    data, error, self.nData = pg.RVector(), pg.RVector(), []
    # stack all soundings; remember lengths for later per-sounding misfits
    for mrs in self.mrs:
        data = pg.cat(data, mrs.data)
        error = pg.cat(error, mrs.error)
        self.nData.append(len(mrs.data))
    fop = MRSLCI(self.mrs, nlay=nlay)
    fop.region(0).setZWeight(kwargs.pop('zWeight', 0))
    fop.region(0).setConstraintType(kwargs.pop('cType', 1))
    transData, transMod = pg.RTrans(), pg.RTransLog()  # LU(1., 500.)
    if startModel is None:
        # default start model: joint 1D inversion of all data
        startModel = self.block1dInversion(nlay, verbose=False)
    model = kwargs.pop('startvec', np.tile(startModel, len(self.mrs)))
    INV = pg.RInversion(data, fop, transData, transMod, True, False)
    INV.setModel(model)
    INV.setReferenceModel(model)
    INV.setAbsoluteError(error)
    INV.setLambda(kwargs.pop('lam', 100))
    INV.setMaxIter(kwargs.pop('maxIter', 20))
    # INV.stopAtChi1(False)
    INV.setLambdaFactor(0.9)
    INV.setDeltaPhiAbortPercent(0.1)
    model = INV.run()
    # split the stacked model back into per-sounding blocks
    self.WMOD, self.TMOD = [], []
    for par in np.reshape(model, (len(self.mrs), 3 * nlay - 1)):
        thk = par[0:nlay - 1]
        self.WMOD.append(np.hstack((thk, par[nlay - 1:2 * nlay - 1])))
        self.TMOD.append(np.hstack((thk, par[2 * nlay - 1:3 * nlay - 1])))
    ind = np.hstack((0, np.cumsum(self.nData)))
    resp = INV.response()
    misfit = data - resp
    emisfit = misfit / error
    misfit *= 1e9  # presumably conversion V -> nV; TODO confirm units
    self.totalChi2 = INV.chi2()
    self.totalRMS = INV.absrms() * 1e9
    # per-sounding RMS and chi^2 values
    self.RMSvec, self.Chi2vec = [], []
    for i in range(len(self.mrs)):
        self.RMSvec.append(np.sqrt(np.mean(misfit[ind[i]:ind[i + 1]]**2)))
        self.Chi2vec.append(np.mean(emisfit[ind[i]:ind[i + 1]]**2))
def findShapeFunction(uN, dim, nCoeff, pnts, pascale, serendipity,
                      powCombination):
    """Fit polynomial shape-function coefficients to nodal values uN.

    Builds a polynomial forward operator, constructs a start model from a
    2D polynomial base and inverts without regularization (lambda=0).
    Returns the resulting polynomial function N.
    """
    fop = g.PolynomialModelling(dim, nCoeff, pnts)
    fop.setPascalsStyle(pascale)
    fop.setSerendipityStyle(serendipity)
    fop.setPowCombinationTmp(powCombination)
    # auxiliary 2D operator only used to generate the start coefficients
    fop2 = g.PolynomialModelling(2, 3, pnts)
    fop2.setPascalsStyle(True)
    xy = g.RPolynomialFunction(g.RVector(fop2.polynomialFunction().size()))
    xy.fill(fop2.startModel())
    print("xy:", xy)
    # replicate the 9 xy-coefficients into the three 9-blocks of the start
    start = g.RVector(27)
    start.setVal((xy).coeff()[0:9], 0, 9)
    start.setVal((xy).coeff()[0:9], 9, 18)
    start.setVal((xy).coeff()[0:9], 18, 27)
    # zero selected entries of the last block — TODO confirm rationale
    start[18 + 2] = 0
    start[18 + 4] = 0
    start[18 + 6] = 0
    print(start)
    fop.setStartModel(start)
    # print fop.setStartModel()
    tmp = g.RPolynomialFunction(g.RVector(fop.polynomialFunction().size()))
    tmp.fill(fop.startModel())
    print("base:", tmp)
    # exact fit: no error weighting, no damping, tight CGLS tolerance
    inv = g.RInversion(uN, fop, False, False)
    inv.setRelativeError(0.0)
    inv.setLambda(0)
    inv.stopAtChi1(False)
    inv.setCGLSTolerance(1e-40)
    inv.setMaxIter(20)
    coeff = inv.run()
    N = fop.polynomialFunction()
    return N
def fitCCEMPhi(f, phi, ePhi=0.001, lam=1000., verbose=True, mpar=(0.2, 0, 1),
               taupar=(1e-2, 1e-5, 100), cpar=(0.25, 0, 1),
               empar=(1e-7, 1e-9, 1e-5)):
    """Fit a Cole-Cole term with additional EM term to phase."""
    fCCEM = PeltonPhiEM(f)
    # regions 0..3 hold m, tau, c and tau-EM as (start, lower, upper)
    for region, params in enumerate((mpar, taupar, cpar, empar)):
        fCCEM.region(region).setParameters(*params)
    inv = pg.RInversion(phi, fCCEM, False)  # non-verbose inversion instance
    inv.setAbsoluteError(ePhi)  # absolute phase error (1 mrad default)
    inv.setLambda(lam)  # strong initial damping, cooled while iterating
    inv.setMarquardtScheme(0.8)  # lambda shrinks by 20% per iteration
    model = inv.run()
    if verbose:
        inv.echoStatus()
    return model, np.asarray(inv.response())
def fitCCAbs(f, amp, error=0.01, lam=1000., mstart=None,
             taupar=(1e-2, 1e-5, 100), cpar=(0.5, 0, 1)):
    """Fit amplitude spectrum by Cole-Cole model.

    Returns the fitted model (rho0, m, tau, c) and its response as arrays.
    """
    fCC = ColeColeAbs(f)
    tLog = pg.RTransLog()
    # region 0 = rho0, started at the maximum amplitude
    fCC.region(0).setStartValue(max(amp))
    if mstart is None:  # compute from amplitude decay
        mstart = 1. - min(amp) / max(amp)
    fCC.region(1).setParameters(mstart, 0, 1)  # m (start,lower,upper)
    fCC.region(2).setParameters(*taupar)  # tau
    fCC.region(3).setParameters(*cpar)  # c
    ICC = pg.RInversion(amp, fCC, tLog, tLog, False)  # set up inversion class
    ICC.setRelativeError(error)  # perr + ePhi/data)
    ICC.setLambda(lam)  # start with large damping and cool later
    ICC.setMarquardtScheme(0.8)  # lower lambda by 20%/it., no stop chi=1
    model = np.asarray(ICC.run())  # run inversion
    ICC.echoStatus()
    response = np.asarray(ICC.response())
    return model, response
def startModel(self):
    """Define the starting model."""
    # constant vector of 0.5 with one entry per coefficient
    return pg.RVector(self.nc_, 0.5)


if __name__ == "__main__":
    # demo: fit a noisy line y = 1.1 + 0.6 x with a degree-1 polynomial
    nc = 1  # polynomial degree
    x = np.arange(10.0)
    y = 1.1 + 0.6 * x
    y += np.random.randn(len(y)) * 0.2
    # two coefficients and x-vector (first data column)
    f = FunctionModelling(nc + 1, x)
    # initialize inversion with data and forward operator and set options
    inv = pg.RInversion(y, f, True, True)
    # constant absolute error of 0.01 (not necessary, only for chi^2)
    inv.setAbsoluteError(0.01)
    # the problem is well-posed and does not need regularization
    inv.setLambda(0)
    # actual inversion run yielding coefficient model
    coeff = inv.run()
    # get actual response and write to file.
    pg.save(inv.response(), "resnp.out")
    # print result to screen and save coefficient vector to file
    s = "y = " + str(round(coeff[0] * 1000) / 1000)
# Inspect the Jacobian of the forward operator
print(fop.jacobian())
print(fop.jacobian().rows())
# fop = pb.DCMultiElectrodeModelling(mesh, data)
fop.regionManager().region(1).setBackground(True)
fop.createRefinedForwardMesh(refine=True, pRefine=False)
# split the complex data into magnitude and (negated) phase parts
cData = pb.getComplexData(data)
mag = pg.abs(cData)
phi = -pg.phase(cData)
print(pg.norm(mag - data('rhoa')))
print(pg.norm(phi - data('ip') / 1000))
# joint inversion of concatenated magnitude and phase data
inv = pg.RInversion(pg.cat(mag, phi), fop, verbose=True, dosave=True)
# cumulative data transform: log for magnitude, linear for phase
dataTrans = pg.RTransCumulative()
datRe = pg.RTransLog()
datIm = pg.RTrans()
dataTrans.add(datRe, data.size())
dataTrans.add(datIm, data.size())
# log model transforms for both real and imaginary model parts
modRe = pg.RTransLog()
modIm = pg.RTransLog()
modelTrans = pg.RTransCumulative()
modelTrans.add(modRe, fop.regionManager().parameterCount())
modelTrans.add(modIm, fop.regionManager().parameterCount())
inv.setTransData(dataTrans)
inv.setTransModel(modelTrans)
def harmfit(y, x=None, error=None, nc=42, resample=None, lam=0.1,
            window=None, verbose=False, dosave=False, lineSearch=True,
            robust=False, maxiter=20):
    """HARMFIT - GIMLi based curve-fit by harmonic functions

    Parameters
    ----------
    y : 1d-array - values to be fitted

    x : 1d-array(len(y)) - data abscissa data. default: [0 .. len(y))

    error : 1d-array(len(y)) error of y. default (absolute error = 0.01)

    nc : int - Number of harmonic coefficients

    resample : 1d-array - resample y to x using fitting coeffients

    window : int - just fit data inside window bounds

    Returns
    -------
    response : 1d-array(len(resample) or len(x)) - smoothed values

    coefficients : 1d-array - fitting coefficients
    """
    if x is None:
        x = np.arange(len(y))
    # else:
    #     if not isinstance(x, pg.RVector):
    #         x = pg.asvector(x)
    #
    #     if not isinstance(y, pg.RVector):
    #         y = pg.asvector(y)
    xToFit = None
    yToFit = None
    if window is not None:
        # select the data inside the window bounds
        idx = pg.find((x >= window[0]) & (x < window[1]))
        # idx = getIndex(x , lambda v: v > window[0] and v < window[1])
        # NOTE(review): x(idx) call-style indexing assumes pg vectors here
        xToFit = x(idx)
        yToFit = y(idx)
        if error is not None:
            error = error(idx)
    else:
        xToFit = x
        yToFit = y
    # print xToFit
    # print yToFit
    fop = pg.HarmonicModelling(nc, xToFit, verbose)
    inv = pg.RInversion(yToFit, fop, verbose, dosave)
    if error is not None:
        inv.setAbsoluteError(error)
    else:
        inv.setAbsoluteError(0.01)
    inv.setMarquardtScheme(0.8)
    if error is not None:
        # only stop at chi^2=1 if a meaningful error was given
        inv.stopAtChi1(True)
    inv.setLambda(lam)
    inv.setMaxIter(maxiter)
    inv.setLineSearch(lineSearch)
    inv.setRobustData(robust)
    # inv.setConstraintType(0)
    coeff = inv.run()
    print(inv.chi2())
    if resample is not None:
        if not isinstance(resample, pg.RVector):
            resample = pg.asvector(resample)
        ret = fop.response(coeff, resample)
        if window is not None:
            # zero the response outside the fitted window
            # print pg.find((resample < window[0]) | (resample >= window[1]))
            ret.setVal(
                0.0,
                pg.find((resample < window[0]) | (resample >= window[1])))
            # idx = getIndex(resample,
            #                lambda v: v <= window[0] or v >= window[1])
            # for i in idx: ret[i] = 0.0
        return ret, coeff
    else:
        return inv.response(), coeff
# model transformations: bounded log for resistivity, log for thickness/data
transRho = g.RTransLogLU(1., 1000.)
transThk = g.RTransLog()
transRhoa = g.RTransLog()
f = g.DC1dModelling(nlay, ab2, mn2)
f.region(0).setTransModel(transThk)  # region 0 = thickness
f.region(1).setTransModel(transRho)  # region 1 = resistivity
paraDepth = max(ab2) / 3  # NOTE(review): unused below — kept for reference
f.region(0).setStartValue(max(ab2) / 3. / nlay / 2.)
f.region(1).setStartValue(P.median(rhoa))
model = f.createStartVector()
# perturb the second layer resistivity to break symmetry
model[nlay] *= 1.5
inv = g.RInversion(rhoa, f, True)
inv.setModel(model)
inv.setTransData(transRhoa)
inv.setRelativeError(errPerc / 100.0)
inv.setLambda(lam)
inv.setMarquardtScheme(0.9)
model = inv.run()
# plot measured and fitted apparent resistivity as depth sounding curve
fig = P.figure(1)
fig.clf()
ax1 = fig.add_subplot(121)
P.loglog(rhoa, ab2, 'rx-', inv.response(), ab2, 'b-')
P.axis('tight')
P.ylim((max(ab2), min(ab2)))
P.grid(which='both')
P.xlabel(r"\rho_a in \Omegam")
def createINV(self, data, relErrorP=3., startmodel=None, **kwargs):
    """Create inversion instance

    Parameters
    ----------

    data : array_like
        Data array you would like to fit with this inverse modelling
        approach.

    relErrorP : float [3.]
        Percentage value of the relative error you assume. Default 3. means
        a 3 % error is assumed. Affects the chi2 criteria during the
        inversion process and therefore the inversion result (Inversion
        tries to fit the given data within the given errors).

    startmodel : array_like [None]
        Optional possibility to define the starting model for the inversion
        routine. The default will be the mean of the given data.

    **kwargs : keyword arguments
    ----------------------------

    Keyword arguments are redirected to the block inversion instance only!

    lambdaFactor: float < 1.0 [0.8]
        Inversion in Marquardt scheme reduces the lambda from initial high
        values down to a certain minimum. The reduction per step is
        represented by this value. Default is a reduction to 80% of the
        previous step (By the way, the default start lambda is 1000).

    robust: boolean [False]
        Recalculation of the errors to reduce the weight of spikes in the
        data. Not necessary for synthetic or "good" field data.
    """
    if self.FOP is None:
        self.createFOP()
    self.INV = pg.RInversion(data, self.FOP, False)
    self.applyDataTrans()
    if self.type == 'block':
        # bugfix: a debug print formerly popped 'lambdaFactor' without a
        # default (KeyError if absent) and, when present, consumed the
        # user value so the subsequent pop always fell back to 0.8
        self.INV.setMarquardtScheme(kwargs.pop('lambdaFactor', 0.8))
        self.INV.setRobustData(kwargs.pop('robust', False))
    if self.type == 'smooth':
        pass
    self.INV.setRelativeError(relErrorP / 100.0)
    if startmodel is not None:
        self.startmodel = startmodel
    if self.startmodel is None:
        self.createStartModel(data)
    if self.type == 'smooth':
        self.INV.setModel(self.startmodel)
    else:
        self.FOP.region(0).setStartValue(self.startmodel[0])
        self.FOP.region(1).setStartValue(self.startmodel[1])
def main():
    """Load ERT data, build mesh/forward operator from inv.conf, invert."""
    # read config file
    conf_file = "inv.conf"
    with open(conf_file, "r") as fd:
        conf = json.load(fd)
    # res = pb.Resistivity("input.dat")
    # res.invert()
    # np.savetxt('resistivity.vector', res.resistivity)
    # return
    # load data file
    data = pg.DataContainerERT("input.dat")
    # remove invalid data
    oldsize = data.size()
    data.removeInvalid()
    newsize = data.size()
    if newsize < oldsize:
        print('Removed ' + str(oldsize - newsize) + ' values.')
    if not data.allNonZero('rhoa'):
        print("No or partial rhoa values.")
        return
    # check, compute error
    if data.allNonZero('err'):
        error = data('err')
    else:
        print("estimate data error")
        error = conf["relativeError"] + conf["absoluteError"] / data('rhoa')
    # create FOP
    fop = pg.DCSRMultiElectrodeModelling(verbose=conf["verbose"])
    fop.setThreadCount(psutil.cpu_count(logical=False))
    fop.setData(data)
    # create Inv
    inv = pg.RInversion(verbose=conf["verbose"], dosave=False)
    # variables tD, tM are needed to prevent destruct objects
    tD = pg.RTransLog()
    tM = pg.RTransLogLU()
    inv.setTransData(tD)
    inv.setTransModel(tM)
    inv.setForwardOperator(fop)
    # mesh: either build a parameter mesh from sensors or load from file
    if conf["meshFile"] == "":
        depth = conf["depth"]
        if depth is None:
            depth = pg.DCParaDepth(data)
        poly = pg.meshtools.createParaMeshPLC(
            data.sensorPositions(), paraDepth=depth, paraDX=conf["paraDX"],
            paraMaxCellSize=conf["maxCellArea"], paraBoundary=2, boundary=2)
        if conf["verbose"]:
            print("creating mesh...")
        mesh = pg.meshtools.createMesh(poly, quality=conf["quality"],
                                       smooth=(1, 10))
    else:
        mesh = pg.Mesh(pg.load(conf["meshFile"]))
    mesh.createNeighbourInfos()
    if conf["verbose"]:
        print(mesh)
    sys.stdout.flush()  # flush before multithreading
    fop.setMesh(mesh)
    fop.regionManager().setConstraintType(1)
    if not conf["omitBackground"]:
        if fop.regionManager().regionCount() > 1:
            fop.regionManager().region(1).setBackground(True)
    if conf["meshFile"] == "":
        fop.createRefinedForwardMesh(True, False)
    else:
        fop.createRefinedForwardMesh(conf["refineMesh"], conf["refineP2"])
    paraDomain = fop.regionManager().paraDomain()
    inv.setForwardOperator(fop)  # necessary?
    # inversion parameters
    inv.setData(data('rhoa'))
    inv.setRelativeError(error)
    fop.regionManager().setZWeight(conf['zWeight'])
    inv.setLambda(conf['lam'])
    inv.setMaxIter(conf['maxIter'])
    inv.setRobustData(conf['robustData'])
    inv.setBlockyModel(conf['blockyModel'])
    inv.setRecalcJacobian(conf['recalcJacobian'])
    pc = fop.regionManager().parameterCount()
    # homogeneous start model from the data median
    startModel = pg.RVector(pc, pg.median(data('rhoa')))
    inv.setModel(startModel)
    # Run the inversion
    sys.stdout.flush()  # flush before multithreading
    model = inv.run()
    # map the model onto the parameter domain cells and save
    resistivity = model(paraDomain.cellMarkers())
    np.savetxt('resistivity.vector', resistivity)
    print("Done.")
    """Return a meaningful starting model."""
    # NOTE(review): the enclosing `def` is outside this view; this fragment
    # is the tail of a startModel-like method
    return pg.RVector(self.nc_, 0.5)


# demo script: fit a noisy line y = 1.1 + 2.1 x with a polynomial operator
x = np.arange(0., 10., 1)
# evaluate f(x) = 1.1 + 2.1 * x
y = 1.1 + 2.1 * x
# add some random values with standard deviation 0.1
y += np.random.randn(len(y)) * 0.1
print((x, y))
nP = 3
# two coefficients and x-vector (first data column)
fop = FunctionModelling(nP, x)
# initialize inversion with data and forward operator and set options
inv = pg.RInversion(y, fop)
# constant absolute error of 0.01 (not necessary, only for chi^2)
inv.setAbsoluteError(0.01)
# the problem is well-posed and does not need regularization
inv.setLambda(0)
# actual inversion run yielding coefficient model
coeff = inv.run()
plt.plot(x, y, 'rx', x, inv.response(), 'b-')
plt.show()
def fitDebyeModel(self, ePhi=0.001, lam=1e3, lamFactor=0.8,
                  mint=None, maxt=None, nt=None, new=True,
                  showFit=False, cType=1):
    """Fit a (smooth) continuous Debye model (Debye decomposition).

    Parameters
    ----------
    ePhi : float
        absolute error of phase angle
    lam : float
        regularization parameter
    lamFactor : float
        regularization factor for subsequent iterations
    mint/maxt : float
        minimum/maximum tau values to use (else automatically from f)
    nt : int
        number of tau values (default number of frequencies * 2)
    new : bool
        new implementation (experimental)
    showFit : bool
        show fit
    cType : int
        constraint type (1/2=smoothness 1st/2nd order, 0=minimum norm)
    """
    nf = len(self.f)
    if mint is None:
        mint = .1 / max(self.f)
    if maxt is None:
        maxt = .5 / min(self.f)
    if nt is None:
        nt = nf * 2
    # discretize tau, setup DD and perform DD inversion
    self.tau = np.logspace(log10(mint), log10(maxt), nt)
    phi = self.phi
    tLin, tLog, tM = pg.RTrans(), pg.RTransLog(), pg.RTransLog()
    # pg.RTransLogLU(0., 1.)
    if new:
        # complex variant: fit normalized real and imaginary parts jointly
        reNorm, imNorm = self.zNorm()
        fDD = DebyeComplex(self.f, self.tau)
        Znorm = pg.cat(reNorm, imNorm)
        IDD = pg.RInversion(Znorm, fDD, tLog, tM, False)
        IDD.setAbsoluteError(max(Znorm) * 0.003 + 0.01)
    else:
        # classic variant: fit the phase spectrum only
        fDD = DebyePhi(self.f, self.tau)
        IDD = pg.RInversion(phi, fDD, tLin, tM, True)
        IDD.setAbsoluteError(ePhi)  # 1 mrad
    fDD.regionManager().setConstraintType(cType)
    IDD.stopAtChi1(False)
    startModel = pg.RVector(nt, 0.01)
    IDD.setModel(startModel)
    IDD.setLambda(lam)
    IDD.setLambdaFactor(lamFactor)
    self.mDD = IDD.run()
    IDD.echoStatus()
    if new:
        # reconstruct complex response and derive amplitude/phase from it
        resp = np.array(IDD.response())
        respRe = resp[:nf]
        respIm = resp[nf:]
        respC = ((1 - respRe) + respIm * 1j) * max(self.amp)
        self.phiDD = np.angle(respC)
        self.ampDD = np.abs(respC)
        if showFit:
            fig, ax = self.showData(znorm=True, nrows=3)
            self.fig['DebyeFit'] = fig
            ax[0].plot(self.f, respRe, 'r-')
            ax[1].plot(self.f, respIm, 'r-')
            ax[2].semilogx(self.tau, self.mDD, 'r-')
            ax[2].set_xlim(max(self.tau), min(self.tau))
            ax[2].set_ylim(0., max(self.mDD))
            ax[2].grid(True)
            ax[2].set_xlabel(r'$\tau$ (s)')
            ax[2].set_ylabel('$m$ (-)')
    else:
        self.phiDD = IDD.response()
        if showFit:
            fig, ax = self.showData(nrows=3)
            self.fig['DebyeSpectrum'] = fig
            ax[2].semilogx(self.tau, self.mDD, 'r-')
def main(argv):
    """Parse command line options, load the data file and fit a polynomial."""
    from optparse import OptionParser
    parser = OptionParser(
        "Curvefit - fits data in datafile with different curves\n usage: %prog [options] Datafile"
    )
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      help="be verbose", default=False)
    parser.add_option("-n", "--np", dest="np", type="int", default=1,
                      help="Number of polynomials")
    (options, args) = parser.parse_args()
    if len(args) == 0:
        parser.print_help()
        print("Please add a datafile.")
        sys.exit(2)
    else:
        datafile = args[0]
    # load the two-column data matrix (x, y)
    xy = g.RMatrix()
    g.loadMatrixCol(xy, datafile)
    if options.verbose:
        print("data:", xy)
    # two coefficients and x-vector (first data column)
    f = FunctionModelling(options.np + 1, xy[0])
    # initialize inversion with data and forward operator and set options
    inv = g.RInversion(xy[1], f, True, True)
    # constant absolute error of 0.01 (not necessary, only for chi^2)
    inv.setAbsoluteError(0.01)
    # the problem is well-posed and does not need regularization
    inv.setLambda(0)
    # actual inversion run yielding coefficient model
    coeff = inv.run()
    # get actual response and write to file.
    g.save(inv.response(), "resp.out")
    # print result to screen and save coefficient vector to file
    s = "y = " + str(round(coeff[0] * 1000) / 1000)
    for i in range(1, options.np + 1):
        s = s + " + " + str(round(coeff[i] * 1000) / 1000) + " x^" + str(i)
    print(s)
    g.save(coeff, "out.vec")
    # plot measured data and fitted curve
    P.plot(xy[0], xy[1], 'rx', xy[0], inv.response(), 'b-')
    P.title(s)
    P.xlabel("x")
    P.ylabel("y")
    P.legend(("measured", "fitted"), loc="upper left")
    P.show()
############################################################################### # We define an (absolute) error level and add Gaussian noise to the data. error = 0.1 data += np.random.randn(*data.shape)*error ############################################################################### # Next, an instance of the forward operator is created. We could use it for # calculating the synthetic data using f.response([10.5, 0.55]) or just # f([10.5, 0.55]). We create a real-valued (R) inversion passing the forward # operator, the data. A verbose boolean flag could be added to provide some # output the inversion, another one prints more and saves files for debugging. f = ExpModelling(x) inv = pg.RInversion(data, f) ############################################################################### # We create a real-valued logarithmid transformation and appy it to the model. # Similar could be done for the data which are by default treated linearly. # We then set the error level that is used for data weighting. It can be a # float number or a vector of data length. One can also set a relative error. # Finally, we define the inversion style as Marquard scheme (pure local damping # with decreasing the regularization parameter subsequently) and start with a # relatively large regularization strength to avoid overshoot. # Finally run yields the coefficient vector and we plot some statistics. tLog = pg.RTransLog() inv.setTransModel(tLog) inv.setAbsoluteError(error) inv.setMarquardtScheme()
# forward operator for the synthetic block model and noisy data generation
fBlock = pg.DC1dModelling(len(synres), ab2, ab2 / 3)
rhoa = fBlock(synthk + synres)
# The data are noisified using a
errPerc = 3.  # relative error of 3 percent
rhoa = rhoa * (pg.randn(len(rhoa)) * errPerc / 100. + 1.)

###############################################################################
# The forward operator can be called by f.response(model) or simply f(model)
thk = np.logspace(-0.5, 0.5, 30)
f = pg.DC1dRhoModelling(thk, ab2, ab2 / 3)

###############################################################################
# Create some transformations used for inversion
transRho = pg.RTransLogLU(1, 1000)  # lower and upper bound
transRhoa = pg.RTransLog()  # log transformation also for data

###############################################################################
# Set up inversion
inv = pg.RInversion(rhoa, f, transRhoa, transRho, False)  # data vector, f, ...
# The transformations can also be omitted and set individually by
# inv.setTransData(transRhoa)
# inv.setTransModel(transRho)
inv.setRelativeError(errPerc / 100.0)

###############################################################################
# Create a homogeneous starting model
model = pg.RVector(len(thk) + 1, np.median(rhoa))  # uniform values
inv.setModel(model)
#
###############################################################################
# Set pretty large regularization strength and run inversion
# bugfix: message formerly said lam=200 although the code sets lambda=100
print("inversion with lam=100")
inv.setLambda(100)
res100 = inv.run()  # result is a pg.RVector, but compatible to numpy array
print('rrms={:.2f}%, chi^2={:.3f}'.format(inv.relrms(), inv.chi2()))
# Decrease the regularization (smoothness) and start (from old result)
def createInv(self, fop, verbose=True, doSave=False):
    """Create a bare inversion instance and attach the forward operator."""
    inversion = pg.RInversion(verbose, doSave)
    inversion.setForwardOperator(fop)
    return inversion
def invBlock(self, xpos=0, nlay=2, noise=1.0, stmod=30., lam=100.,
             lBound=0., uBound=0., verbose=False):
    """Create and return Gimli inversion instance for block inversion.

    Parameters
    ----------
    xpos : array
        position vector

    nLay : int
        Number of layers of the model to be determined OR
        vector of layer numbers OR forward operator

    noise : float
        Absolute data err in percent

    stmod : float or pg.RVector
        Starting model

    lam : float
        Global regularization parameter lambda.

    lBound : float
        Lower boundary for the model

    uBound : float
        Upper boundary for the model. 0 means no upper boundary

    verbose : bool
        Be verbose
    """
    self.transThk = pg.RTransLog()
    self.transRes = pg.RTransLogLU(lBound, uBound)
    self.transData = pg.RTrans()
    # EM forward operator: either given directly or created for nlay layers
    if isinstance(nlay, pg.FDEM1dModelling):
        self.fop = nlay
    else:
        self.fop = self.FOP(nlay)
    data = self.datavec(xpos)
    self.fop.region(0).setTransModel(self.transThk)
    self.fop.region(1).setTransModel(self.transRes)
    # scalar noise is expanded to a constant error vector
    if isinstance(noise, float):
        noiseVec = pg.RVector(len(data), noise)
    else:
        noiseVec = pg.asvector(noise)
    # independent EM inversion
    self.inv = pg.RInversion(data, self.fop, self.transData, verbose)
    if isinstance(stmod, float):  # real model given
        model = pg.RVector(nlay * 2 - 1, stmod)
        model[0] = 2.
    else:
        if len(stmod) == nlay * 2 - 1:
            model = pg.asvector(stmod)
        else:
            model = pg.RVector(nlay * 2 - 1, 30.)
    self.inv.setAbsoluteError(noiseVec)
    self.inv.setLambda(lam)
    self.inv.setMarquardtScheme(0.8)
    self.inv.setDeltaPhiAbortPercent(0.5)
    self.inv.setModel(model)
    self.inv.setReferenceModel(model)
    return self.inv
# create some transformations used for inversion transThk = pg.RTransLog() # log-transform ensures thk>0 transRho = pg.RTransLogLU(1, 1000) # lower and upper bound transRhoa = pg.RTransLog() # log transformation for data ############################################################################### # set model transformation for thickness and resistivity f.region(0).setTransModel(transThk) # 0=thickness f.region(1).setTransModel(transRho) # 1=resistivity ############################################################################### # generate start model values from median app. resistivity & spread paraDepth = max(ab2) / 3. # rule-of-thumb for Wenner/Schlumberger f.region(0).setStartValue(paraDepth / nlay / 2) f.region(1).setStartValue(np.median(rhoa)) ############################################################################### # set up inversion inv = pg.RInversion(rhoa, f, transRhoa, True) # data vector, fop, verbose # could also be set by inv.setTransData(transRhoa) ############################################################################### # set error model, regularization strength and Marquardt scheme inv.setRelativeError(errPerc / 100.0) # alternative: setAbsoluteError in Ohmm inv.setLambda(lam) # (initial) regularization parameter inv.setMarquardtScheme(0.9) # decrease lambda by factor 0.9 model = f.createStartVector() # creates from region start value model[nlay] *= 1.5 # change default model by changing 2nd layer resistivity inv.setModel(model) # ############################################################################### # run actual inversion and extract resistivity and thickness model = inv.run() # result is a pg.RVector, but compatible to numpy array res, thk = model[nlay - 1:nlay * 2 - 1], model[0:nlay - 1] print('rrms={:.2f}%, chi^2={:.3f}'.format(inv.relrms(), inv.chi2())) ###############################################################################
fop.regionManager().region(0).setFixValue(1e-4) # bedrock fop.regionManager().region(0).setSingle(1) # bedrock # Reflect the fix value setting here!!!! fop.createRefinedForwardMesh(refine=False, pRefine=False) # Connect all regions for i in range(2, fop.regionManager().regionCount()): for j in range(i + 1, fop.regionManager().regionCount()): fop.regionManager().setInterRegionConstraint(i, j, 1.0) startModel = pg.RVector(fop.regionManager().parameterCount(), 1e-3) fop.setStartModel(startModel) inv = pg.RInversion(rhoaR.flatten(), fop, verbose=1, dosave=0) tD = pg.RTransLog() tM = pg.RTransLogLU(1e-9, 1e-2) inv.setTransData(tD) inv.setTransModel(tM) inv.setRelativeError(err.flatten()) inv.setMaxIter(50) inv.setLineSearch(True) inv.setLambda(1000) outPath = "permModel_h-" + str(paraRefine) if not os.path.exists(outPath): os.mkdir(outPath)
dataEM[i] += np.random.randn(1)[0] * noiseEM ############################################################################### # We define model transformations: logarithms and log with upper+lower bounds transRhoa = pg.RTransLog() transThk = pg.RTransLog() transRes = pg.RTransLogLU(1., 1000.) transEM = pg.RTrans() fEM.region(0).setTransModel(transThk) fEM.region(1).setTransModel(transRes) ############################################################################### # We set up the independent EM inversion and run the model. invEM = pg.RInversion(dataEM, fEM, transEM, verbose) modelEM = pg.RVector(nlay * 2 - 1, 50.) invEM.setModel(modelEM) invEM.setAbsoluteError(noiseEM) invEM.setLambda(lamEM) invEM.setMarquardtScheme(0.9) modelEM = invEM.run() respEM = invEM.response() ############################################################################### # Next we set up the DC forward operator and generate synthetic data with noise ab2 = pg.RVector(20, 3.) na = len(ab2) mn2 = pg.RVector(na, 1.0) for i in range(na - 1):
# build a parameter mesh around the electrode positions
mesh = pg.meshtools.createParaMesh(data.sensorPositions(),
                                   verbose=1, paraMaxCellSize=1, quality=34,
                                   smooth=[1, 4], paraDepth=10, paraDX=0.5)
print(mesh)

# fop = pb.DCSRMultiElectrodeModelling(mesh, data)
fop = pg.physics.ert.ERTModelling(mesh, data)
fop.regionManager().region(1).setBackground(True)
fop.createRefinedForwardMesh(refine=True, pRefine=False)

inv = pg.RInversion(data("rhoa"), fop, verbose=True, dosave=True)
# log transforms for both data and model
datTrans = pg.RTransLog()
modTrans = pg.RTransLog()
inv.setMaxIter(10)
inv.setTransData(datTrans)
inv.setTransModel(modTrans)
inv.setError(data('err'))
# homogeneous start model from the data median
inv.setModel(pg.RVector(fop.regionManager().parameterCount(),
                        pg.median(data('rhoa'))))
inv.setLambda(5)
model = inv.run()
modelMesh = fop.regionManager().paraDomain()
def ReadAndRemoveEM(filename, readsecond=False, doplot=False,
                    dellast=True, ePhi=0.5, ePerc=1., lam=2000.):
    """
    Read res1file and remove EM effects using a double-Cole-Cole model
    fr,rhoa,phi,dphi = ReadAndRemoveEM(filename, readsecond/doplot bools)
    """
    fr, rhoa, phi, drhoa, dphi = read1resfile(filename,
                                              readsecond,
                                              dellast=dellast)
    # forward problem
    mesh = pg.createMesh1D(1, 6)  # 6 independent parameters
    f = DoubleColeColeModelling(mesh, pg.asvector(fr), phi[2] / abs(phi[2]))
    f.regionManager().loadMap("region.control")
    model = f.createStartVector()

    # inversion
    inv = pg.RInversion(phi, f, True, False)
    inv.setAbsoluteError(phi * ePerc * 0.01 + ePhi / 1000.)
    inv.setRobustData(True)

    # inv.setCWeight(pg.RVector(6, 1.0))  # what was this ever good for?
    inv.setMarquardtScheme(0.8)
    inv.setLambda(lam)
    inv.setModel(model)
    erg = inv.run()
    inv.echoStatus()
    chi2 = inv.chi2()
    # zero the IP chargeability to isolate the pure EM term in the response
    mod0 = pg.RVector(erg)
    mod0[0] = 0.0  # set IP term to zero to obtain pure EM term
    emphi = f.response(mod0)
    resid = (phi - emphi) * 1000.

    if doplot:
        s = "IP: m= " + str(rndig(erg[0])) + " t=" + str(rndig(erg[1])) + \
            " c =" + str(rndig(erg[2]))
        s += " EM: m= " + str(rndig(erg[3])) + " t=" + str(rndig(erg[4])) + \
            " c =" + str(rndig(erg[5]))
        fig = P.figure(1)
        fig.clf()
        ax = P.subplot(111)
        P.errorbar(
            fr, phi * 1000., yerr=dphi * 1000., fmt='x-', label='measured')
        ax.set_xscale('log')
        P.semilogx(fr, emphi * 1000., label='EM term (CC)')
        P.errorbar(fr, resid, yerr=dphi * 1000., label='IP term')
        ax.set_yscale('log')
        P.xlim((min(fr), max(fr)))
        P.ylim((0.1, max(phi) * 1000.))
        P.xlabel('f in Hz')
        P.ylabel(r'-$\phi$ in mrad')
        P.grid(True)
        P.title(s)
        P.legend(loc=2)  # ('measured','2-cole-cole','residual'))
        fig.show()
    return N.array(fr), N.array(rhoa), N.array(resid), N.array(
        phi) * 1e3, dphi, chi2, N.array(emphi) * 1e3
    that is called with dosave=True.
    """
    # NOTE(review): the enclosing `def` is outside this view
    print('musthave save if inversion is called with dosave ')
    return 0


class TestModelling(pg.ModellingBase):
    """Minimal forward operator using a custom Jacobian matrix object."""

    def __init__(self, verbose):
        super().__init__(verbose)
        # keep a reference so the Jacobian is not garbage collected
        self._J = MyMatrix()
        self.setJacobian(self._J)

    def createJacobian(self, model):
        print('Create Jacobian')

    def response(self, par):
        print('Create response')
        return [0.0, 0.0]


fop = TestModelling(verbose=True)
fop.setStartModel([0, 0])

inv = pg.RInversion(verbose=True, dosave=False)
# fixme!!!!!!!! .. segfault if one of the following is unset
inv.setForwardOperator(fop)
inv.setData([0.0, 0.0])
inv.start()