def createInv(self, verbose):
    """Create an inversion instance using linear data and model transforms.

    Parameters
    ----------
    verbose : bool
        Print inversion progress output.

    Returns
    -------
    pg.Inversion
        Configured inversion; the transforms are kept on self (tD, tM)
        so they outlive this call.
    """
    inversion = pg.Inversion(verbose=verbose, dosave=False)
    # Keep references on self: the C++ side does not own the transforms.
    self.tD = pg.trans.Trans()
    self.tM = pg.trans.Trans()
    inversion.setTransData(self.tD)
    inversion.setTransModel(self.tM)
    return inversion
def __init__(self, **kwargs):
    """Initialize empty operator/data/transform slots and wrap pg.Inversion.

    All keyword arguments are forwarded unchanged to pg.Inversion.
    """
    # Slots filled later by setup methods; None marks "not yet set".
    self.fop = self.dataVals = self.dataErrs = None
    self.tM = self.tD = None
    self.inv = pg.Inversion(**kwargs)
def createInv(self, verbose):
    """Create resistivity inversion instance.

    Uses logarithmic transforms for both data and model, which enforces
    positivity of apparent resistivity and resistivity model values.

    Parameters
    ----------
    verbose : bool
        Print inversion progress output.

    Returns
    -------
    pg.Inversion
    """
    inversion = pg.Inversion(verbose=verbose, dosave=False)
    # Store transforms on self so they are not garbage-collected.
    self.tD = pg.trans.TransLog()
    self.tM = pg.trans.TransLog()
    inversion.setTransData(self.tD)
    inversion.setTransModel(self.tM)
    return inversion
def DebyeDecomposition(fr, phi, maxfr=None, tv=None, verbose=False,
                       zero=False, err=0.25e-3, lam=10., blocky=False):
    """Debye decomposition of a phase spectrum.

    Parameters
    ----------
    fr : array
        Frequencies in Hz.
    phi : array
        Phase values (in mrad; scaled by 1e-3 before inversion).
    maxfr : float, optional
        Upper frequency limit; only f <= maxfr with phi >= 0 are used.
    tv : array, optional
        Relaxation time vector; default is an automatic log-spaced vector.
    verbose : bool
        Verbose inversion output.
    zero : bool
        Add a zero/high-frequency term as an extra region.
    err : float
        Absolute data error.
    lam : float
        Regularization strength.
    blocky : bool
        Use blocky (L1-type) model constraints.

    Returns
    -------
    tvec, mvec, response, idx
        Relaxation times, chargeability spectrum, model response in mrad,
        and the boolean mask of data actually used.
    """
    if maxfr is not None:
        idx = (fr <= maxfr) & (phi >= 0.)
        phi1 = phi[idx]
        fr1 = fr[idx]
        # BUGFIX: report the filtered frequency range (was min/max of fr).
        print("using frequencies from ", N.min(fr1), " to ", N.max(fr1), "Hz")
    else:
        # BUGFIX: idx was undefined in this branch but returned below,
        # raising NameError; use an all-True mask instead.
        idx = N.ones(len(fr), dtype=bool)
        phi1 = phi
        fr1 = fr
    if tv is None:
        # Automatic relaxation-time range: extend a factor 4 above and
        # 8 below the periods covered by the data.
        tmax = 1. / N.min(fr1) / 2. / N.pi * 4.
        tmin = 1. / N.max(fr1) / 2. / N.pi / 8.
        tvec = N.logspace(N.log10(tmin), N.log10(tmax), 30)
    else:
        tvec = tv
    f = DebyeModelling(fr1, tvec, zero=zero)
    tvec = f.t_
    tm = pg.trans.TransLog()
    start = pg.Vector(len(tvec), 1e-4)
    if zero:
        f.region(-1).setConstraintType(0)  # minimum length
        f.region(0).setConstraintType(1)  # smoothness
        f.region(1).setConstraintType(0)  # minimum length
        f.regionManager().setInterRegionConstraint(-1, 0, 1.)
        f.regionManager().setInterRegionConstraint(0, 1, 1.)
        f.region(-1).setTransModel(tm)
        f.region(0).setTransModel(tm)
        f.region(1).setTransModel(tm)
        # Strongly damp the extra (zero/high-frequency) terms.
        f.region(-1).setModelControl(1000.)
        f.region(1).setModelControl(1000.)
    else:
        f.regionManager().setConstraintType(1)  # smoothness
    inv = pg.Inversion(pg.asvector(phi1 * 1e-3), f, verbose)
    inv.setAbsoluteError(pg.Vector(len(fr1), err))
    inv.setLambda(lam)
    inv.setModel(start)
    inv.setBlockyModel(blocky)
    if zero:
        inv.setReferenceModel(start)
    else:
        inv.setTransModel(tm)
    mvec = inv.run()
    resp = inv.response()
    return tvec, mvec, N.array(resp) * 1e3, idx
def inv2D(self, nlay, lam=100., resL=1., resU=1000., thkL=1.,
          thkU=100., minErr=1.0):
    """2d LCI inversion class.

    Parameters
    ----------
    nlay : int or array
        Number of layers (homogeneous 30 Ohmm starting model) OR a model
        vector used as starting and reference model.
    lam : float
        Global regularization parameter lambda.
    resL, resU : float
        Lower/upper resistivity bounds.
    thkL, thkU : float
        Lower/upper thickness bounds.
    minErr : float
        Minimum error level.

    Returns
    -------
    INV : pg.Inversion
        Ready-to-run inversion instance.
    """
    if isinstance(nlay, int):
        modVec = pg.Vector(nlay * 2 - 1, 30.)
        cType = 0  # no reference model
    else:
        modVec = nlay
        cType = 10  # use this as reference model
        # BUGFIX: integer (floor) division; "/" yields a float in
        # Python 3, which breaks the range() calls below.
        nlay = (len(modVec) + 1) // 2
    # init forward operator
    self.f2d = self.FOP2d(nlay)
    # transformations: linear data, bounded-log model parameters
    self.transData = pg.trans.Trans()
    self.transThk = pg.trans.TransLogLU(thkL, thkU)
    self.transRes = pg.trans.TransLogLU(resL, resU)
    for i in range(nlay - 1):
        self.f2d.region(i).setTransModel(self.transThk)
    for i in range(nlay - 1, nlay * 2 - 1):
        self.f2d.region(i).setTransModel(self.transRes)
    # set constraints
    self.f2d.region(0).setConstraintType(cType)
    self.f2d.region(1).setConstraintType(cType)
    # collect data vector over all positions
    datvec = pg.Vector(0)
    for i in range(len(self.x)):
        datvec = pg.cat(datvec, self.datavec(i))
    # collect error vector (clipped from below at minErr)
    if self.ERR is None:
        error = 1.0
    else:
        error = []
        for i in range(len(self.x)):
            err = np.maximum(self.ERR[i][self.activeFreq] * 0.701, minErr)
            error.extend(err)
    # generate starting model by repetition
    model = pg.asvector(np.repeat(modVec, len(self.x)))
    INV = pg.Inversion(datvec, self.f2d, self.transData)
    INV.setAbsoluteError(error)
    INV.setLambda(lam)
    INV.setModel(model)
    INV.setReferenceModel(model)
    return INV
def createInv(self, nlay=3, lam=100., verbose=True, **kwargs):
    """Create inversion instance (and fop if necessary with nlay).

    Keyword arguments
    -----------------
    lambdaFactor : float [0.8]
        Lambda reduction factor for the Marquardt scheme.
    robust : bool [False]
        Use robust (L1-reweighted) data fitting.
    """
    self.fop = MRS.createFOP(nlay, self.K, self.z, self.t)
    self.setBoundaries()
    inv = pg.Inversion(self.data, self.fop, verbose)
    self.INV = inv
    inv.setLambda(lam)
    # Marquardt: pure local damping with successively reduced lambda.
    inv.setMarquardtScheme(kwargs.pop('lambdaFactor', 0.8))
    inv.stopAtChi1(False)  # now in MarquardtScheme
    inv.setDeltaPhiAbortPercent(0.5)
    inv.setAbsoluteError(np.abs(self.error))
    inv.setRobustData(kwargs.pop('robust', False))
    return inv
def block1dInversion(self, nlay=2, lam=100., show=False, verbose=True,
                     uncertainty=False):
    """Invert all data together by a 1D model (more general solution).

    Concatenates the data/error of all soundings, inverts them with a
    single joint forward operator and stores the resulting 1D model in
    the first sounding; WMOD/TMOD are filled with per-sounding copies
    for 2D display.
    """
    # Concatenate data and (real-valued) errors of all soundings.
    data, error = pg.Vector(), pg.Vector()
    for mrs in self.mrs:
        data = pg.cat(data, mrs.data)
        error = pg.cat(error, np.real(mrs.error))
    # f = JointMRSModelling(self.mrs, nlay)
    f = MultiFOP(self.mrs, nlay)
    # Take start values and bounds from the first sounding for all three
    # parameter regions (thickness, water content, relaxation time).
    mrsobj = self.mrs[0]
    for i in range(3):
        f.region(i).setParameters(mrsobj.startval[i], mrsobj.lowerBound[i],
                                  mrsobj.upperBound[i])
    INV = pg.Inversion(data, f, verbose)
    INV.setLambda(lam)
    INV.setMarquardtScheme(0.8)
    # INV.stopAtChi1(False)  # should be already in MarquardtScheme
    INV.setDeltaPhiAbortPercent(0.5)
    INV.setAbsoluteError(error)
    model = INV.run()
    # Store result in the first sounding object.
    m0 = self.mrs[0]
    m0.model = np.asarray(model)
    if uncertainty:
        # Model bounds from chi^2 increase (local import: optional path).
        from pygimli.utils import iterateBounds
        m0.modelL, m0.modelU = iterateBounds(INV, dchi2=INV.chi2() / 2,
                                             change=1.2)
    if show:
        self.show1dModel()
    # %% fill up 2D model (for display only)
    self.WMOD, self.TMOD = [], []
    # Block model layout: [thicknesses | water contents | decay times].
    thk = model[0:nlay - 1]
    wc = model[nlay - 1:2 * nlay - 1]
    t2 = model[2 * nlay - 1:3 * nlay - 1]
    for i in range(len(self.mrs)):
        self.WMOD.append(np.hstack((thk, wc)))
        self.TMOD.append(np.hstack((thk, t2)))
    return model
def __init__(self, **kwargs):
    """Set up a wrapped pg.Inversion instance.

    Recognized keyword arguments
    ----------------------------
    verbose : bool [False]
        Verbose inversion output.
    debug : bool [False]
        Debug output and file saving.
    maxIter : int [20]
        Maximum number of iterations.
    fop : forward operator, optional
        If given, it is attached via setForwardOperator.
    """
    self.__verbose = kwargs.pop('verbose', False)
    self.__debug = kwargs.pop('debug', False)
    self.dataVals = None
    self.errorVals = None
    self.transData = pg.RTransLin()  # linear data transform by default
    self.inv = pg.Inversion(self.__verbose, self.__debug)
    self.maxIter = kwargs.pop('maxIter', 20)
    forwardOp = kwargs.pop('fop', None)
    if forwardOp is not None:
        self.setForwardOperator(forwardOp)
    self.inv.setDeltaPhiAbortPercent(0.5)
def blockLCInversion(self, nlay=2, startModel=None, **kwargs):
    """Laterally constrained (piece-wise 1D) block inversion.

    Inverts all soundings simultaneously with lateral smoothness
    constraints. Results are stored on self (WMOD, TMOD, RMSvec,
    Chi2vec, totalRMS, totalChi2); nothing is returned.

    Keyword arguments: zWeight [0], cType [1], startvec, lam [100],
    maxIter [20].
    """
    # Concatenate data/errors and remember per-sounding data counts.
    data, error, self.nData = pg.Vector(), pg.Vector(), []
    for mrs in self.mrs:
        data = pg.cat(data, mrs.data)
        error = pg.cat(error, mrs.error)
        self.nData.append(len(mrs.data))
    fop = MRSLCI(self.mrs, nlay=nlay)
    fop.region(0).setZWeight(kwargs.pop('zWeight', 0))
    fop.region(0).setConstraintType(kwargs.pop('cType', 1))
    transData, transMod = pg.trans.Trans(), pg.trans.TransLog(
    )  # LU(1., 500.)
    # Fall back to a joint 1D inversion for the starting model.
    if startModel is None:
        startModel = self.block1dInversion(nlay, verbose=False)
    model = kwargs.pop('startvec', np.tile(startModel, len(self.mrs)))
    INV = pg.Inversion(data, fop, transData, transMod, True, False)
    INV.setModel(model)
    INV.setReferenceModel(model)
    INV.setAbsoluteError(error)
    INV.setLambda(kwargs.pop('lam', 100))
    INV.setMaxIter(kwargs.pop('maxIter', 20))
    # INV.stopAtChi1(False)
    INV.setLambdaFactor(0.9)
    INV.setDeltaPhiAbortPercent(0.1)
    model = INV.run()
    # Split the stacked model vector back into per-sounding block models:
    # [thicknesses | water contents | decay times] per sounding.
    self.WMOD, self.TMOD = [], []
    for par in np.reshape(model, (len(self.mrs), 3 * nlay - 1)):
        thk = par[0:nlay - 1]
        self.WMOD.append(np.hstack((thk, par[nlay - 1:2 * nlay - 1])))
        self.TMOD.append(np.hstack((thk, par[2 * nlay - 1:3 * nlay - 1])))
    # Per-sounding misfit statistics (misfit scaled to nV via 1e9).
    ind = np.hstack((0, np.cumsum(self.nData)))
    resp = INV.response()
    misfit = data - resp
    emisfit = misfit / error
    misfit *= 1e9
    self.totalChi2 = INV.chi2()
    self.totalRMS = INV.absrms() * 1e9
    self.RMSvec, self.Chi2vec = [], []
    for i in range(len(self.mrs)):
        self.RMSvec.append(np.sqrt(np.mean(misfit[ind[i]:ind[i + 1]]**2)))
        self.Chi2vec.append(np.mean(emisfit[ind[i]:ind[i + 1]]**2))
print(fop.jacobian().rows()) #fop = pb.DCMultiElectrodeModelling(mesh, data) fop.regionManager().region(1).setBackground(True) fop.createRefinedForwardMesh(refine=True, pRefine=False) cData = pb.getComplexData(data) mag = pg.abs(cData) phi = -pg.phase(cData) print(pg.norm(mag-data('rhoa'))) print(pg.norm(phi-data('ip')/1000)) inv = pg.Inversion(pg.cat(mag, phi), fop, verbose=True, dosave=True) dataTrans = pg.trans.TransCumulative() datRe = pg.trans.TransLog() datIm = pg.trans.Trans() dataTrans.add(datRe, data.size()) dataTrans.add(datIm, data.size()) modRe = pg.trans.TransLog() modIm = pg.trans.TransLog() modelTrans = pg.trans.TransCumulative() modelTrans.add(modRe, fop.regionManager().parameterCount()) modelTrans.add(modIm, fop.regionManager().parameterCount())
fop.regionManager().region(0).setFixValue(1e-4) # bedrock fop.regionManager().region(0).setSingle(1) # bedrock # Reflect the fix value setting here!!!! fop.createRefinedForwardMesh(refine=False, pRefine=False) # Connect all regions for i in range(2, fop.regionManager().regionCount()): for j in range(i + 1, fop.regionManager().regionCount()): fop.regionManager().setInterRegionConstraint(i, j, 1.0) startModel = pg.Vector(fop.regionManager().parameterCount(), 1e-3) fop.setStartModel(startModel) inv = pg.Inversion(rhoaR.flatten(), fop, verbose=1, dosave=0) tD = pg.trans.TransLog() tM = pg.trans.TransLogLU(1e-9, 1e-2) inv.setTransData(tD) inv.setTransModel(tM) inv.setRelativeError(err.flatten()) inv.setMaxIter(50) inv.setLineSearch(True) inv.setLambda(1000) outPath = "permModel_h-" + str(paraRefine) if not os.path.exists(outPath): os.mkdir(outPath)
def invBlock(self, xpos=0, nlay=2, noise=1.0, stmod=30., lam=100.,
             lBound=0., uBound=0., verbose=False):
    """Create and return Gimli inversion instance for block inversion.

    Parameters
    ----------
    xpos : array
        position vector
    nlay : int
        Number of layers of the model to be determined OR
        vector of layer numbers OR forward operator
    noise : float
        Absolute data err in percent
    stmod : float or pg.Vector
        Starting model
    lam : float
        Global regularization parameter lambda.
    lBound : float
        Lower boundary for the model
    uBound : float
        Upper boundary for the model. 0 means no upper boundary
    verbose : bool
        Be verbose
    """
    # Transformations: linear data, log thickness, bounded-log resistivity.
    self.transData = pg.trans.Trans()
    self.transThk = pg.trans.TransLog()
    self.transRes = pg.trans.TransLogLU(lBound, uBound)
    # EM forward operator: either passed in directly or built from nlay.
    # NOTE(review): if nlay is an operator AND stmod is a float, the
    # nlay * 2 - 1 expressions below would fail — confirm callers never
    # combine these.
    if isinstance(nlay, pg.core.FDEM1dModelling):
        self.fop = nlay
    else:
        self.fop = self.FOP(nlay)
    data = self.datavec(xpos)
    self.fop.region(0).setTransModel(self.transThk)
    self.fop.region(1).setTransModel(self.transRes)
    if isinstance(noise, float):
        absError = pg.Vector(len(data), noise)
    else:
        absError = pg.asvector(noise)
    # independent EM inversion
    self.inv = pg.Inversion(data, self.fop, self.transData, verbose)
    if isinstance(stmod, float):  # homogeneous start value given
        startModel = pg.Vector(nlay * 2 - 1, stmod)
        startModel[0] = 2.
    elif len(stmod) == nlay * 2 - 1:
        startModel = pg.asvector(stmod)
    else:
        startModel = pg.Vector(nlay * 2 - 1, 30.)
    self.inv.setAbsoluteError(absError)
    self.inv.setLambda(lam)
    self.inv.setMarquardtScheme(0.8)
    self.inv.setDeltaPhiAbortPercent(0.5)
    self.inv.setModel(startModel)
    self.inv.setReferenceModel(startModel)
    return self.inv
def ReadAndRemoveEM(filename, readsecond=False, doplot=False,
                    dellast=True, ePhi=0.5, ePerc=1., lam=2000.):
    """Read res1file and remove EM effects using a double-Cole-Cole model.

    fr, rhoa, phi, dphi = ReadAndRemoveEM(filename, readsecond/doplot bools)

    Fits a double Cole-Cole model (IP term + EM term) to the phase
    spectrum, then subtracts the EM term response from the data.
    ePhi is an absolute phase error in mrad, ePerc a relative one in %.
    """
    fr, rhoa, phi, drhoa, dphi = read1resfile(filename,
                                              readsecond,
                                              dellast=dellast)
    # forward problem: 1D mesh with 6 independent Cole-Cole parameters
    mesh = pg.meshtools.createMesh1D(1, 6)  # 6 independent parameters
    # Sign of the third phase value selects the model polarity.
    f = DoubleColeColeModelling(mesh, pg.asvector(fr), phi[2] / abs(phi[2]))
    f.regionManager().loadMap("region.control")
    model = f.createStartVector()
    # inversion (Marquardt scheme, robust data fitting)
    inv = pg.Inversion(phi, f, True, False)
    inv.setAbsoluteError(phi * ePerc * 0.01 + ePhi / 1000.)
    inv.setRobustData(True)
    # inv.setCWeight(pg.Vector(6, 1.0))  # what was that good for?
    inv.setMarquardtScheme(0.8)
    inv.setLambda(lam)
    inv.setModel(model)
    erg = inv.run()
    inv.echoStatus()
    chi2 = inv.chi2()
    mod0 = pg.Vector(erg)
    mod0[0] = 0.0  # set IP term to zero to obtain pure EM term
    emphi = f.response(mod0)
    # Residual = measured phase minus EM response, in mrad.
    resid = (phi - emphi) * 1000.
    if doplot:
        # Title string summarizing fitted IP and EM Cole-Cole terms.
        s = "IP: m= " + str(rndig(erg[0])) + " t=" + str(rndig(erg[1])) + \
            " c =" + str(rndig(erg[2]))
        s += " EM: m= " + str(rndig(erg[3])) + " t=" + str(rndig(erg[4])) + \
            " c =" + str(rndig(erg[5]))
        fig = P.figure(1)
        fig.clf()
        ax = P.subplot(111)
        P.errorbar(fr, phi * 1000., yerr=dphi * 1000., fmt='x-',
                   label='measured')
        ax.set_xscale('log')
        P.semilogx(fr, emphi * 1000., label='EM term (CC)')
        P.errorbar(fr, resid, yerr=dphi * 1000., label='IP term')
        ax.set_yscale('log')
        P.xlim((min(fr), max(fr)))
        P.ylim((0.1, max(phi) * 1000.))
        P.xlabel('f in Hz')
        P.ylabel(r'-$\phi$ in mrad')
        P.grid(True)
        P.title(s)
        P.legend(loc=2)  # ('measured','2-cole-cole','residual'))
        fig.show()
    return N.array(fr), N.array(rhoa), N.array(
        resid), N.array(phi) * 1e3, dphi, chi2, N.array(emphi) * 1e3
# Load ERT data and build a parameter mesh around the electrodes.
data = pb.DataContainerERT('gallery.data')
print(data)
# mesh = pg.meshtools.createParaMesh2dGrid(data.sensorPositions(),
#                                          paraDZ=0.5)
mesh = pg.meshtools.createParaMesh(data.sensorPositions(),
                                   verbose=1, paraMaxCellSize=1,
                                   quality=34, smooth=[1,4],
                                   paraDepth=10, paraDX=0.5)
print(mesh)
# fop = pb.DCSRMultiElectrodeModelling(mesh, data)
fop = pg.physics.ert.ERTModelling(mesh, data)
# Region 1 is background; refine the forward mesh for accuracy.
fop.regionManager().region(1).setBackground(True)
fop.createRefinedForwardMesh(refine=True, pRefine=False)
inv = pg.Inversion(data("rhoa"), fop, verbose=True, dosave=True)
# Log transforms for both apparent resistivity data and model.
datTrans = pg.trans.TransLog()
modTrans = pg.trans.TransLog()
inv.setMaxIter(10)
inv.setTransData(datTrans)
inv.setTransModel(modTrans)
inv.setError(data('err'))
# Homogeneous starting model at the median apparent resistivity.
inv.setModel(pg.Vector(fop.regionManager().parameterCount(),
                       pg.math.median(data('rhoa'))))
inv.setLambda(5)
model = inv.run()
# Parameter domain mesh for displaying the result.
modelMesh = fop.regionManager().paraDomain()
# 2x2 figure comparing inversion results for different constraint types.
fig, ax = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)
kw = dict(
    colorBar=True, cMin=30, cMax=300, orientation='vertical',
    cMap='Spectral_r', logScale=True)
# We want to use a homogenenous starting model
vals = [30, 50, 300, 100, 200]
# We assume a 5% relative accuracy of the values
error = pg.Vector(len(vals), 0.05)
# set up data and model transformation log-scaled
tLog = pg.trans.TransLog()
inv = pg.Inversion(fop=fop)
inv.transData = tLog
inv.transModel = tLog
inv.lam = 40
inv.startModel = 30
# Initially, we use the first-order constraints (default)
res = inv.run(vals, error, cType=1, lam=30)
print(('Ctype=1: ' + '{:.1f} ' * 6).format(*fop(res), inv.chi2()))
pg.show(mesh, res, ax=ax[0, 0], **kw)
ax[0, 0].set_title("1st order")
# Verify the data are fitted within the error bounds (chi^2 ~ 1).
np.testing.assert_array_less(inv.chi2(), 1.2)
# Next, we use the second order (curvature) constraint type
res = inv.run(vals, error, cType=2, lam=25)
print(('Ctype=2: ' + '{:.1f} ' * 6).format(*fop(res), inv.chi2()))
pg.show(mesh, res, ax=ax[0, 1], **kw)
def harmfit(y, x=None, error=None, nc=42, resample=None, lam=0.1,
            window=None, verbose=False, dosave=False, lineSearch=True,
            robust=False, maxiter=20):
    """HARMFIT - GIMLi based curve-fit by harmonic functions.

    Parameters
    ----------
    y : 1d-array - values to be fitted
    x : 1d-array(len(y)) - data abscissa data. default: [0 .. len(y))
    error : 1d-array(len(y)) error of y. default (absolute error = 0.01)
    nc : int - Number of harmonic coefficients
    resample : 1d-array - resample y to x using fitting coefficients
    window : int - just fit data inside window bounds

    Returns
    -------
    response : 1d-array(len(resample) or len(x)) - smoothed values
    coefficients : 1d-array - fitting coefficients
    """
    if x is None:
        x = np.arange(len(y))
    if window is not None:
        idx = pg.find((x >= window[0]) & (x < window[1]))
        # BUGFIX: use item access instead of calling the arrays; numpy
        # arrays (e.g. the default x above) are not callable, so x(idx)
        # raised TypeError whenever a window was given.
        xToFit = x[idx]
        yToFit = y[idx]
        if error is not None:
            error = error[idx]
    else:
        xToFit = x
        yToFit = y
    fop = pg.core.HarmonicModelling(nc, xToFit, verbose)
    inv = pg.Inversion(yToFit, fop, verbose, dosave)
    if error is not None:
        inv.setAbsoluteError(error)
    else:
        inv.setAbsoluteError(0.01)
    # Marquardt scheme: local damping, lambda reduced by factor 0.8.
    inv.setMarquardtScheme(0.8)
    if error is not None:
        # With a real error estimate, stopping at chi^2 = 1 is meaningful.
        inv.stopAtChi1(True)
    inv.setLambda(lam)
    inv.setMaxIter(maxiter)
    inv.setLineSearch(lineSearch)
    inv.setRobustData(robust)
    # inv.setConstraintType(0)
    coeff = inv.run()
    if resample is not None:
        ret = fop.response(coeff, resample)
        if window is not None:
            # Zero out resampled values outside the fitted window.
            ret.setVal(
                0.0, pg.find((resample < window[0]) |
                             (resample >= window[1])))
        return ret, coeff
    else:
        return inv.response(), coeff
############################################################################### # We define an (absolute) error level and add Gaussian noise to the data. error = 0.5 data += np.random.randn(*data.shape)*error relError = error / data ############################################################################### # Next, an instance of the forward operator is created. We could use it for # calculating the synthetic data using f.response([10.5, 0.55]) or just # f([10.5, 0.55]). We create a real-valued (R) inversion passing the forward # operator, the data. A verbose boolean flag could be added to provide some # output the inversion, another one prints more and saves files for debugging. f = ExpModelling(x) inv = pg.Inversion(f) ############################################################################### # We create a real-valued logarithmic transformation and apply it to the model. # Similar could be done for the data which are by default treated linearly. # We then set the error level that is used for data weighting. It can be a # float number or a vector of data length. One can also set a relative error. # Finally, we define the inversion style as Marquardt scheme (pure local damping # with decreasing the regularization parameter subsequently) and start with a # relatively large regularization strength to avoid overshoot. # Finally run yields the coefficient vector and we plot some statistics. tLog = pg.trans.TransLog() f.modelTrans = tLog inv._inv.setMarquardtScheme() inv._inv.setLambda(100)
def createInv(self, fop, verbose=True, doSave=False):
    """Create a pg.Inversion and attach the given forward operator.

    Parameters
    ----------
    fop : forward operator
        Operator to be attached to the inversion.
    verbose : bool
        Verbose inversion output.
    doSave : bool
        Save intermediate results/files.

    Returns
    -------
    pg.Inversion
    """
    inversion = pg.Inversion(verbose, doSave)
    inversion.setForwardOperator(fop)
    return inversion