def createInv(self, verbose):
    """Set up and return a resistivity inversion instance.

    Log transformations for data and model are stored on ``self`` so
    they stay alive as long as the returned inversion object.

    Parameters
    ----------
    verbose : bool
        Verbosity flag passed to the inversion.
    """
    self.tD = pg.RTransLog()
    self.tM = pg.RTransLog()
    result = pg.RInversion(verbose=verbose, dosave=False)
    result.setTransData(self.tD)
    result.setTransModel(self.tM)
    return result
def fitCCCC(f, amp, phi, error=0.01, lam=10., taupar=(1e-2, 1e-5, 100),
            cpar=(0.25, 0, 1), mpar=(0, 0, 1)):
    """Fit complex spectrum by Cole-Cole model based on sigma.

    Parameters
    ----------
    f : iterable
        Frequencies.
    amp, phi : array
        Amplitude and phase of the measured spectrum.
    error : float
        Relative amplitude error used to build the absolute data error.
    lam : float
        Initial regularization parameter (cooled by Marquardt scheme).
    taupar, cpar, mpar : tuple (start, lower, upper)
        Start value and bounds for tau, c and m. An ``mpar`` start value
        of 0 is replaced by an estimate from the amplitude decay.

    Returns
    -------
    model, rAmp, rPhi : fitted parameters, response amplitude and phase.
    """
    fCC = ColeColeComplexSigma(f)
    tLog = pg.RTransLog()
    fCC.region(0).setStartValue(1. / max(amp))
    # Work on a copy: the default is an (immutable) tuple, so item
    # assignment below would raise TypeError, and a caller-supplied
    # list must not be mutated as a side effect.
    mpar = list(mpar)
    if mpar[0] == 0:  # no start value given: estimate from amplitude decay
        mpar[0] = 1. - min(amp) / max(amp)
    fCC.region(1).setParameters(*mpar)    # m (start, lower, upper)
    fCC.region(2).setParameters(*taupar)  # tau
    fCC.region(3).setParameters(*cpar)    # c
    # data vector: real and imaginary part of complex conductivity
    data = pg.cat(1. / amp * np.cos(phi), 1. / amp * np.sin(phi))
    ICC = pg.RInversion(data, fCC, False)  # set up inversion class
    ICC.setTransModel(tLog)
    # relative error plus a small absolute floor
    ICC.setAbsoluteError(data * error + max(data) * 0.0001)
    ICC.setLambda(lam)  # start with large damping and cool later
    ICC.setMarquardtScheme(0.8)  # lower lambda by 20%/it., no stop chi=1
    model = np.asarray(ICC.run())  # run inversion
    ICC.echoStatus()
    # convert complex response back to amplitude and phase
    response = np.asarray(ICC.response())
    rRe, rIm = response[:len(f)], response[len(f):]
    rAmp = 1. / np.sqrt(rRe**2 + rIm**2)
    return model, rAmp, np.arctan(rIm / rRe)
def fitCCC(f, amp, phi, eRho=0.01, ePhi=0.001, lam=1000., mstart=None,
           taupar=(1e-2, 1e-5, 100), cpar=(0.5, 0, 1)):
    """Fit complex spectrum by Cole-Cole model.

    Parameters
    ----------
    f : iterable
        Frequencies.
    amp, phi : array
        Amplitude and phase data.
    eRho, ePhi : float
        Relative amplitude error and absolute phase error.
    lam : float
        Initial regularization parameter.
    mstart : float, optional
        Chargeability start value; estimated from the amplitude decay
        if not given.
    taupar, cpar : tuple (start, lower, upper)
        Start value and bounds for tau and c.

    Returns
    -------
    model, ampResponse, phiResponse
    """
    fop = ColeColeComplex(f)
    logTrans = pg.RTransLog()
    fop.region(0).setStartValue(max(amp))
    if mstart is None:  # compute from amplitude decay
        mstart = 1. - min(amp) / max(amp)
    fop.region(1).setParameters(mstart, 0, 1)  # m (start,lower,upper)
    fop.region(2).setParameters(*taupar)       # tau
    fop.region(3).setParameters(*cpar)         # c
    data = pg.cat(amp, phi)
    inv = pg.RInversion(data, fop, False)  # set up inversion class
    inv.setTransModel(logTrans)
    inv.setAbsoluteError(pg.cat(eRho * amp, pg.RVector(len(f), ePhi)))
    inv.setLambda(lam)           # start with large damping and cool later
    inv.setMarquardtScheme(0.8)  # lower lambda by 20%/it., no stop chi=1
    model = np.asarray(inv.run())  # run inversion
    inv.echoStatus()
    response = np.asarray(inv.response())
    nf = len(f)
    return model, response[:nf], response[nf:]
def DebyeDecomposition(fr, phi, maxfr=None, tv=None, verbose=False,
                       zero=False, err=0.25e-3, lam=10., blocky=False):
    """Debye decomposition of a phase spectrum.

    Parameters
    ----------
    fr : array
        Frequencies.
    phi : array
        Phase angles (in mrad, scaled by 1e-3 before inversion).
    maxfr : float, optional
        Upper frequency limit; data above it (or with negative phase)
        are discarded.
    tv : array, optional
        Relaxation time discretization; derived from the frequency
        range if not given.
    verbose : bool
        Verbosity flag for the inversion.
    zero : bool
        Use the zero-augmented modelling variant with per-region
        constraints.
    err : float
        Absolute data error.
    lam : float
        Regularization parameter.
    blocky : bool
        Use blocky (L1-type) model constraints.

    Returns
    -------
    tvec, mvec, response, idx : times, model, response (rescaled by 1e3)
        and boolean mask of the frequencies used.
    """
    if maxfr is not None:
        idx = (fr <= maxfr) & (phi >= 0.)
        phi1 = phi[idx]
        fr1 = fr[idx]
        # report the actually used (filtered) frequency range
        print("using frequencies from ", N.min(fr1), " to ", N.max(fr1), "Hz")
    else:
        phi1 = phi
        fr1 = fr
        # fix: idx was previously undefined on this branch, raising
        # NameError at the return statement below
        idx = N.ones(len(fr), dtype=bool)
    if tv is None:
        # spread relaxation times a bit beyond the measured period range
        tmax = 1. / N.min(fr1) / 2. / N.pi * 4.
        tmin = 1. / N.max(fr1) / 2. / N.pi / 8.
        tvec = N.logspace(N.log10(tmin), N.log10(tmax), 30)
    else:
        tvec = tv
    f = DebyeModelling(fr1, tvec, zero=zero)
    tvec = f.t_
    tm = pg.RTransLog()
    start = pg.RVector(len(tvec), 1e-4)
    if zero:
        f.region(-1).setConstraintType(0)  # smoothness
        f.region(0).setConstraintType(1)  # smoothness
        f.region(1).setConstraintType(0)  # min length
        f.regionManager().setInterRegionConstraint(-1, 0, 1.)
        f.regionManager().setInterRegionConstraint(0, 1, 1.)
        f.region(-1).setTransModel(tm)
        f.region(0).setTransModel(tm)
        f.region(1).setTransModel(tm)
        # strongly damp the outer (zero-padding) regions
        f.region(-1).setModelControl(1000.)
        f.region(1).setModelControl(1000.)
    else:
        f.regionManager().setConstraintType(1)  # smoothness
    inv = pg.RInversion(pg.asvector(phi1 * 1e-3), f, verbose)
    inv.setAbsoluteError(pg.RVector(len(fr1), err))
    inv.setLambda(lam)
    inv.setModel(start)
    inv.setBlockyModel(blocky)
    if zero:
        inv.setReferenceModel(start)
    else:
        inv.setTransModel(tm)
    mvec = inv.run()
    resp = inv.response()
    return tvec, mvec, N.array(resp) * 1e3, idx
def createInv(self, nlay, lam=100., errVES=3, verbose=True):
    """Create Marquardt-type inversion instance with data transformations.

    Parameters
    ----------
    nlay : int
        Number of layers for the forward operator.
    lam : float
        Initial regularization parameter.
    errVES : float
        Relative VES error in percent.
    verbose : bool
        Verbosity flag passed to the inversion.
    """
    self.createFOP(nlay)
    # log transform for the model; cumulative data transform with a
    # linear part (MRS data) followed by a log part (VES data)
    self.tMod = pg.RTransLog()
    self.tMRS = pg.RTrans()
    self.tVES = pg.RTransLog()
    self.transData = pg.RTransCumulative()
    self.transData.push_back(self.tMRS, len(self.data))
    self.transData.push_back(self.tVES, len(self.rhoa))
    joint = pg.cat(self.data, self.rhoa)
    inv = pg.RInversion(joint, self.f, self.transData, verbose)
    inv.setLambda(lam)
    inv.setMarquardtScheme(0.8)
    inv.stopAtChi1(False)  # now in MarquardtScheme
    inv.setDeltaPhiAbortPercent(0.5)
    # absolute MRS errors concatenated with relative VES errors
    inv.setAbsoluteError(pg.cat(self.error, self.rhoa * errVES / 100.))
    self.INV = inv
def createInv(self, fop, verbose=True, dosave=False):
    """Build and return an inversion wired to the given forward operator.

    Data are log-transformed; the model uses a bounded log transform.
    Both transforms are kept on ``self`` so they are not destroyed
    while the inversion is in use.
    """
    self.tD = pg.RTransLog()
    self.tM = pg.RTransLogLU()
    out = pg.RInversion(verbose, dosave)
    out.setTransData(self.tD)
    out.setTransModel(self.tM)
    out.setForwardOperator(fop)
    return out
def __init__(self, **kwargs):
    """Initialize the modelling base class.

    A forward operator may be supplied via the ``fop`` keyword; it is
    popped from kwargs *before* delegating so the base constructor
    never sees it.
    """
    fop = kwargs.pop('fop', None)
    super(Modelling, self).__init__(**kwargs)
    self.__regionProperties = {}  # per-region settings, filled later
    self.__transModel = pg.RTransLog()  # default model transformation
    self.fop = None
    if fop is not None:
        self.setForwardOperator(fop)
def __init__(self, fop, data, error, startmodel, lam=20, beta=10000,
             maxIter=50, fwmin=0, fwmax=1, fimin=0, fimax=1, famin=0,
             famax=1, frmin=0, frmax=1):
    """Set up a constrained joint RST/ERT inversion with four phase
    fractions (presumably water/ice/air/rock, judging by the fw/fi/fa/fr
    parameter names — confirm).

    Parameters
    ----------
    fop : forward operator
        Combined operator exposing RST and ERT parts.
    data : vector
        RST data followed by ERT data (matching the cumulative transform).
    error : vector
        Relative errors for *data*.
    startmodel : vector
        Starting model (cellCount values per phase).
    lam : float
        Regularization strength.
    beta : float
        Weight of the parameter (equality) constraint.
    maxIter : int
        Maximum number of iterations.
    f?min, f?max : float
        Lower/upper bounds of the four phase fractions.
    """
    LSQRInversion.__init__(self, data, fop, verbose=True, dosave=True)
    self._error = pg.RVector(error)
    # Set data transformations: linear for the RST part, log for ERT
    self.logtrans = pg.RTransLog()
    self.trans = pg.RTrans()
    self.dcumtrans = pg.RTransCumulative()
    self.dcumtrans.add(self.trans,
                       self.forwardOperator().RST.dataContainer.size())
    self.dcumtrans.add(self.logtrans, self.forwardOperator().ERT.data.size())
    self.setTransData(self.dcumtrans)
    # Set model transformation: one bounded log transform per phase
    n = self.forwardOperator().cellCount
    # NOTE(review): pg.TransCumulative here vs. pg.RTransCumulative above —
    # confirm both names resolve to the same class in this pygimli version.
    self.mcumtrans = pg.TransCumulative()
    self.transforms = []
    phase_limits = [[fwmin, fwmax], [fimin, fimax], [famin, famax],
                    [frmin, frmax]]
    for i, (lower, upper) in enumerate(phase_limits):
        if lower == 0:  # log transform needs a strictly positive lower bound
            lower = 0.001
        self.transforms.append(pg.RTransLogLU(lower, upper))
        self.mcumtrans.add(self.transforms[i], n)
    self.setTransModel(self.mcumtrans)
    # Set error
    self.setRelativeError(self._error)
    # Set some defaults
    # Set maximum number of iterations (default is 20)
    self.setMaxIter(maxIter)
    # Regularization strength
    self.setLambda(lam)
    self.setDeltaPhiAbortPercent(0.25)
    fop = self.forwardOperator()
    fop.createConstraints()  # Important!
    # equality constraint G * m = phiVec weighted by beta
    ones = pg.RVector(fop._I.rows(), 1.0)
    phiVec = pg.cat(ones, startmodel)
    self.setParameterConstraints(fop._G, phiVec, beta)
    self.setModel(startmodel)
def blockLCInversion(self, nlay=2, startModel=None, **kwargs):
    """Laterally constrained (piece-wise 1D) block inversion.

    Concatenates data and errors of all soundings in ``self.mrs`` and
    inverts them jointly with an MRSLCI forward operator. Fills
    ``self.WMOD``/``self.TMOD`` with per-sounding models and per-sounding
    fit statistics in ``self.RMSvec``/``self.Chi2vec``.

    Parameters
    ----------
    nlay : int
        Number of layers of each 1D model.
    startModel : array-like, optional
        Single-sounding starting model; obtained from a 1D block
        inversion if not given.
    **kwargs :
        zWeight, cType, startvec, lam, maxIter.
    """
    data, error, self.nData = pg.RVector(), pg.RVector(), []
    for mrs in self.mrs:
        data = pg.cat(data, mrs.data)
        error = pg.cat(error, mrs.error)
        self.nData.append(len(mrs.data))
    fop = MRSLCI(self.mrs, nlay=nlay)
    fop.region(0).setZWeight(kwargs.pop('zWeight', 0))
    fop.region(0).setConstraintType(kwargs.pop('cType', 1))
    transData, transMod = pg.RTrans(), pg.RTransLog()  # LU(1., 500.)
    if startModel is None:
        startModel = self.block1dInversion(nlay, verbose=False)
    # repeat the single-sounding start model once per sounding
    model = kwargs.pop('startvec', np.tile(startModel, len(self.mrs)))
    INV = pg.RInversion(data, fop, transData, transMod, True, False)
    INV.setModel(model)
    INV.setReferenceModel(model)
    INV.setAbsoluteError(error)
    INV.setLambda(kwargs.pop('lam', 100))
    INV.setMaxIter(kwargs.pop('maxIter', 20))
    # INV.stopAtChi1(False)
    INV.setLambdaFactor(0.9)
    INV.setDeltaPhiAbortPercent(0.1)
    model = INV.run()
    # unpack: each sounding has 3*nlay-1 parameters — nlay-1 thicknesses
    # followed by two nlay-sized parameter groups (stored with the
    # thicknesses in WMOD and TMOD respectively)
    self.WMOD, self.TMOD = [], []
    for par in np.reshape(model, (len(self.mrs), 3 * nlay - 1)):
        thk = par[0:nlay - 1]
        self.WMOD.append(np.hstack((thk, par[nlay - 1:2 * nlay - 1])))
        self.TMOD.append(np.hstack((thk, par[2 * nlay - 1:3 * nlay - 1])))
    ind = np.hstack((0, np.cumsum(self.nData)))
    resp = INV.response()
    misfit = data - resp
    emisfit = misfit / error  # error-weighted misfit
    misfit *= 1e9  # presumably V -> nV scaling — confirm units
    self.totalChi2 = INV.chi2()
    self.totalRMS = INV.absrms() * 1e9
    # per-sounding fit quality from the concatenated misfit vector
    self.RMSvec, self.Chi2vec = [], []
    for i in range(len(self.mrs)):
        self.RMSvec.append(np.sqrt(np.mean(misfit[ind[i]:ind[i + 1]]**2)))
        self.Chi2vec.append(np.mean(emisfit[ind[i]:ind[i + 1]]**2))
def fitCCAbs(f, amp, error=0.01, lam=1000., mstart=None,
             taupar=(1e-2, 1e-5, 100), cpar=(0.5, 0, 1)):
    """Fit amplitude spectrum by Cole-Cole model.

    Parameters
    ----------
    f : iterable
        Frequencies.
    amp : array
        Amplitude data.
    error : float
        Relative data error.
    lam : float
        Initial regularization parameter.
    mstart : float, optional
        Chargeability start value; estimated from the amplitude decay
        if not given.
    taupar, cpar : tuple (start, lower, upper)
        Start value and bounds for tau and c.

    Returns
    -------
    model, response
    """
    fop = ColeColeAbs(f)
    logTrans = pg.RTransLog()
    fop.region(0).setStartValue(max(amp))
    if mstart is None:  # compute from amplitude decay
        mstart = 1. - min(amp) / max(amp)
    fop.region(1).setParameters(mstart, 0, 1)  # m (start,lower,upper)
    fop.region(2).setParameters(*taupar)       # tau
    fop.region(3).setParameters(*cpar)         # c
    inv = pg.RInversion(amp, fop, logTrans, logTrans, False)
    inv.setRelativeError(error)
    inv.setLambda(lam)           # start with large damping and cool later
    inv.setMarquardtScheme(0.8)  # lower lambda by 20%/it., no stop chi=1
    model = np.asarray(inv.run())  # run inversion
    inv.echoStatus()
    return model, np.asarray(inv.response())
ab2 = np.logspace(-1, 2, 50) # AB/2 distance (current electrodes) mn2 = ab2 / 3. # MN/2 distance (potential electrodes) ############################################################################### # initialize the forward modelling operator f = pg.DC1dModelling(nlay, ab2, mn2) ############################################################################### # other ways are by specifying a Data Container or am/an/bm/bn distances synres = [100., 500., 20., 800.] # synthetic resistivity synthk = [0.5, 3.5, 6.] # synthetic thickness (nlay-th layer is infinite) ############################################################################### # the forward operator can be called by f.response(model) or simply f(model) rhoa = f(synthk + synres) rhoa = rhoa * (pg.randn(len(rhoa)) * errPerc / 100. + 1.) ############################################################################### # create some transformations used for inversion transThk = pg.RTransLog() # log-transform ensures thk>0 transRho = pg.RTransLogLU(1, 1000) # lower and upper bound transRhoa = pg.RTransLog() # log transformation for data ############################################################################### # set model transformation for thickness and resistivity f.region(0).setTransModel(transThk) # 0=thickness f.region(1).setTransModel(transRho) # 1=resistivity ############################################################################### # generate start model values from median app. resistivity & spread paraDepth = max(ab2) / 3. # rule-of-thumb for Wenner/Schlumberger f.region(0).setStartValue(paraDepth / nlay / 2) f.region(1).setStartValue(np.median(rhoa)) ############################################################################### # set up inversion inv = pg.RInversion(rhoa, f, transRhoa, True) # data vector, fop, verbose # could also be set by inv.setTransData(transRhoa)
def invBlock(self, xpos=0, nlay=2, noise=1.0, stmod=30., lam=100.,
             lBound=0., uBound=0., verbose=False):
    """Create and return Gimli inversion instance for block inversion.

    Parameters
    ----------
    xpos : array
        Position vector.
    nlay : int
        Number of layers of the model to be determined OR
        vector of layer numbers OR forward operator.
    noise : float or array
        Data error; used via setAbsoluteError, so it is absolute.
        NOTE(review): the former docstring said "in percent" — confirm
        the intended unit against callers.
    stmod : float or pg.RVector
        Starting model.
    lam : float
        Global regularization parameter lambda.
    lBound : float
        Lower boundary for the model.
    uBound : float
        Upper boundary for the model. 0 means no upper boundary.
    verbose : bool
        Be verbose.
    """
    self.transThk = pg.RTransLog()
    self.transRes = pg.RTransLogLU(lBound, uBound)
    self.transData = pg.RTrans()
    # EM forward operator: either passed in directly or built for nlay layers
    if isinstance(nlay, pg.FDEM1dModelling):
        self.fop = nlay
    else:
        self.fop = self.FOP(nlay)
    data = self.datavec(xpos)
    self.fop.region(0).setTransModel(self.transThk)
    self.fop.region(1).setTransModel(self.transRes)
    # scalar noise is expanded to a constant error vector
    if isinstance(noise, float):
        noiseVec = pg.RVector(len(data), noise)
    else:
        noiseVec = pg.asvector(noise)
    # independent EM inversion
    self.inv = pg.RInversion(data, self.fop, self.transData, verbose)
    if isinstance(stmod, float):  # real model given
        model = pg.RVector(nlay * 2 - 1, stmod)
        model[0] = 2.
    else:
        if len(stmod) == nlay * 2 - 1:
            model = pg.asvector(stmod)
        else:
            model = pg.RVector(nlay * 2 - 1, 30.)
    self.inv.setAbsoluteError(noiseVec)
    self.inv.setLambda(lam)
    self.inv.setMarquardtScheme(0.8)
    self.inv.setDeltaPhiAbortPercent(0.5)
    self.inv.setModel(model)
    self.inv.setReferenceModel(model)
    return self.inv
#fop = pb.DCMultiElectrodeModelling(mesh, data) fop.regionManager().region(1).setBackground(True) fop.createRefinedForwardMesh(refine=True, pRefine=False) cData = pb.getComplexData(data) mag = pg.abs(cData) phi = -pg.phase(cData) print(pg.norm(mag - data('rhoa'))) print(pg.norm(phi - data('ip') / 1000)) inv = pg.RInversion(pg.cat(mag, phi), fop, verbose=True, dosave=True) dataTrans = pg.RTransCumulative() datRe = pg.RTransLog() datIm = pg.RTrans() dataTrans.add(datRe, data.size()) dataTrans.add(datIm, data.size()) modRe = pg.RTransLog() modIm = pg.RTransLog() modelTrans = pg.RTransCumulative() modelTrans.add(modRe, fop.regionManager().parameterCount()) modelTrans.add(modIm, fop.regionManager().parameterCount()) inv.setTransData(dataTrans) inv.setTransModel(modelTrans) inv.setAbsoluteError(pg.cat(data("err") * mag, mag * phi * 10.01)) inv.setLambda(5) inv.setMaxIter(5)
# inversion settings (nlay is defined above)
lam = 20.  # regularization strength
errPerc = 3.  # assumed relative data error in percent
isBlocky = False
# load sounding data (columns: AB/2, MN/2, apparent resistivity)
abmnr = g.RMatrix()
g.loadMatrixCol(abmnr, "sond1-100.ves")
ab2 = abmnr[0]
mn2 = abmnr[1]
rhoa = abmnr[2]
# rule-of-thumb estimate of the maximum penetration depth
maxDep = max(ab2) / 2.
print("Maximum depth estimated to ", maxDep)
# equally thick layers rescaled to sum to maxDep/3
thk = g.RVector(nlay - 1, maxDep / (nlay - 1))
thk *= (maxDep / sum(thk) / 3)
# log transforms for model resistivity and apparent resistivity data
transRho = g.RTransLog()
transRhoa = g.RTransLog()
f = g.DC1dRhoModelling(thk, ab2, mn2)
inv = g.RInversion(rhoa, f, True)
model = g.RVector(nlay, P.median(rhoa))  # homogeneous starting model
inv.setModel(model)
inv.setTransData(transRhoa)
inv.setTransModel(transRho)
inv.setRelativeError(errPerc / 100.0)
inv.setLambda(lam)
model = inv.run()
# reset to a fresh homogeneous model for a second run
model2 = g.RVector(nlay, P.median(rhoa))
inv.setModel(model2)
import pygimli as g
import pylab as P
from pygimli.utils.base import draw1dmodel

# block (layered) resistivity sounding inversion with fixed layer count
nlay = 4  # number of layers
lam = 200.  # regularization strength
errPerc = 3.  # assumed relative data error in percent
# load sounding data (columns: AB/2, MN/2, apparent resistivity)
abmnr = g.RMatrix()
g.loadMatrixCol(abmnr, "sond1-100.ves")
ab2 = abmnr[0]
mn2 = abmnr[1]
rhoa = abmnr[2]
# transformations: bounded log for resistivity, log for thickness and data
transRho = g.RTransLogLU(1., 1000.)
transThk = g.RTransLog()
transRhoa = g.RTransLog()
f = g.DC1dModelling(nlay, ab2, mn2)
f.region(0).setTransModel(transThk)
f.region(1).setTransModel(transRho)
# starting values from rule-of-thumb depth and median apparent resistivity
paraDepth = max(ab2) / 3
f.region(0).setStartValue(max(ab2) / 3. / nlay / 2.)
f.region(1).setStartValue(P.median(rhoa))
model = f.createStartVector()
model[nlay] *= 1.5  # perturb a single resistivity value
inv = g.RInversion(rhoa, f, True)
inv.setModel(model)
def applyDataTrans(self):
    """Attach a logarithmic transformation to the inversion's data side.

    The transform is stored as an attribute so it outlives this call.
    """
    trans = pg.RTransLog()
    self.dataTrans = trans
    self.INV.setTransData(trans)
def main():
    """Read inversion config, load ERT data, build mesh and run inversion.

    Writes the resulting resistivity model to 'resistivity.vector'.
    """
    # read config file
    conf_file = "inv.conf"
    with open(conf_file, "r") as fd:
        conf = json.load(fd)
    # res = pb.Resistivity("input.dat")
    # res.invert()
    # np.savetxt('resistivity.vector', res.resistivity)
    # return
    # load data file
    data = pg.DataContainerERT("input.dat")
    # remove invalid data
    oldsize = data.size()
    data.removeInvalid()
    newsize = data.size()
    if newsize < oldsize:
        print('Removed ' + str(oldsize - newsize) + ' values.')
    if not data.allNonZero('rhoa'):
        print("No or partial rhoa values.")
        return
    # check, compute error: use stored errors if complete, otherwise
    # estimate from relative + absolute parts of the config
    if data.allNonZero('err'):
        error = data('err')
    else:
        print("estimate data error")
        error = conf["relativeError"] + conf["absoluteError"] / data('rhoa')
    # create FOP, using only physical cores for threading
    fop = pg.DCSRMultiElectrodeModelling(verbose=conf["verbose"])
    fop.setThreadCount(psutil.cpu_count(logical=False))
    fop.setData(data)
    # create Inv
    inv = pg.RInversion(verbose=conf["verbose"], dosave=False)
    # variables tD, tM are needed to prevent destruct objects
    tD = pg.RTransLog()
    tM = pg.RTransLogLU()
    inv.setTransData(tD)
    inv.setTransModel(tM)
    inv.setForwardOperator(fop)
    # mesh: build a parameter mesh from sensor positions unless a mesh
    # file is configured
    if conf["meshFile"] == "":
        depth = conf["depth"]
        if depth is None:
            depth = pg.DCParaDepth(data)
        poly = pg.meshtools.createParaMeshPLC(
            data.sensorPositions(), paraDepth=depth, paraDX=conf["paraDX"],
            paraMaxCellSize=conf["maxCellArea"], paraBoundary=2, boundary=2)
        if conf["verbose"]:
            print("creating mesh...")
        mesh = pg.meshtools.createMesh(poly, quality=conf["quality"],
                                       smooth=(1, 10))
    else:
        mesh = pg.Mesh(pg.load(conf["meshFile"]))
    mesh.createNeighbourInfos()
    if conf["verbose"]:
        print(mesh)
    sys.stdout.flush()  # flush before multithreading
    fop.setMesh(mesh)
    fop.regionManager().setConstraintType(1)
    if not conf["omitBackground"]:
        if fop.regionManager().regionCount() > 1:
            fop.regionManager().region(1).setBackground(True)
    if conf["meshFile"] == "":
        fop.createRefinedForwardMesh(True, False)
    else:
        fop.createRefinedForwardMesh(conf["refineMesh"], conf["refineP2"])
    paraDomain = fop.regionManager().paraDomain()
    inv.setForwardOperator(fop)  # necessary?
    # inversion parameters
    inv.setData(data('rhoa'))
    inv.setRelativeError(error)
    fop.regionManager().setZWeight(conf['zWeight'])
    inv.setLambda(conf['lam'])
    inv.setMaxIter(conf['maxIter'])
    inv.setRobustData(conf['robustData'])
    inv.setBlockyModel(conf['blockyModel'])
    inv.setRecalcJacobian(conf['recalcJacobian'])
    # homogeneous starting model from the median apparent resistivity
    pc = fop.regionManager().parameterCount()
    startModel = pg.RVector(pc, pg.median(data('rhoa')))
    inv.setModel(startModel)
    # Run the inversion
    sys.stdout.flush()  # flush before multithreading
    model = inv.run()
    # map cell-wise model onto the parameter domain and save
    resistivity = model(paraDomain.cellMarkers())
    np.savetxt('resistivity.vector', resistivity)
    print("Done.")
# Reflect the fix value setting here!!!! fop.createRefinedForwardMesh(refine=False, pRefine=False) # Connect all regions for i in range(2, fop.regionManager().regionCount()): for j in range(i + 1, fop.regionManager().regionCount()): fop.regionManager().setInterRegionConstraint(i, j, 1.0) startModel = pg.RVector(fop.regionManager().parameterCount(), 1e-3) fop.setStartModel(startModel) inv = pg.RInversion(rhoaR.flatten(), fop, verbose=1, dosave=0) tD = pg.RTransLog() tM = pg.RTransLogLU(1e-9, 1e-2) inv.setTransData(tD) inv.setTransModel(tM) inv.setRelativeError(err.flatten()) inv.setMaxIter(50) inv.setLineSearch(True) inv.setLambda(1000) outPath = "permModel_h-" + str(paraRefine) if not os.path.exists(outPath): os.mkdir(outPath) paraMesh.save(outPath + '/paraMesh') fop.mesh().save(outPath + "/fopMesh")
def fitDebyeModel(self, ePhi=0.001, lam=1e3, lamFactor=0.8,
                  mint=None, maxt=None, nt=None, new=True,
                  showFit=False, cType=1):
    """Fit a (smooth) continuous Debye model (Debye decomposition).

    Stores the relaxation-time distribution in ``self.mDD`` and the
    fitted responses in ``self.phiDD`` (and ``self.ampDD`` for the new
    complex variant).

    Parameters
    ----------
    ePhi : float
        absolute error of phase angle
    lam : float
        regularization parameter
    lamFactor : float
        regularization factor for subsequent iterations
    mint/maxt : float
        minimum/maximum tau values to use (else automatically from f)
    nt : int
        number of tau values (default number of frequencies * 2)
    new : bool
        new implementation (experimental)
    showFit : bool
        show fit
    cType : int
        constraint type (1/2=smoothness 1st/2nd order, 0=minimum norm)
    """
    nf = len(self.f)
    # default tau range derived from the measured frequency range
    if mint is None:
        mint = .1 / max(self.f)
    if maxt is None:
        maxt = .5 / min(self.f)
    if nt is None:
        nt = nf * 2
    # discretize tau, setup DD and perform DD inversion
    self.tau = np.logspace(log10(mint), log10(maxt), nt)
    phi = self.phi
    tLin, tLog, tM = pg.RTrans(), pg.RTransLog(), pg.RTransLog()
    # pg.RTransLogLU(0., 1.)
    if new:
        # complex variant: fit normalized real and imaginary parts jointly
        reNorm, imNorm = self.zNorm()
        fDD = DebyeComplex(self.f, self.tau)
        Znorm = pg.cat(reNorm, imNorm)
        IDD = pg.RInversion(Znorm, fDD, tLog, tM, False)
        IDD.setAbsoluteError(max(Znorm) * 0.003 + 0.01)
    else:
        # classical variant: fit the phase spectrum only
        fDD = DebyePhi(self.f, self.tau)
        IDD = pg.RInversion(phi, fDD, tLin, tM, True)
        IDD.setAbsoluteError(ePhi)  # 1 mrad
    fDD.regionManager().setConstraintType(cType)
    IDD.stopAtChi1(False)
    startModel = pg.RVector(nt, 0.01)
    IDD.setModel(startModel)
    IDD.setLambda(lam)
    IDD.setLambdaFactor(lamFactor)
    self.mDD = IDD.run()
    IDD.echoStatus()
    if new:
        # reconstruct complex response, then amplitude and phase
        resp = np.array(IDD.response())
        respRe = resp[:nf]
        respIm = resp[nf:]
        respC = ((1 - respRe) + respIm * 1j) * max(self.amp)
        self.phiDD = np.angle(respC)
        self.ampDD = np.abs(respC)
        if showFit:
            fig, ax = self.showData(znorm=True, nrows=3)
            self.fig['DebyeFit'] = fig
            ax[0].plot(self.f, respRe, 'r-')
            ax[1].plot(self.f, respIm, 'r-')
            ax[2].semilogx(self.tau, self.mDD, 'r-')
            ax[2].set_xlim(max(self.tau), min(self.tau))
            ax[2].set_ylim(0., max(self.mDD))
            ax[2].grid(True)
            ax[2].set_xlabel(r'$\tau$ (s)')
            ax[2].set_ylabel('$m$ (-)')
    else:
        self.phiDD = IDD.response()
        if showFit:
            fig, ax = self.showData(nrows=3)
            self.fig['DebyeSpectrum'] = fig
            ax[2].semilogx(self.tau, self.mDD, 'r-')
                                 paraMaxCellSize=1, quality=34,
                                 smooth=[1, 4], paraDepth=10, paraDX=0.5)
print(mesh)
# fop = pb.DCSRMultiElectrodeModelling(mesh, data)
fop = pg.physics.ert.ERTModelling(mesh, data)
fop.regionManager().region(1).setBackground(True)
fop.createRefinedForwardMesh(refine=True, pRefine=False)
inv = pg.RInversion(data("rhoa"), fop, verbose=True, dosave=True)
# log transforms for both data and model
datTrans = pg.RTransLog()
modTrans = pg.RTransLog()
inv.setMaxIter(10)
inv.setTransData(datTrans)
inv.setTransModel(modTrans)
# NOTE(review): other scripts in this project use setRelativeError /
# setAbsoluteError — confirm setError has the intended semantics here.
inv.setError(data('err'))
# homogeneous starting model from the median apparent resistivity
inv.setModel(
    pg.RVector(fop.regionManager().parameterCount(),
               pg.median(data('rhoa'))))
inv.setLambda(5)
model = inv.run()
modelMesh = fop.regionManager().paraDomain()
a, cbar = pg.show(modelMesh, model)
coilspacing = 50.  # coil separation (presumably meters — confirm)
nf = 10  # number of frequencies
# frequencies: 110 Hz, doubled nf-1 times (110, 220, 440, ...)
freq = pg.RVector(nf, 110.)
for i in range(nf - 1):
    freq[i + 1] = freq[i] * 2.
fEM = pg.FDEM1dModelling(nlay, freq, coilspacing)
dataEM = fEM(model)
# contaminate synthetic data with Gaussian noise
for i in range(len(dataEM)):
    dataEM[i] += np.random.randn(1)[0] * noiseEM
###############################################################################
# We define model transformations: logarithms and log with upper+lower bounds
transRhoa = pg.RTransLog()
transThk = pg.RTransLog()
transRes = pg.RTransLogLU(1., 1000.)
transEM = pg.RTrans()
fEM.region(0).setTransModel(transThk)
fEM.region(1).setTransModel(transRes)
###############################################################################
# We set up the independent EM inversion and run the model.
invEM = pg.RInversion(dataEM, fEM, transEM, verbose)
modelEM = pg.RVector(nlay * 2 - 1, 50.)  # homogeneous starting model
invEM.setModel(modelEM)
invEM.setAbsoluteError(noiseEM)
invEM.setLambda(lamEM)
invEM.setMarquardtScheme(0.9)
synres = [100., 500., 20., 800.] # synthetic resistivity synthk = [4, 6, 10] # synthetic thickness (lay layer is infinite) ab2 = np.logspace(-1, 2, 25) # 0.1 to 100 in 25 steps (8 points per decade) fBlock = pg.DC1dModelling(len(synres), ab2, ab2 / 3) rhoa = fBlock(synthk + synres) # The data are noisified using a errPerc = 3. # relative error of 3 percent rhoa = rhoa * (pg.randn(len(rhoa)) * errPerc / 100. + 1.) ############################################################################### # The forward operator can be called by f.response(model) or simply f(model) thk = np.logspace(-0.5, 0.5, 30) f = pg.DC1dRhoModelling(thk, ab2, ab2 / 3) ############################################################################### # Create some transformations used for inversion transRho = pg.RTransLogLU(1, 1000) # lower and upper bound transRhoa = pg.RTransLog() # log transformation also for data ############################################################################### # Set up inversion inv = pg.RInversion(rhoa, f, transRhoa, transRho, False) # data vector, f, ... # The transformations can also be omitted and set individually by # inv.setTransData(transRhoa) # inv.setTransModel(transRho) inv.setRelativeError(errPerc / 100.0) ############################################################################### # Create a homogeneous starting model model = pg.RVector(len(thk) + 1, np.median(rhoa)) # uniform values inv.setModel(model) # ############################################################################### # Set pretty large regularization strength and run inversion print("inversion with lam=200") inv.setLambda(100)
    """Optimize line search by fitting parabola by Phi(tau) curve."""
    # NOTE(review): returns a fixed step length; the parabola fit named in
    # the docstring is apparently not implemented here — confirm.
    return 0.1


if __name__ == '__main__':
    nlay = 4  # number of layers
    lam = 200.  # (initial) regularization parameter
    errPerc = 3.  # relative error of 3 percent
    ab2 = np.logspace(-1, 2, 50)  # AB/2 distance (current electrodes)
    mn2 = ab2 / 3.  # MN/2 distance (potential electrodes)
    f = pg.DC1dModelling(nlay, ab2, mn2)
    synres = [100., 500., 20., 800.]  # synthetic resistivity
    synthk = [0.5, 3.5, 6.]  # synthetic thickness (nlay-th layer is infinite)
    rhoa = f(synthk + synres)
    # contaminate synthetic data with relative Gaussian noise
    rhoa = rhoa * (pg.randn(len(rhoa)) * errPerc / 100. + 1.)
    transLog = pg.RTransLog()
    inv = LSQRInversion(rhoa, f, transLog, transLog, True)
    inv.setRelativeError(errPerc / 100)
    # start model: nlay-1 thicknesses of 5, nlay median resistivities
    startModel = pg.cat(pg.Vector(nlay - 1, 5),
                        pg.Vector(nlay, pg.median(rhoa)))
    print(inv.response())
    inv.setModel(startModel)
    inv.setMarquardtScheme()
    inv.setLambda(1000)
    # equality constraint: the three thicknesses sum to the synthetic total
    G = pg.RMatrix(rows=1, cols=len(startModel))
    for i in range(3):
        G[0][i] = 1
    c = pg.Vector(1, pg.sum(synthk))
    inv.setParameterConstraints(G, c, 100)
    # print("Start", inv.chi2(), inv.relrms(), pg.sum(inv.model()(0, nlay-1)))
    if 0: