def createStartModel(self, rhoa, nLayer, thickness=None):
    r"""Create suitable starting model.

    Create suitable starting model based on median apparent resistivity
    values and a skin-depth approximation, or on a constant thickness.

    Parameters
    ----------
    rhoa : iterable of float
        Apparent resistivity values.
    nLayer : int
        Number of layers; the model holds nLayer-1 thicknesses followed
        by nLayer resistivities.
    thickness : float, optional
        Constant layer thickness. If None (default), thicknesses grow
        linearly and sum to half the estimated skin depth, which keeps
        the original behavior of this method.
    """
    res = np.ones(nLayer) * pg.median(rhoa)
    if thickness is None:
        # skin depth estimate ~ 500 * sqrt(rho * T) for rho in Ohmm, T in s
        skinDepth = np.sqrt(max(self.t) * pg.median(rhoa)) * 500
        # linearly increasing thicknesses summing to skinDepth / 2;
        # first (zero) entry is dropped so nLayer-1 thicknesses remain
        thk = np.arange(nLayer) / sum(np.arange(nLayer)) * skinDepth / 2.
        thk = thk[1:]
    else:
        thk = np.ones(nLayer - 1) * thickness
    self.setStartModel(pg.cat(thk, res))
    return self.startModel()
def createStartModel(self, rhoa, nLayer, thickness=None):
    r"""Create suitable starting model.

    Create suitable starting model based on median apparent resistivity
    values and skin depth approximation, or on a fixed layer thickness.
    """
    medianRhoa = pg.median(rhoa)
    res = np.ones(nLayer) * medianRhoa
    if thickness is None:
        # skin-depth based, linearly increasing thickness distribution
        skinDepth = np.sqrt(max(self.t) * medianRhoa) * 500
        ramp = np.arange(nLayer)
        thk = (ramp / sum(ramp) * skinDepth / 2.)[1:]
    else:
        # constant thickness for all nLayer-1 interfaces
        thk = np.ones(nLayer - 1) * thickness
    self.setStartModel(pg.cat(thk, res))
    return self.startModel()
def invert(self, data, mesh, lam=20, limits=None):
    """Set data and mesh, optionally bound the model transform, and run.

    The starting model is the mean over all managers of the median
    transformed apparent data; the result is mapped onto the
    parameter-domain cell markers.
    """
    self.setData(data)
    self.setMesh(mesh)
    if limits is not None:
        # apply bounds only if the model transform supports them
        if hasattr(self.tM, 'setLowerBound'):
            if self.verbose:
                print('Lower limit set to', limits[0])
            self.tM.setLowerBound(limits[0])
        if hasattr(self.tM, 'setUpperBound'):
            if self.verbose:
                print('Upper limit set to', limits[1])
            self.tM.setUpperBound(limits[1])
    nModel = self.fop.regionManager().parameterCount()
    # average the per-manager median models into one start model
    startModel = pg.RVector(nModel, 0.0)
    for i, manager in enumerate(self.managers):
        apparent = manager.createApparentData(self.data[i])
        startModel += pg.RVector(nModel, pg.median(self.trans[i].inv(apparent)))
    startModel /= len(self.managers)
    self.fop.setStartModel(startModel)
    self.inv.setData(self.dataVals)
    self.inv.setRelativeError(self.dataErrs)
    self.inv.setLambda(lam)
    self.model = self.inv.start()
    markers = self.fop.regionManager().paraDomain().cellMarkers()
    self.model = self.model(markers)
    return self.model
def invert(self, data=None, mesh=None, lam=20, limits=None, **kwargs):
    """Run the inversion.

    Parameters
    ----------
    data, mesh :
        Forwarded to :meth:`setData` / :meth:`setMesh`.
    lam : float [20]
        Regularization strength.
    limits : [float, float], optional
        Lower/upper bounds for the model transform; when given, the
        start model is set to the mid-point of the interval.
    verbose : bool, optional (kwargs)
        Forwarded to :meth:`setVerbose`.
    """
    if 'verbose' in kwargs:
        self.setVerbose(kwargs.pop('verbose'))
    self.setData(data)
    self.setMesh(mesh)
    nModel = self.fop.regionManager().parameterCount()
    startModel = None
    if limits is not None:
        if hasattr(self.tM, 'setLowerBound'):
            if self.verbose:
                print('Lower limit set to', limits[0])
            self.tM.setLowerBound(limits[0])
        if hasattr(self.tM, 'setUpperBound'):
            if self.verbose:
                print('Upper limit set to', limits[1])
            self.tM.setUpperBound(limits[1])
        startModel = pg.RVector(nModel, (limits[1] - limits[0]) / 2.0)
    else:
        # BUG FIX: startModel was still None here, so the in-place
        # addition below raised a TypeError. Accumulate into a zero
        # vector instead, then average over all managers.
        startModel = pg.RVector(nModel, 0.0)
        for i in range(len(self.managers)):
            startModel += pg.RVector(
                nModel, pg.median(self.trans[i].inv(
                    self.managers[i].createApparentData(self.data[i]))))
        startModel /= len(self.managers)
    self.inv.setModel(startModel)
    self.fop.regionManager().setZWeight(1.0)
    self.inv.setData(self.dataVals)
    self.inv.setRelativeError(self.dataErrs)
    self.inv.setLambda(lam)
    self.mod = self.inv.run()
    # map result onto parameter-domain cells
    self.mod = self.mod(
        self.fop.regionManager().paraDomain().cellMarkers())
    return self.mod
def createJacobian(self, model):
    """Assemble the Jacobian for real- or complex-valued resistivity.

    For the complex case the model vector holds [Re | Im] halves; a block
    matrix is assembled in self._J with entries mapping real/imaginary
    Jacobian parts onto the stacked data vector. For the real case a
    single matrix block is (re)used.

    NOTE(review): heavily instrumented with prints and commented-out
    scaling variants; behavior depends on pg/pb internals — kept verbatim.
    """
    print('=' * 100)
    if self.complex():
        # split stacked model into real and imaginary halves
        modelRe = model[0:int(len(model)/2)]
        modelIm = model[int(len(model)/2):len(model)]
        modelC = pg.toComplex(modelRe, modelIm)
        print("Real", min(modelRe), max(modelRe))
        print("Imag", min(modelIm), max(modelIm))
        u = self.prepareJacobian_(modelC)
        if self._J.rows() == 0:
            #re(data)/re(mod) = im(data)/im(mod)
            # we need a local copy until we have a gimli internal
            # reference counter FIXTHIS
            M1 = pg.RMatrix()
            M2 = pg.RMatrix()
            self.matrixHeap.append(M1)
            self.matrixHeap.append(M2)
            # block layout: [[Re, -Im], [Im, Re]]
            JRe = self._J.addMatrix(M1)
            JIm = self._J.addMatrix(M2)
            self._J.addMatrixEntry(JRe, 0, 0)
            self._J.addMatrixEntry(JIm, 0, len(modelRe), -1.0)
            self._J.addMatrixEntry(JIm, self.data().size(), 0, 1.0)
            self._J.addMatrixEntry(JRe, self.data().size(), len(modelRe))
        else:
            self._J.clean()
        # temporarily neutralize geometric factors during assembly,
        # restored below
        k = pg.RVector(self.data()('k'))
        self.data().set('k', k*0.0 + 1.0)
        dMapResponse = pb.DataMap()
        dMapResponse.collect(self.electrodes(), self.solution())
        respRe = dMapResponse.data(self.data(), False, False)
        respIm = dMapResponse.data(self.data(), False, True)
        #CVector resp(toComplex(respRe, respIm));
        #RVector am(abs(resp) * dataContainer_->get("k"));
        #RVector ph(-phase(resp));
        print("respRe", pg.median(respRe), min(respRe), max(respRe))
        print("respIm", pg.median(respIm), min(respIm), max(respIm))
        JC = pg.CMatrix()
        self.createJacobian_(modelC, u, JC)
        for i in range(JC.rows()):
            #JC[i] *= 1.0/(modelC*modelC) * k[i]
            # scale rows by k / model^2 (complex)
            JC[i] /= (modelC * modelC) / k[i]
        self._J.mat(0).copy(pg.real(JC))
        self._J.mat(1).copy(pg.imag(JC))
        #self.createJacobian_(modelRe*0.0+1.0, pg.real(u), self._J.mat(1))
        #self.createJacobian_(modelRe*0.0+1.0, pg.imag(u), self._J.mat(2))
        #self.createJacobian_(modelRe*0.0+1.0, pg.imag(u), self._J.mat(3))
        # diagnostic row sums of the sensitivity blocks
        sumsens0 = pg.RVector(self._J.mat(0).rows())
        sumsens1 = pg.RVector(self._J.mat(0).rows())
        sumsens2 = pg.RVector(self._J.mat(0).rows())
        for i in range(self._J.mat(0).rows()):
            #self._J.mat(0)[i] *= 1./modelRe / respRe[i]
            #self._J.mat(1)[i] *= 1./modelIm / respRe[i]
            #self._J.mat(2)[i] *= 1./modelRe / respIm[i]
            #self._J.mat(3)[i] *= 1./modelIm / respIm[i]
            #self._J.mat(0)[i] *= 1./(modelRe * modelRe) * k[i]
            #self._J.mat(1)[i] *= 1./(modelRe * modelIm) * k[i]
            #self._J.mat(2)[i] *= 1./(modelIm * modelRe) * k[i]
            #self._J.mat(3)[i] *= 1./(modelIm * modelIm) * k[i]
            sumsens0[i] = sum(self._J.mat(0)[i])
            sumsens1[i] = sum(self._J.mat(1)[i])
            sumsens2[i] = abs(sum(JC[i]))
        print(pg.median(sumsens0), min(sumsens0), max(sumsens0))
        print(pg.median(sumsens1), min(sumsens1), max(sumsens1))
        print(pg.median(sumsens2), min(sumsens2), max(sumsens2))
        # restore original geometric factors
        self.data().set('k', k)
        self._J.recalcMatrixSize()
    else:
        # self.setVerbose(True)
        u = self.prepareJacobian_(model)
        #J = pg.RMatrix()
        if self._J.rows() == 0:
            print('#' * 100)
            M1 = pg.RMatrix()
            Jid = self._J.addMatrix(M1)
            self._J.addMatrixEntry(Jid, 0, 0)
        else:
            self._J.clean()
        self.createJacobian_(model, u, self._J.mat(0))
        self._J.recalcMatrixSize()
def invert(self, data=None, vals=None, err=None, mesh=None, **kwargs):
    """Run the full inversion.

    The data and error needed to be set before. The meshes will be
    created if necessary.

    Parameters
    ----------
    lam : float [20]
        regularization parameter
    zWeight : float [0.7]
        relative vertical weight
    maxIter : int [20]
        maximum iteration number
    robustData : bool [False]
        robust data reweighting using an L1 scheme (IRLS reweighting)
    blockyModel : bool [False]
        blocky model constraint using L1 reweighting roughness vector
    startModelIsReference : bool [False]
        startmodel is the reference model for the inversion

    forwarded to createMesh

    depth

    quality

    paraDX

    maxCellArea
    """
    if 'verbose' in kwargs:
        self.setVerbose(kwargs.pop('verbose'))
    if data is not None:
        # setDataContainer would be better
        self.setData(data)
    if vals is not None:
        self.dataContainer.set(self.dataToken(), vals)
    if err is not None:
        # BUG FIX: previously stored `vals` into the 'err' field,
        # silently discarding the supplied errors.
        self.dataContainer.set('err', err)
    # check for data container here
    dataVals = self.dataContainer(self.dataToken())
    errVals = self.dataContainer('err')
    if mesh is not None:
        self.setMesh(mesh)
    if self.mesh is None:
        self.createMesh(depth=kwargs.pop('depth', None),
                        quality=kwargs.pop('quality', 34.0),
                        maxCellArea=kwargs.pop('maxCellArea', 0.0),
                        paraDX=kwargs.pop('paraDX', 0.3))
    self.inv.setData(dataVals)
    self.inv.setError(errVals, not self.errIsAbsolute)
    zWeight = kwargs.pop('zWeight', 0.7)
    if 'zweight' in kwargs:
        # deprecated alias, kept for backward compatibility
        zWeight = kwargs.pop('zweight', 0.7)
        print("zweight option will be removed soon. Please use zWeight.")
    self.fop.regionManager().setZWeight(zWeight)
    self.inv.setLambda(kwargs.pop('lam', 20))
    self.inv.setMaxIter(kwargs.pop('maxIter', 20))
    self.inv.setRobustData(kwargs.pop('robustData', False))
    self.inv.setBlockyModel(kwargs.pop('blockyModel', False))
    self.inv.setRecalcJacobian(kwargs.pop('recalcJacobian', True))
    # TODO: ADD MORE KWARGS
    pc = self.fop.regionManager().parameterCount()
    # homogeneous start model from the data median unless given
    startModel = kwargs.pop('startModel',
                            pg.RVector(pc, pg.median(dataVals)))
    self.inv.setModel(startModel)
    # self.fop.setStartModel(startModel)
    if kwargs.pop('startModelIsReference', False):
        self.inv.setReferenceModel(startModel)
    # Run the inversion
    if len(kwargs) > 0:
        print("Keyword arguments unknown:")
        print(kwargs)
        print("Warning! There are unknown kwargs arguments.")
    model = self.inv.run()
    self.model = model(self.paraDomain.cellMarkers())
    return self.model
def invert(self, data, values=None, verbose=0, **kwargs):
    """Invert the given data.

    A parametric mesh for the inversion will be created if non is given
    before.

    Parameters
    ----------
    data :
        Data container providing 'rhoa', 'err' and sensor positions.
    values : pg.RVector | np.ndarray | list, optional
        Additional apparent-resistivity vectors; for each one a single
        linearized substep around the converged model is performed and
        the resulting models are returned as a matrix.
    verbose : int [0]
        Verbosity forwarded to forward operator and inversion.
    kwargs :
        maxiter, lambd; remaining kwargs go to createParaMesh2dGrid.
    """
    self.fop.setVerbose(verbose)
    self.inv.setVerbose(verbose)
    self.inv.setMaxIter(kwargs.pop('maxiter', 10))
    self.inv.setLambda(kwargs.pop('lambd', 10))
    if self.paraMesh is None:
        # build a default 2D parameter grid below the sensors
        self.paraMesh = createParaMesh2dGrid(data.sensorPositions(),
                                             **kwargs)
        self.setParaMesh(self.paraMesh)
    if verbose:
        print(self.paraMesh)
        # pg.show(self.paraMesh)
    err = data('err')
    rhoa = data('rhoa')
    # homogeneous start model: median apparent resistivity
    startModel = pg.RVector(self.fop.regionManager().parameterCount(),
                            pg.median(rhoa))
    self.fop.setData(data)
    self.inv.setForwardOperator(self.fop)
    # check err here
    self.inv.setData(rhoa)
    self.inv.setError(err)
    self.inv.setModel(startModel)
    model = self.inv.run()
    if values is not None:
        # normalize input to a list of vectors
        if isinstance(values, pg.RVector):
            values = [values]
        elif isinstance(values, np.ndarray):
            if values.ndim == 1:
                values = [values]
        allModel = pg.RMatrix(len(values), len(model))
        self.inv.setVerbose(False)
        for i in range(len(values)):
            print(i)
            tic = time.time()
            # one linearized step around the converged reference model
            self.inv.setModel(model)
            self.inv.setReferenceModel(model)
            dData = pg.abs(values[i] / rhoa)
            relModel = self.inv.invSubStep(pg.log(dData))
            allModel[i] = model * pg.exp(relModel)
            print(i, "/", len(values), " : ", time.time()-tic,
                  "s min/max: ", min(allModel[i]), max(allModel[i]))
        return allModel
    return model
def calcSeismics(meshIn, vP):
    """Do seismic computations.

    Refines ``meshIn`` with an appended triangle boundary, interpolates
    the P-wave velocity ``vP`` onto it, solves (or loads a cached)
    pressure-wave field and renders the propagation as an mp4 animation
    plus extracted frames.
    """
    # simulation mesh: refined copy with absorbing boundary region
    meshSeis = meshIn.createH2()
    meshSeis = mt.appendTriangleBoundary(
        meshSeis, xbound=25, ybound=22.0, marker=1,
        quality=32.0, area=0.3, smooth=True, markerBoundary=1,
        isSubSurface=False, verbose=False)
    print(meshSeis)
    meshSeis = meshSeis.createH2()
    meshSeis = meshSeis.createH2()
    # meshSeis = meshSeis.createP2()
    meshSeis.smooth(1, 1, 1, 4)
    vP = pg.interpolate(meshIn, vP, meshSeis.cellCenters())
    mesh = meshSeis
    vP = pg.solver.fillEmptyToCellArray(mesh, vP)
    print(mesh)
    # ax, cbar = pg.show(mesh, data=vP)
    # pg.show(mesh, axes=ax)
    geophPointsX = np.arange(-19, 19.1, 1)
    geophPoints = np.vstack((geophPointsX,
                             np.zeros(len(geophPointsX)))).T
    sourcePos = geophPoints[4]
    c = mesh.findCell(sourcePos)
    # local edge lengths around the source cell for the time step
    h1 = pg.findBoundary(c.boundaryNodes(0)).size()
    h2 = pg.findBoundary(c.boundaryNodes(1)).size()
    h3 = pg.findBoundary(c.boundaryNodes(2)).size()
    print([h1, h2, h3])
    h = pg.median([h1, h2, h3])
    # h = pg.median(mesh.boundarySizes())
    f0scale = 0.25
    cfl = 0.5
    dt = cfl * h / max(vP)
    print("Courant-Friedrich-Lewy number:", cfl)
    tmax = 40. / min(vP)
    times = np.arange(0.0, tmax, dt)
    solutionName = createCacheName('seis', mesh, times) + "cfl-" + str(cfl)
    try:
        # u = pg.load(solutionName + '.bmat')
        uI = pg.load(solutionName + 'I.bmat')
    except Exception as e:
        # cache miss: compute the wave field and store it
        print(e)
        f0 = f0scale * 1. / dt
        print("h:", round(h, 2),
              "dt:", round(dt, 5),
              "1/dt:", round(1 / dt, 1),
              "f0", round(f0, 2),
              "Wavelength: ", round(max(vP) / f0, 2), " m")
        uSource = ricker(times, f0, t0=1. / f0)
        plt.figure()
        plt.plot(times, uSource, '-*')
        plt.show(block=0)
        plt.pause(0.01)
        u = solvePressureWave(mesh, vP, times, sourcePos=sourcePos,
                              uSource=uSource, verbose=10)
        u.save(solutionName)
        uI = pg.RMatrix()
        print("interpolate node to cell data ... ")
        pg.interpolate(mesh, u, mesh.cellCenters(), uI)
        print("... done")
        uI.save(solutionName + 'I')
    # nodes = [mesh.findNearestNode(p) for p in geophPoints]
    # fig = plt.figure()
    # axs = fig.add_subplot(1,1,1)
    # drawSeismogramm(axs, mesh, u, nodes, dt, i=None)
    # plt.show()
    dpi = 92
    scale = 1
    fig = plt.figure(facecolor='white',
                     figsize=(scale * 800 / dpi, scale * 490 / dpi),
                     dpi=dpi)
    ax = fig.add_subplot(1, 1, 1)
    gci = pg.mplviewer.drawModel(ax, mesh, data=uI[0],
                                 cMin=-1, cMax=1, cmap='bwr')
    pg.mplviewer.drawMeshBoundaries(ax, meshIn, hideMesh=1)
    ax.set_xlim((-20, 20))
    ax.set_ylim((-15, 0))
    ax.set_ylabel('Depth [m]')
    ax.set_xlabel('$x$ [m]')
    # show depths as positive numbers
    ticks = ax.yaxis.get_majorticklocs()
    tickLabels = []
    for t in ticks:
        tickLabels.append(str(int(abs(t))))
    ax.set_yticklabels(tickLabels)
    plt.tight_layout()
    # ax, cbar = pg.show(mesh, data=vP)
    # pg.showNow()
    # ax = fig.add_subplot(1,1,1)

    def animate(i):
        # every 5th time step; normalized, log-damped amplitudes
        i = i * 5
        if i > len(uI) - 1:
            return
        print("Frame:", i, "/", len(uI))
        ui = uI[i]
        ui = ui / max(pg.abs(ui))
        ui = pg.logDropTol(ui, 1e-2)
        cMax = max(pg.abs(ui))
        pg.mplviewer.setMappableData(gci, ui, cMin=-cMax, cMax=cMax,
                                     logScale=False)
        # plt.pause(0.001)

    anim = animation.FuncAnimation(fig, animate, frames=int(len(uI) / 5),
                                   interval=0.001, repeat=0)  # , blit=True)
    out = 'seis' + str(f0scale) + "cfl-" + str(cfl)
    anim.save(out + ".mp4", writer=None, fps=20, dpi=dpi, codec=None,
              bitrate=24 * 1024, extra_args=None, metadata=None,
              extra_anim=None, savefig_kwargs=None)
    try:
        print("create frames ... ")
        os.system('mkdir -p anim-' + out)
        os.system('ffmpeg -i ' + out + '.mp4 anim-' + out + '/movie%d.jpg')
    except Exception:
        # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        # Frame extraction stays best-effort, so errors are still ignored.
        pass
def createJacobian(self, model):
    """Assemble the Jacobian for real- or complex-valued resistivity.

    Complex case: the model vector holds [Re | Im] halves and a block
    matrix [[Re, -Im], [Im, Re]] is assembled in self._J. Real case:
    a single matrix block is (re)used.

    NOTE(review): diagnostic prints and commented-out scaling variants
    are kept verbatim; behavior depends on pg/pb internals.
    """
    print('=' * 100)
    if self.complex():
        # split stacked model into real and imaginary halves
        modelRe = model[0:int(len(model) / 2)]
        modelIm = model[int(len(model) / 2):len(model)]
        modelC = pg.toComplex(modelRe, modelIm)
        print("Real", min(modelRe), max(modelRe))
        print("Imag", min(modelIm), max(modelIm))
        u = self.prepareJacobian_(modelC)
        if self._J.rows() == 0:
            #re(data)/re(mod) = im(data)/im(mod)
            # we need a local copy until we have a gimli internal
            # reference counter FIXTHIS
            M1 = pg.RMatrix()
            M2 = pg.RMatrix()
            self.matrixHeap.append(M1)
            self.matrixHeap.append(M2)
            JRe = self._J.addMatrix(M1)
            JIm = self._J.addMatrix(M2)
            self._J.addMatrixEntry(JRe, 0, 0)
            self._J.addMatrixEntry(JIm, 0, len(modelRe), -1.0)
            self._J.addMatrixEntry(JIm, self.data().size(), 0, 1.0)
            self._J.addMatrixEntry(JRe, self.data().size(), len(modelRe))
        else:
            self._J.clean()
        # temporarily neutralize geometric factors, restored below
        k = pg.RVector(self.data()('k'))
        self.data().set('k', k * 0.0 + 1.0)
        dMapResponse = pb.DataMap()
        dMapResponse.collect(self.electrodes(), self.solution())
        respRe = dMapResponse.data(self.data(), False, False)
        respIm = dMapResponse.data(self.data(), False, True)
        #CVector resp(toComplex(respRe, respIm));
        #RVector am(abs(resp) * dataContainer_->get("k"));
        #RVector ph(-phase(resp));
        print("respRe", pg.median(respRe), min(respRe), max(respRe))
        print("respIm", pg.median(respIm), min(respIm), max(respIm))
        JC = pg.CMatrix()
        self.createJacobian_(modelC, u, JC)
        for i in range(JC.rows()):
            #JC[i] *= 1.0/(modelC*modelC) * k[i]
            # scale rows by k / model^2 (complex)
            JC[i] /= (modelC * modelC) / k[i]
        self._J.mat(0).copy(pg.real(JC))
        self._J.mat(1).copy(pg.imag(JC))
        #self.createJacobian_(modelRe*0.0+1.0, pg.real(u), self._J.mat(1))
        #self.createJacobian_(modelRe*0.0+1.0, pg.imag(u), self._J.mat(2))
        #self.createJacobian_(modelRe*0.0+1.0, pg.imag(u), self._J.mat(3))
        # diagnostic row sums of the sensitivity blocks
        sumsens0 = pg.RVector(self._J.mat(0).rows())
        sumsens1 = pg.RVector(self._J.mat(0).rows())
        sumsens2 = pg.RVector(self._J.mat(0).rows())
        for i in range(self._J.mat(0).rows()):
            #self._J.mat(0)[i] *= 1./modelRe / respRe[i]
            #self._J.mat(1)[i] *= 1./modelIm / respRe[i]
            #self._J.mat(2)[i] *= 1./modelRe / respIm[i]
            #self._J.mat(3)[i] *= 1./modelIm / respIm[i]
            #self._J.mat(0)[i] *= 1./(modelRe * modelRe) * k[i]
            #self._J.mat(1)[i] *= 1./(modelRe * modelIm) * k[i]
            #self._J.mat(2)[i] *= 1./(modelIm * modelRe) * k[i]
            #self._J.mat(3)[i] *= 1./(modelIm * modelIm) * k[i]
            sumsens0[i] = sum(self._J.mat(0)[i])
            sumsens1[i] = sum(self._J.mat(1)[i])
            sumsens2[i] = abs(sum(JC[i]))
        print(pg.median(sumsens0), min(sumsens0), max(sumsens0))
        print(pg.median(sumsens1), min(sumsens1), max(sumsens1))
        print(pg.median(sumsens2), min(sumsens2), max(sumsens2))
        # restore original geometric factors
        self.data().set('k', k)
        self._J.recalcMatrixSize()
    else:
        # self.setVerbose(True)
        u = self.prepareJacobian_(model)
        #J = pg.RMatrix()
        if self._J.rows() == 0:
            print('#' * 100)
            M1 = pg.RMatrix()
            Jid = self._J.addMatrix(M1)
            self._J.addMatrixEntry(Jid, 0, 0)
        else:
            self._J.clean()
        self.createJacobian_(model, u, self._J.mat(0))
        self._J.recalcMatrixSize()
#fop = pb.DCSRMultiElectrodeModelling(mesh, data) fop = pg.physics.ert.ERTModelling(mesh, data) fop.regionManager().region(1).setBackground(True) fop.createRefinedForwardMesh(refine=True, pRefine=False) inv = pg.RInversion(data("rhoa"), fop, verbose=True, dosave=True) datTrans = pg.RTransLog() modTrans = pg.RTransLog() inv.setMaxIter(10) inv.setTransData(datTrans) inv.setTransModel(modTrans) inv.setError(data('err')) inv.setModel( pg.RVector(fop.regionManager().parameterCount(), pg.median(data('rhoa')))) inv.setLambda(5) model = inv.run() modelMesh = fop.regionManager().paraDomain() a, cbar = pg.show(modelMesh, model) ##C = fop.contraintsMatrix() #S = fop.jacobian() #print(S) #modelMesh = fop.regionManager().paraDomain() #fig = pg.plt.figure() #ax = [fig.add_subplot(2,2,i) for i in range(1,5)]
poly.createEdge(nodes[1], nodes[2], 3) # hom neumann (outflow) poly.createEdge(nodes[2], nodes[3], 2) # hom dirichlet (isolation) poly.createEdge(nodes[3], nodes[0], 4) # hom dirichlet (isolation) mesh = createMesh(poly, quality=34, area=0.05, smooth=[0,10]) return mesh dx = 0.2 x = np.arange(-20, 20., dx) y = np.arange(-20, 0.0, dx)[::-1] mesh = pg.createGrid(x=x, y=y) mesh = createMesh2() print(mesh) h = pg.median(mesh.boundarySizes()) v1 = 1000 v2 = 3000 tmax = 10.1/v1 z = 2. f0 = 1000.0 # A low wavelength of 50 Hz velocities = pg.RVector(mesh.cellCount(), v1) for c in mesh.cells(): velocities[c.id()] = v1 if c.center()[1] < -z: velocities[c.id()] = v2
def calcSeismics(meshIn, vP):
    """Do seismic computations.

    Refines ``meshIn`` with an appended triangle boundary, interpolates
    the P-wave velocity ``vP`` onto it, solves (or loads a cached)
    pressure-wave field and renders the propagation as an mp4 animation
    plus extracted frames.
    """
    # simulation mesh: refined copy with absorbing boundary region
    meshSeis = meshIn.createH2()
    meshSeis = mt.appendTriangleBoundary(meshSeis,
                                         xbound=25, ybound=22.0,
                                         marker=1, quality=32.0, area=0.3,
                                         smooth=True, markerBoundary=1,
                                         isSubSurface=False, verbose=False)
    print(meshSeis)
    meshSeis = meshSeis.createH2()
    meshSeis = meshSeis.createH2()
    # meshSeis = meshSeis.createP2()
    meshSeis.smooth(1, 1, 1, 4)
    vP = pg.interpolate(meshIn, vP, meshSeis.cellCenters())
    mesh = meshSeis
    vP = pg.solver.fillEmptyToCellArray(mesh, vP)
    print(mesh)
    # ax, cbar = pg.show(mesh, data=vP)
    # pg.show(mesh, axes=ax)
    geophPointsX = np.arange(-19, 19.1, 1)
    geophPoints = np.vstack((geophPointsX,
                             np.zeros(len(geophPointsX)))).T
    sourcePos = geophPoints[4]
    c = mesh.findCell(sourcePos)
    # local edge lengths around the source cell for the time step
    h1 = pg.findBoundary(c.boundaryNodes(0)).size()
    h2 = pg.findBoundary(c.boundaryNodes(1)).size()
    h3 = pg.findBoundary(c.boundaryNodes(2)).size()
    print([h1, h2, h3])
    h = pg.median([h1, h2, h3])
    # h = pg.median(mesh.boundarySizes())
    f0scale = 0.25
    cfl = 0.5
    dt = cfl * h / max(vP)
    print("Courant-Friedrich-Lewy number:", cfl)
    tmax = 40. / min(vP)
    times = np.arange(0.0, tmax, dt)
    solutionName = createCacheName('seis', mesh, times) + "cfl-" + str(cfl)
    try:
        # u = pg.load(solutionName + '.bmat')
        uI = pg.load(solutionName + 'I.bmat')
    except Exception as e:
        # cache miss: compute the wave field and store it
        print(e)
        f0 = f0scale * 1. / dt
        print("h:", round(h, 2),
              "dt:", round(dt, 5),
              "1/dt:", round(1 / dt, 1),
              "f0", round(f0, 2),
              "Wavelength: ", round(max(vP) / f0, 2), " m")
        uSource = ricker(times, f0, t0=1. / f0)
        plt.figure()
        plt.plot(times, uSource, '-*')
        plt.show(block=0)
        plt.pause(0.01)
        u = solvePressureWave(mesh, vP, times, sourcePos=sourcePos,
                              uSource=uSource, verbose=10)
        u.save(solutionName)
        uI = pg.RMatrix()
        print("interpolate node to cell data ... ")
        pg.interpolate(mesh, u, mesh.cellCenters(), uI)
        print("... done")
        uI.save(solutionName + 'I')
    # nodes = [mesh.findNearestNode(p) for p in geophPoints]
    # fig = plt.figure()
    # axs = fig.add_subplot(1,1,1)
    # drawSeismogramm(axs, mesh, u, nodes, dt, i=None)
    # plt.show()
    dpi = 92
    scale = 1
    fig = plt.figure(facecolor='white',
                     figsize=(scale * 800 / dpi, scale * 490 / dpi),
                     dpi=dpi)
    ax = fig.add_subplot(1, 1, 1)
    gci = pg.mplviewer.drawModel(ax, mesh, data=uI[0],
                                 cMin=-1, cMax=1, cmap='bwr')
    pg.mplviewer.drawMeshBoundaries(ax, meshIn, hideMesh=1)
    ax.set_xlim((-20, 20))
    ax.set_ylim((-15, 0))
    ax.set_ylabel('Depth [m]')
    ax.set_xlabel('$x$ [m]')
    # show depths as positive numbers
    ticks = ax.yaxis.get_majorticklocs()
    tickLabels = []
    for t in ticks:
        tickLabels.append(str(int(abs(t))))
    ax.set_yticklabels(tickLabels)
    plt.tight_layout()
    # ax, cbar = pg.show(mesh, data=vP)
    # pg.showNow()
    # ax = fig.add_subplot(1,1,1)

    def animate(i):
        # every 5th time step; normalized, log-damped amplitudes
        i = i * 5
        if i > len(uI) - 1:
            return
        print("Frame:", i, "/", len(uI))
        ui = uI[i]
        ui = ui / max(pg.abs(ui))
        ui = pg.logDropTol(ui, 1e-2)
        cMax = max(pg.abs(ui))
        pg.mplviewer.setMappableData(gci, ui, cMin=-cMax, cMax=cMax,
                                     logScale=False)
        # plt.pause(0.001)

    anim = animation.FuncAnimation(fig, animate, frames=int(len(uI) / 5),
                                   interval=0.001, repeat=0)  # , blit=True)
    out = 'seis' + str(f0scale) + "cfl-" + str(cfl)
    anim.save(out + ".mp4", writer=None, fps=20, dpi=dpi, codec=None,
              bitrate=24 * 1024, extra_args=None, metadata=None,
              extra_anim=None, savefig_kwargs=None)
    try:
        print("create frames ... ")
        os.system('mkdir -p anim-' + out)
        os.system('ffmpeg -i ' + out + '.mp4 anim-' + out + '/movie%d.jpg')
    except Exception:
        # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        # Frame extraction stays best-effort, so errors are still ignored.
        pass
# Forward operator (pybert singularity-removal ERT) on `mesh` for `data`;
# region 1 is treated as non-inverted background.
fop = pb.DCSRMultiElectrodeModelling(mesh, data)
fop.regionManager().region(1).setBackground(True)
fop.createRefinedForwardMesh(refine=True, pRefine=False)
# Single-iteration inversion with log transforms on data and model.
inv = pg.RInversion(data("rhoa"), fop, verbose=True, dosave=True)
datTrans = pg.RTransLog()
modTrans = pg.RTransLog()
inv.setMaxIter(1)
inv.setTransData(datTrans)
inv.setTransModel(modTrans)
inv.setError(data('err'))
# homogeneous start model from the median apparent resistivity
inv.setModel(pg.RVector(fop.regionManager().parameterCount(),
                        pg.median(data('rhoa'))))
inv.setLambda(5)
model = inv.run()
# C = fop.contraintsMatrix()
# S = fop.jacobian()
modelMesh = fop.regionManager().paraDomain()
# deferred plotting: result, sensors overlay, mesh overlay
pg.showLater(1)
ax, cbar = pg.show(modelMesh, model)
pg.show(modelMesh, data.sensorPositions(), axes=ax, showLater=1)
pg.show(modelMesh, axes=ax)
def createJacobian(self, model):
    """Create Jacobian matrix.

    Accumulates per-wavenumber element-matrix sensitivities over all
    parameter cells and data quadrupoles, then scales rows by
    k / model^2. Requires subpotentials; runs response(model) first if
    they are missing.
    """
    if self.subPotentials is None:
        # ensure subpotentials exist for the sensitivity integrals
        self.response(model)
    J = self.jacobian()
    J.resize(self.data.size(), self.regionManager().parameterCount())
    # all parameter cells (markers 0 .. -1 range per pg convention)
    cells = self.mesh().findCellByMarker(0, -1)
    Si = pg.ElementMatrix()
    St = pg.ElementMatrix()
    u = self.subPotentials
    pg.tic()
    if self.verbose():
        print("Calculate sensitivity matrix for model: ",
              min(model), max(model))
    Jt = pg.RMatrix(self.data.size(),
                    self.regionManager().parameterCount())
    # loop over wavenumbers k with quadrature weights w
    for kIdx, w in enumerate(self.w):
        k = self.k[kIdx]
        w = self.w[kIdx]
        Jt *= 0.
        A = pg.ElementMatrixMap()
        for i, c in enumerate(cells):
            modelIdx = c.marker()
            # 2.5D
            Si.u2(c)
            Si *= k * k
            Si += St.ux2uy2uz2(c)
            # 3D
            # Si.ux2uy2uz2(c); w = w* 2
            A.add(modelIdx, Si)
        # sensitivity per data quadrupole from potential differences
        for dataIdx in range(self.data.size()):
            a = int(self.data('a')[dataIdx])
            b = int(self.data('b')[dataIdx])
            m = int(self.data('m')[dataIdx])
            n = int(self.data('n')[dataIdx])
            Jt[dataIdx] = A.mult(u[kIdx][a] - u[kIdx][b],
                                 u[kIdx][m] - u[kIdx][n])
        J += w * Jt
    # convert to apparent-resistivity sensitivities: scale by k / model^2
    m2 = model*model
    k = self.data('k')
    for i in range(J.rows()):
        J[i] /= (m2 / k[i])
    if self.verbose():
        sumsens = np.zeros(J.rows())
        for i in range(J.rows()):
            sumsens[i] = pg.sum(J[i])
        print("sens sum: median = ", pg.median(sumsens),
              " min = ", pg.min(sumsens),
              " max = ", pg.max(sumsens))
# Load VES sounding: columns are AB/2, MN/2 and apparent resistivity.
pg.loadMatrixCol(abmnr, "sond1-100.ves")
ab2 = abmnr[0]
mn2 = abmnr[1]
rhoa = abmnr[2]
# transforms: bounded log for resistivity, log for thickness and rhoa
transRho = pg.RTransLogLU(1., 1000.)
transThk = pg.RTransLog()
transRhoa = pg.RTransLog()
# 1D DC forward operator: region 0 = thicknesses, region 1 = resistivities
f = pg.DC1dModelling(nlay, ab2, mn2)
f.region(0).setTransModel(transThk)
f.region(1).setTransModel(transRho)
paraDepth = max(ab2) / 3  # NOTE(review): assigned but not used below
f.region(0).setStartValue(max(ab2) / 3. / nlay / 2.)
f.region(1).setStartValue(pg.median(rhoa))
model = f.createStartVector()
# perturb one resistivity to avoid a fully homogeneous start model
model[nlay] *= 1.5
inv = pg.RInversion(rhoa, f, True)
inv.setModel(model)
inv.setTransData(transRhoa)
inv.setRelativeError(errPerc / 100.0)
inv.setLambda(lam)
# Marquardt scheme with lambda decrease factor 0.9
inv.setMarquardtScheme(0.9)
model = inv.run()
# data fit: measured vs. modelled apparent resistivity, depth axis inverted
fig, ax = plt.subplots(nrows=2)
ax[0].loglog(rhoa, ab2, 'rx-', inv.response(), ab2, 'b-')
ax[0].set_ylim(max(ab2), min(ab2))
def main():
    """Read inversion config, load ERT data, build FOP/mesh, run, save.

    Reads JSON settings from ``inv.conf``, loads ``input.dat``, estimates
    errors if absent, meshes (or loads a mesh), inverts apparent
    resistivities and writes ``resistivity.vector``.
    """
    # read config file
    conf_file = "inv.conf"
    with open(conf_file, "r") as fd:
        conf = json.load(fd)
    # res = pb.Resistivity("input.dat")
    # res.invert()
    # np.savetxt('resistivity.vector', res.resistivity)
    # return
    # load data file
    data = pg.DataContainerERT("input.dat")
    # remove invalid data
    oldsize = data.size()
    data.removeInvalid()
    newsize = data.size()
    if newsize < oldsize:
        print('Removed ' + str(oldsize - newsize) + ' values.')
    if not data.allNonZero('rhoa'):
        print("No or partial rhoa values.")
        return
    # check, compute error
    if data.allNonZero('err'):
        error = data('err')
    else:
        print("estimate data error")
        # relative floor plus voltage-like absolute part scaled by rhoa
        error = conf["relativeError"] + conf["absoluteError"] / data('rhoa')
    # create FOP
    # NOTE(review): elsewhere in this file the same class is taken from
    # `pb` (pybert); confirm `pg.DCSRMultiElectrodeModelling` resolves here.
    fop = pg.DCSRMultiElectrodeModelling(verbose=conf["verbose"])
    fop.setThreadCount(psutil.cpu_count(logical=False))
    fop.setData(data)
    # create Inv
    inv = pg.RInversion(verbose=conf["verbose"], dosave=False)
    # variables tD, tM are needed to prevent destruct objects
    tD = pg.RTransLog()
    tM = pg.RTransLogLU()
    inv.setTransData(tD)
    inv.setTransModel(tM)
    inv.setForwardOperator(fop)
    # mesh: build a parameter mesh unless a mesh file is configured
    if conf["meshFile"] == "":
        depth = conf["depth"]
        if depth is None:
            depth = pg.DCParaDepth(data)
        poly = pg.meshtools.createParaMeshPLC(
            data.sensorPositions(), paraDepth=depth, paraDX=conf["paraDX"],
            paraMaxCellSize=conf["maxCellArea"], paraBoundary=2, boundary=2)
        if conf["verbose"]:
            print("creating mesh...")
        mesh = pg.meshtools.createMesh(poly, quality=conf["quality"],
                                       smooth=(1, 10))
    else:
        mesh = pg.Mesh(pg.load(conf["meshFile"]))
    mesh.createNeighbourInfos()
    if conf["verbose"]:
        print(mesh)
    sys.stdout.flush()  # flush before multithreading
    fop.setMesh(mesh)
    fop.regionManager().setConstraintType(1)
    if not conf["omitBackground"]:
        if fop.regionManager().regionCount() > 1:
            fop.regionManager().region(1).setBackground(True)
    if conf["meshFile"] == "":
        fop.createRefinedForwardMesh(True, False)
    else:
        fop.createRefinedForwardMesh(conf["refineMesh"], conf["refineP2"])
    paraDomain = fop.regionManager().paraDomain()
    inv.setForwardOperator(fop)  # necessary?
    # inversion parameters
    inv.setData(data('rhoa'))
    inv.setRelativeError(error)
    fop.regionManager().setZWeight(conf['zWeight'])
    inv.setLambda(conf['lam'])
    inv.setMaxIter(conf['maxIter'])
    inv.setRobustData(conf['robustData'])
    inv.setBlockyModel(conf['blockyModel'])
    inv.setRecalcJacobian(conf['recalcJacobian'])
    # homogeneous start model from the median apparent resistivity
    pc = fop.regionManager().parameterCount()
    startModel = pg.RVector(pc, pg.median(data('rhoa')))
    inv.setModel(startModel)
    # Run the inversion
    sys.stdout.flush()  # flush before multithreading
    model = inv.run()
    resistivity = model(paraDomain.cellMarkers())
    np.savetxt('resistivity.vector', resistivity)
    print("Done.")
if __name__ == '__main__': nlay = 4 # number of layers lam = 200. # (initial) regularization parameter errPerc = 3. # relative error of 3 percent ab2 = np.logspace(-1, 2, 50) # AB/2 distance (current electrodes) mn2 = ab2 / 3. # MN/2 distance (potential electrodes) f = pg.DC1dModelling(nlay, ab2, mn2) synres = [100., 500., 20., 800.] # synthetic resistivity synthk = [0.5, 3.5, 6.] # synthetic thickness (nlay-th layer is infinite) rhoa = f(synthk + synres) rhoa = rhoa * (pg.randn(len(rhoa)) * errPerc / 100. + 1.) transLog = pg.RTransLog() inv = LSQRInversion(rhoa, f, transLog, transLog, True) inv.setRelativeError(errPerc / 100) startModel = pg.cat(pg.Vector(nlay - 1, 5), pg.Vector(nlay, pg.median(rhoa))) print(inv.response()) inv.setModel(startModel) inv.setMarquardtScheme() inv.setLambda(1000) G = pg.RMatrix(rows=1, cols=len(startModel)) for i in range(3): G[0][i] = 1 c = pg.Vector(1, pg.sum(synthk)) inv.setParameterConstraints(G, c, 100) # print("Start", inv.chi2(), inv.relrms(), pg.sum(inv.model()(0, nlay-1))) if 0: for i in range(10): inv.oneStep() print(i, inv.chi2(), inv.relrms(), pg.sum(inv.model()(0, nlay - 1)))
def invert(self, data, values=None, verbose=0, **kwargs):
    """Invert the given data.

    A parametric mesh for the inversion will be created if non is given
    before.

    Parameters
    ----------
    data :
        Data container providing 'rhoa', 'err' and sensor positions.
    values : pg.RVector | np.ndarray | list, optional
        Additional apparent-resistivity vectors; for each one a single
        linearized substep around the converged model is performed and
        the resulting models are returned as a matrix.
    verbose : int [0]
        Verbosity forwarded to forward operator and inversion.
    kwargs :
        maxiter, lambd; remaining kwargs go to createParaMesh2dGrid.
    """
    self.fop.setVerbose(verbose)
    self.inv.setVerbose(verbose)
    self.inv.setMaxIter(kwargs.pop('maxiter', 10))
    self.inv.setLambda(kwargs.pop('lambd', 10))
    if self.paraMesh is None:
        # build a default 2D parameter grid below the sensors
        self.paraMesh = createParaMesh2dGrid(data.sensorPositions(),
                                             **kwargs)
        self.setParaMesh(self.paraMesh)
    if verbose:
        print(self.paraMesh)
        # pg.show(self.paraMesh)
    err = data('err')
    rhoa = data('rhoa')
    # homogeneous start model: median apparent resistivity
    startModel = pg.RVector(self.fop.regionManager().parameterCount(),
                            pg.median(rhoa))
    self.fop.setData(data)
    self.inv.setForwardOperator(self.fop)
    # check err here
    self.inv.setData(rhoa)
    self.inv.setError(err)
    self.inv.setModel(startModel)
    model = self.inv.run()
    if values is not None:
        # normalize input to a list of vectors
        if isinstance(values, pg.RVector):
            values = [values]
        elif isinstance(values, np.ndarray):
            if values.ndim == 1:
                values = [values]
        allModel = pg.RMatrix(len(values), len(model))
        self.inv.setVerbose(False)
        for i in range(len(values)):
            print(i)
            tic = time.time()
            # one linearized step around the converged reference model
            self.inv.setModel(model)
            self.inv.setReferenceModel(model)
            dData = pg.abs(values[i] / rhoa)
            relModel = self.inv.invSubStep(pg.log(dData))
            allModel[i] = model * pg.exp(relModel)
            print(i, "/", len(values), " : ", time.time() - tic,
                  "s min/max: ", min(allModel[i]), max(allModel[i]))
        return allModel
    return model
def createJacobian(self, model):
    """Create Jacobian matrix.

    Accumulates per-wavenumber element-matrix sensitivities over all
    parameter cells and data quadrupoles, then scales rows by
    k / model^2. Requires subpotentials; runs response(model) first if
    they are missing.
    """
    if self.subPotentials is None:
        # ensure subpotentials exist for the sensitivity integrals
        self.response(model)
    J = self.jacobian()
    J.resize(self.data.size(), self.regionManager().parameterCount())
    # all parameter cells (markers 0 .. -1 range per pg convention)
    cells = self.mesh().findCellByMarker(0, -1)
    Si = pg.ElementMatrix()
    St = pg.ElementMatrix()
    u = self.subPotentials
    pg.tic()
    if self.verbose():
        print("Calculate sensitivity matrix for model: ",
              min(model), max(model))
    Jt = pg.RMatrix(self.data.size(),
                    self.regionManager().parameterCount())
    # loop over wavenumbers k with quadrature weights w
    for kIdx, w in enumerate(self.w):
        k = self.k[kIdx]
        w = self.w[kIdx]
        Jt *= 0.
        A = pg.ElementMatrixMap()
        for i, c in enumerate(cells):
            modelIdx = c.marker()
            # 2.5D
            Si.u2(c)
            Si *= k * k
            Si += St.ux2uy2uz2(c)
            # 3D
            # Si.ux2uy2uz2(c); w = w* 2
            A.add(modelIdx, Si)
        # sensitivity per data quadrupole from potential differences
        for dataIdx in range(self.data.size()):
            a = int(self.data('a')[dataIdx])
            b = int(self.data('b')[dataIdx])
            m = int(self.data('m')[dataIdx])
            n = int(self.data('n')[dataIdx])
            Jt[dataIdx] = A.mult(u[kIdx][a] - u[kIdx][b],
                                 u[kIdx][m] - u[kIdx][n])
        J += w * Jt
    # convert to apparent-resistivity sensitivities: scale by k / model^2
    m2 = model * model
    k = self.data('k')
    for i in range(J.rows()):
        J[i] /= (m2 / k[i])
    if self.verbose():
        sumsens = np.zeros(J.rows())
        for i in range(J.rows()):
            sumsens[i] = pg.sum(J[i])
        print("sens sum: median = ", pg.median(sumsens),
              " min = ", pg.min(sumsens),
              " max = ", pg.max(sumsens))
poly.createEdge(nodes[2], nodes[3], 2) # hom dirichlet (isolation) poly.createEdge(nodes[3], nodes[0], 4) # hom dirichlet (isolation) mesh = createMesh(poly, quality=34, area=0.05, smooth=[0, 10]) return mesh dx = 0.2 x = np.arange(-20, 20., dx) y = np.arange(-20, 0.0, dx)[::-1] mesh = pg.createGrid(x=x, y=y) mesh = createMesh2() print(mesh) h = pg.median(mesh.boundarySizes()) v1 = 1000 v2 = 3000 tmax = 10.1 / v1 z = 2. f0 = 1000.0 # A low wavelength of 50 Hz velocities = pg.RVector(mesh.cellCount(), v1) for c in mesh.cells(): velocities[c.id()] = v1 if c.center()[1] < -z: velocities[c.id()] = v2