def randN(n, minVal=0.0, maxVal=1.0):
    """Create RVector of length n with normally distributed random numbers."""
    r = pg.RVector(n)
    pg.randn(r)
    r *= (maxVal - minVal)
    r += minVal
    return r
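# A minimal usage sketch for the randN helper above (assuming pygimli is
# imported as pg): the helper rescales standard-normal samples by
# (maxVal - minVal) and shifts them by minVal, so the result is not clipped
# to the interval [minVal, maxVal].
noise = randN(100, minVal=0.0, maxVal=0.03)
print(len(noise), min(noise), max(noise))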
def simulateSynth(model, tMax=5000, satSteps=150, ertSteps=10, area=0.1,
                  synthPath='synth/'):
    """Create synthetic example."""
    if not os.path.exists(synthPath):
        os.mkdir(synthPath)

    world = mt.createWorld(start=[-20, 0], end=[20, -16], layers=[-2, -8],
                           worldMarker=False)
    for i, b in enumerate(world.boundaries()):
        b.setMarker(i + 1)

    block = mt.createRectangle(start=[-6, -3.5], end=[6, -6.0], marker=4,
                               boundaryMarker=11, area=area)
    geom = mt.mergePLC([world, block])
    geom.save(synthPath + 'synthGeom')
    # pg.show(geom, boundaryMarker=1)

    paraMesh = pg.meshtools.createMesh(geom, quality=32, area=area,
                                       smooth=[1, 10])

    # translate markers 1 2 3 4 -> 0 1 2 3
    mapMarker = np.array([0, 0, 1, 2, 3], 'float')
    paraMesh.setCellMarkers(mapMarker[np.array(paraMesh.cellMarkers())])
    paraMesh.save(synthPath + 'synth.bms')

    fop = HydroGeophysicalModelling(mesh=paraMesh, tMax=tMax,
                                    satSteps=satSteps, ertSteps=ertSteps,
                                    verbose=1)

    # OpenBLAS has some problems with too high a thread count ..
    # we need to dig into this
    print("ThreadCount:", pg.threadCount())
    pg.setThreadCount(4)

    print('##### Simulate synthetic data ' + '#' * 50)
    pg.tic()
    rhoaR = fop.response(pg.RVector(model)[paraMesh.cellMarkers()])
    pg.toc()
    print('#' * 100)

    # add some noise here
    rand = pg.RVector(len(rhoaR))
    pg.randn(rand)
    rhoaR *= (1.0 + rand * fop.ws.derr.flatten())
    fop.ws.rhoaR = rhoaR.reshape(fop.ws.derr.shape)

    # fop.ws.mesh.save(synthPath + 'synth.bms')
    np.save(synthPath + 'synthK', fop.ws.k)
    np.save(synthPath + 'synthVel', fop.ws.vel)
    np.save(synthPath + 'synthSat', fop.ws.sat)

    fop.ws.scheme.save(synthPath + 'synth.shm', 'a b m n')
    np.save(synthPath + 'synthRhoaRatio', fop.ws.rhoaR)
    np.save(synthPath + 'synthRhoa', fop.ws.rhoa)
    np.save(synthPath + 'synthErr', fop.ws.derr)
def calcApparentResistivities(mesh, meshERT, poro, rhoBrine):
    """Calculate apparent resistivities for a series of brine resistivity
    models (results are cached; noise is added based on the error estimate)."""
    ert = ERT(verbose=False)

    meshFOP = appendTriangleBoundary(meshERT, xbound=50, ybound=50,
                                     marker=1, quality=34.0, smooth=False,
                                     markerBoundary=1, isSubSurface=False,
                                     verbose=False)

    swatch = pg.Stopwatch(True)
    print("res 1:", swatch.duration(True))

    resis = resistivityArchie(rBrine=rhoBrine, porosity=poro, S=1.0,
                              mesh=mesh, meshI=meshFOP)
    print("res 2:", swatch.duration(True))

    ertPointsX = [pg.RVector3(x, 0) for x in np.arange(-19, 19.1, 1)]
    ertScheme = ert.createData(ertPointsX, scheme="Dipole Dipole (CC-PP)")

    solutionName = createCacheName('appRes', mesh) + "-" + \
        str(ertScheme.size()) + "-" + str(len(rhoBrine))

    try:
        rhoa = np.load(solutionName + '.bmat.npy')
        ertData = pb.DataContainerERT(solutionName + '.dat')
    except Exception as e:
        print(e)
        print("Building .... ")
        rhoa = np.zeros((len(resis), ertScheme.size()))
        ertScheme.set('k', pb.geometricFactor(ertScheme))
        ertData = ert.simulate(meshFOP, resis[0], ertScheme)

        errPerc = 1
        errVolt = 1e-5
        voltage = ertData('rhoa') / ertData('k')
        ertData.set('err', pg.abs(errVolt / voltage) + errPerc / 100.0)
        print('err min:', min(ertData('err')) * 100,
              'max:', max(ertData('err')) * 100)
        ertData.save(solutionName + '.dat', 'a b m n rhoa err k')

        for i in range(0, len(resis)):
            tic = time.time()
            rhoa[i] = ert.fop.response(resis[i])

            rand = pg.RVector(len(rhoa[i]))
            pg.randn(rand)
            rhoa[i] *= (1.0 + rand * ertData('err'))

            print(i, "/", len(resis), " : ", time.time() - tic, "s",
                  "min:", min(resis[i]), "max:", max(resis[i]),
                  "min:", min(rhoa[i]), "max:", max(rhoa[i]))

        np.save(solutionName + '.bmat', rhoa)

    return meshFOP, resis, ertData, rhoa
def simulate(mesh, res, scheme, verbose=False, **kwargs):
    """Forward calculation for given mesh, data and resistivity."""
    fop = ERTModelling(verbose=verbose)
    # fop = ERTManager.createFOP(verbose=verbose)
    fop.setData(scheme)
    fop.setMesh(mesh, ignoreRegionManager=True)

    if not scheme.allNonZero('k'):
        scheme.set('k', pg.RVector(scheme.size(), -1))

    rhoa = None
    isArrayData = None

    if hasattr(res[0], '__iter__'):
        isArrayData = True
        rhoa = np.zeros((len(res), scheme.size()))
        for i, r in enumerate(res):
            rhoa[i] = fop.response(r)
    else:
        rhoa = fop.response(res)

    noiseLevel = kwargs.pop('noiseLevel', 0.0)

    if noiseLevel > 0:
        err = noiseLevel + kwargs.pop('noiseAbs', 1e-4) / rhoa
        scheme.set('err', err)
        rhoa *= 1. + pg.randn(scheme.size()) * err

    if not isArrayData:
        scheme.set('rhoa', rhoa)

    if kwargs.pop('returnArray', False):
        return rhoa

    return scheme
def simulate(synmodel, ab2=None, mn2=None, errPerc=3.):
    """Forward calculation with optional noise.

    Simulates a synthetic data set of a vertical electric sounding and
    appends gaussian distributed noise. Block only for now.

    Parameters
    ----------
    ab2: array_like
        Vector of distances between the point of the sounding and the
        current electrodes.
    mn2: array_like [ab2/3]
        Vector of distances between the point of the sounding and the
        potential electrodes.
    errPerc: float [3.]
        Percentage value for the gaussian noise. Default is 3 %.
    """
    thk = synmodel[0]
    res = synmodel[1]
    if mn2 is None:
        mn2 = ab2 / 3
    FOP = pg.DC1dModelling(len(res), ab2, mn2)
    syndata = FOP.response(thk + res)
    syndata = syndata * (pg.randn(len(syndata)) * errPerc / 100. + 1.)
    return syndata
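# A short usage sketch for the simulate function above; the layer model and
# spacings are hypothetical example values (numpy is assumed imported as np).
thk = [2.0, 8.0]                 # layer thicknesses in m
res = [100.0, 30.0, 300.0]       # layer resistivities in Ohm*m
ab2 = np.logspace(-0.5, 2, 25)   # AB/2 current electrode spacings in m
rhoa = simulate((thk, res), ab2=ab2, errPerc=3.)  # noisy apparent resistivities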
def test_VMD(self, showProgress=False):
    t = np.logspace(-5.5, -2.2, 20)
    verbose = False
    fop = VMDTimeDomainModelling(times=t, txArea=10000.0, rxArea=10000.0,
                                 verbose=verbose)
    # [thick[3], res[4]] nLay=4
    vmdMgr = pg.frameworks.MethodManager1d(fop)

    synthModel = np.array([25., 5., 100., 150., 1., 10., 4.])

    ra = vmdMgr.simulate(synthModel)

    err = abs(np.log(t) / 2) * 0.01
    ra *= 1. + pg.randn(len(ra), seed=1337) * err

    model = vmdMgr.invert(ra, err, nLayers=4, layerLimits=[2, 500],
                          maxIter=50, showProgress=showProgress,
                          verbose=verbose)

    if showProgress is True:
        fop.drawModel(ax=vmdMgr.inv.axs[0], model=synthModel, label='Synth')

    np.testing.assert_array_less(vmdMgr.fw.chi2(), 1.5)
def simulate(mesh, res, scheme, verbose=False, **kwargs):
    """Forward calculation for given mesh, data and resistivity."""
    fop = ERTModelling(verbose=verbose)
    # fop = ERTManager.createFOP(verbose=verbose)
    fop.setData(scheme)
    fop.setMesh(mesh, ignoreRegionManager=True)

    if not scheme.allNonZero('k'):
        if min(pg.y(scheme)) != max(pg.y(scheme)) or \
           min(pg.z(scheme)) != max(pg.z(scheme)):
            pg.info("Non-flat earth topography found. "
                    "We will set geometric factors to -1 to emulate "
                    "electrical impedance tomography (EIT). If you want to "
                    "use ERT with full topography support, "
                    "please consider the use of pyBERT.")
            scheme.set('k', pg.RVector(scheme.size(), -1))
        else:
            scheme.set('k', fop.calcGeometricFactors(scheme))

    rhoa = None
    isArrayData = None

    if hasattr(res[0], '__iter__'):
        isArrayData = True
        rhoa = np.zeros((len(res), scheme.size()))
        for i, r in enumerate(res):
            rhoa[i] = fop.response(r)
    else:
        rhoa = fop.response(res)

    pg.renameKwarg('noisify', 'noiseLevel', kwargs)

    noiseLevel = kwargs.pop('noiseLevel', 0.0)

    if noiseLevel > 0:
        noiseAbs = kwargs.pop('noiseAbs', 1e-4)
        err = noiseLevel + noiseAbs / rhoa
        scheme.set('err', err)
        if verbose:
            pg.info("Set noise (" + str(noiseLevel * 100) + "% + " +
                    str(noiseAbs) + " V) min:", min(err), "max:", max(err))
        rhoa *= 1. + pg.randn(scheme.size()) * err

    if isArrayData is None:
        scheme.set('rhoa', rhoa)

    if kwargs.pop('returnArray', False):
        return rhoa

    return scheme
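# Numerical illustration of the combined error model used in the simulate
# variant above (err = noiseLevel + noiseAbs / rhoa), with assumed example
# values that are not taken from the original code:
noiseLevel, noiseAbs = 0.03, 1e-4    # 3 % relative plus a small absolute part
rhoa_example = 100.0                 # one apparent resistivity reading
err_example = noiseLevel + noiseAbs / rhoa_example  # = 0.030001, i.e. ~3 %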
def simulate(self, model, **kwargs):
    """Run a simulation aka the forward task."""
    ra = self.fop.response(par=model)

    noiseLevel = kwargs.pop('noiseLevel', 0.0)
    if noiseLevel > 0:
        err = self.estimateError(ra, errLevel=noiseLevel)
        ra *= 1. + pg.randn(ra.size(), seed=kwargs.pop('seed', None)) * err
        return ra, err

    return ra
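# Usage sketch for the generic simulate method above, assuming `mgr` is a
# method-manager instance owning this method and `model` a fitting parameter
# vector (both hypothetical names): without noise a plain response is
# returned, with noiseLevel > 0 a (data, error) tuple comes back.
ra = mgr.simulate(model)                                       # noise-free
ra_noisy, err = mgr.simulate(model, noiseLevel=0.03, seed=42)  # 3 % noise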
nlay = 4  # number of layers
lam = 200.  # (initial) regularization parameter
errPerc = 3.  # relative error of 3 percent
ab2 = np.logspace(-1, 2, 50)  # AB/2 distance (current electrodes)
mn2 = ab2 / 3.  # MN/2 distance (potential electrodes)

###############################################################################
# initialize the forward modelling operator
f = pg.core.DC1dModelling(nlay, ab2, mn2)

###############################################################################
# other ways are by specifying a Data Container or am/an/bm/bn distances
synres = [100., 500., 20., 800.]  # synthetic resistivity
synthk = [0.5, 3.5, 6.]  # synthetic thickness (nlay-th layer is infinite)

###############################################################################
# the forward operator can be called by f.response(model) or simply f(model)
rhoa = f(synthk + synres)
rhoa = rhoa * (pg.randn(len(rhoa), seed=0) * errPerc / 100. + 1.)

###############################################################################
# create some transformations used for inversion
transThk = pg.trans.TransLog()  # log-transform ensures thk>0
transRho = pg.trans.TransLogLU(1, 1000)  # lower and upper bound
transRhoa = pg.trans.TransLog()  # log transformation for data

###############################################################################
# set model transformation for thickness and resistivity
f.region(0).setTransModel(transThk)  # 0=thickness
f.region(1).setTransModel(transRho)  # 1=resistivity

###############################################################################
# generate start model values from median app. resistivity & spread
paraDepth = max(ab2) / 3.  # rule-of-thumb for Wenner/Schlumberger
f.region(0).setStartValue(paraDepth / nlay / 2)
f.region(1).setStartValue(np.median(rhoa))

###############################################################################
nlay = 4  # number of layers
lam = 200.  # (initial) regularization parameter
errPerc = 10.  # relative error of 10 percent
ab2 = np.logspace(-1, 2, 50)  # AB/2 distance (current electrodes)
mn2 = ab2 / 3.  # MN/2 distance (potential electrodes)

###############################################################################
# initialize the forward modelling operator
f = pg.DC1dModelling(nlay, ab2, mn2)

###############################################################################
# other ways are by specifying a Data Container or am/an/bm/bn distances
synres = [100., 500., 20., 800.]  # synthetic resistivity
synthk = [0.5, 3.5, 6.]  # synthetic thickness (nlay-th layer is infinite)

###############################################################################
# the forward operator can be called by f.response(model) or simply f(model)
rhoa = f(synthk + synres)
rhoa = rhoa * (pg.randn(len(rhoa)) * errPerc / 100. + 1.)

###############################################################################
# create some transformations used for inversion
transThk = pg.RTransLog()  # log-transform ensures thk>0
transRho = pg.RTransLogLU(1, 1000)  # lower and upper bound
transRhoa = pg.RTransLog()  # log transformation for data

###############################################################################
# set model transformation for thickness and resistivity
f.region(0).setTransModel(transThk)  # 0=thickness
f.region(1).setTransModel(transRho)  # 1=resistivity

###############################################################################
# generate start model values from median app. resistivity & spread
paraDepth = max(ab2) / 3.  # rule-of-thumb for Wenner/Schlumberger
f.region(0).setStartValue(paraDepth / nlay / 2)
f.region(1).setStartValue(np.median(rhoa))

###############################################################################
def simulate(mesh, slowness, scheme, verbose=False, **kwargs):
    """Simulate a traveltime measurement.

    Perform the forward task for a given mesh, a slowness distribution (per
    cell) and return data (traveltime) for a measurement scheme.
    This is a static method since it does not interfere with the managers
    inversion approaches.

    Parameters
    ----------
    mesh : :gimliapi:`GIMLI::Mesh`
        Mesh to calculate for.
    slowness : array(mesh.cellCount()) | array(N, mesh.cellCount())
        Slowness distribution for the given mesh cells can be:

        * a single array of len mesh.cellCount()
        * a matrix of N slowness distributions of len mesh.cellCount()
        * a res map as [[marker0, res0], [marker1, res1], ...]
    scheme : :gimliapi:`GIMLI::DataContainer`
        Data measurement scheme.
    verbose : boolean
        Be verbose.

    Other Parameters
    ----------------
    noisify : boolean
        Add normally distributed noise based on scheme('err').

    Returns
    -------
    t : array(N, data.size()) | DataContainer
        The resulting simulated travel time values.
        Either one column array or matrix in case of slowness matrix.
        A DataContainer is returned if noisify is set to True.
    """
    fop = Refraction.createFOP(verbose=verbose)
    fop.setData(scheme)
    fop.setMesh(mesh, ignoreRegionManager=True)

    if len(slowness) == mesh.cellCount():
        if max(slowness) > 1.:
            print('Warning: slowness values larger than 1 (' +
                  str(max(slowness)) + ') .. assuming these are velocity '
                  'values .. building reciprocity')
            t = fop.response(1. / slowness)
        else:
            t = fop.response(slowness)
    else:
        print(mesh)
        print("slowness: ", slowness)
        raise BaseException("Simulate called with wrong slowness array.")

    ret = pg.DataContainer(scheme)
    ret.set('t', t)

    noiseLevel = kwargs.pop('noiseLevel', 0)
    noiseAbs = kwargs.pop('noiseAbs', 0)

    if noiseLevel > 0 or noiseAbs > 0:
        if not ret.allNonZero('err'):
            ret.set('t', t)
            ret.set('err', pg.physics.Refraction.estimateError(
                ret, absoluteError=noiseAbs))

        if verbose:
            print("Data error estimates (min:max) ",
                  min(ret('err')), ":", max(ret('err')))

        t += pg.randn(ret.size()) * ret('err')
        ret.set('t', t)

    if kwargs.pop('returnArray', False):
        return t

    return ret
def simulate(self, mesh, scheme, res, **kwargs):
    """Simulate an ERT measurement.

    Perform the forward task for a given mesh, a resistivity distribution
    (per cell) and a measurement scheme, and return data (apparent
    resistivity) or potential fields.

    This function can also operate on complex resistivity models, thereby
    computing complex apparent resistivities.

    The forward operator itself only calculates potential values for the
    given scheme file. To calculate apparent resistivities, geometric factors
    (k) are needed. If there are no values k in the DataContainerERT scheme,
    then we will try to calculate them, either analytically or by using a
    p2-refined version of the given mesh.

    TODO
    ----
    * 2D + Complex + SR

    Args
    ----
    mesh : :gimliapi:`GIMLI::Mesh`
        2D or 3D Mesh to calculate for.
    res : float, array(mesh.cellCount()) | array(N, mesh.cellCount()) | list
        Resistivity distribution for the given mesh cells can be:

        * float for homogeneous resistivity
        * single array of length mesh.cellCount()
        * matrix of N resistivity distributions of length mesh.cellCount()
        * resistivity map as [[regionMarker0, res0], [regionMarker1, res1], ...]
    scheme : :gimliapi:`GIMLI::DataContainerERT`
        Data measurement scheme.

    Keyword Args
    ------------
    verbose: bool [False]
        Be verbose. Will override class settings.
    calcOnly: bool [False]
        Use fop.calculate instead of fop.response. Useful if you want to
        force the calculation of impedances for homogeneous models.
        No noise handling. Solution is put as token 'u' in the returned
        DataContainerERT.
    noiseLevel: float [0.0]
        Add normally distributed noise based on scheme('err') or on
        noiseLevel if scheme did not contain 'err'.
    noiseAbs: float [0.0]
        Absolute voltage error in V.
    returnArray: bool [False]
        Returns an array of apparent resistivities instead of a
        DataContainerERT.
    returnFields: bool [False]
        Returns a matrix of all potential values (per mesh nodes) for each
        injection electrode.

    Returns
    -------
    DataContainerERT | array(N, data.size()) | array(N, data.size()) |
    array(N, data.size()):
        Data container with resulting apparent resistivity data and errors
        (if noiseLevel or noiseAbs is set). Optionally returns a matrix of
        rhoa values (returnArray==True forces noiseLevel=0). In case of a
        complex-valued resistivity model, phase values will be returned in
        the DataContainerERT (see example below), or as an additional
        returned array.

    Examples
    --------
    # TODO: Remove pybert dependencies
    # >>> import pybert as pb
    # >>> import pygimli as pg
    # >>> import pygimli.meshtools as mt
    # >>> world = mt.createWorld(start=[-50, 0], end=[50, -50],
    # ...                        layers=[-1, -5], worldMarker=True)
    # >>> scheme = pb.createData(
    # ...                  elecs=pg.utils.grange(start=-10, end=10, n=21),
    # ...                  schemeName='dd')
    # >>> for pos in scheme.sensorPositions():
    # ...     _= world.createNode(pos)
    # ...     _= world.createNode(pos + [0.0, -0.1])
    # >>> mesh = mt.createMesh(world, quality=34)
    # >>> rhomap = [
    # ...    [1, 100. + 0j],
    # ...    [2, 50. + 0j],
    # ...    [3, 10.+ 0j],
    # ... ]
    # >>> ert = pb.ERTManager()
    # >>> data = ert.simulate(mesh, res=rhomap, scheme=scheme, verbose=True)
    # >>> rhoa = data.get('rhoa').array()
    # >>> phia = data.get('phia').array()
    """
    verbose = kwargs.pop('verbose', self.verbose)
    calcOnly = kwargs.pop('calcOnly', False)
    returnFields = kwargs.pop("returnFields", False)
    returnArray = kwargs.pop('returnArray', False)
    noiseLevel = kwargs.pop('noiseLevel', 0.0)
    noiseAbs = kwargs.pop('noiseAbs', 1e-4)
    seed = kwargs.pop('seed', None)

    # segfaults with self.fop (test & fix)
    fop = self.createForwardOperator(useBert=self.useBert, sr=self.sr)
    fop.data = scheme
    fop.setMesh(mesh, ignoreRegionManager=True)
    fop.verbose = verbose

    rhoa = None
    phia = None

    isArrayData = False
    # parse the given res into mesh-cell-sized array
    if isinstance(res, int) or isinstance(res, float):
        res = np.ones(mesh.cellCount()) * float(res)
    elif isinstance(res, complex):
        res = np.ones(mesh.cellCount()) * res
    elif hasattr(res[0], '__iter__'):  # ndim == 2
        if len(res[0]) == 2:  # res seems to be a res map
            # check if there are markers in the mesh that are not defined in
            # the rhomap. better signal here before it results in some error
            meshMarkers = list(set(mesh.cellMarkers()))
            mapMarkers = [m[0] for m in res]
            if any([mark not in mapMarkers for mark in meshMarkers]):
                left = [m for m in meshMarkers if m not in mapMarkers]
                pg.critical(
                    "Mesh contains markers without assigned resistivities "
                    "{}. Please fix given rhomap.".format(left))
            res = pg.solver.parseArgToArray(res, mesh.cellCount(), mesh)
        else:
            # probably nData x nCells array
            # better check for array data here
            isArrayData = True

    if isinstance(res[0], np.complex) or isinstance(res, pg.CVector):
        pg.info("Complex resistivity values found.")
        fop.setComplex(True)
    else:
        fop.setComplex(False)

    if not scheme.allNonZero('k') and not calcOnly:
        if verbose:
            pg.info('Calculate geometric factors.')
        scheme.set('k', fop.calcGeometricFactor(scheme))

    ret = pg.DataContainerERT(scheme)
    ## just be sure that we don't work with artifacts
    ret['u'] *= 0.0
    ret['i'] *= 0.0
    ret['r'] *= 0.0

    if isArrayData:
        rhoa = np.zeros((len(res), scheme.size()))
        for i, r in enumerate(res):
            rhoa[i] = fop.response(r)
            if verbose:
                print(i, "/", len(res), " : ", pg.dur(), "s",
                      "min r:", min(r), "max r:", max(r),
                      "min r_a:", min(rhoa[i]), "max r_a:", max(rhoa[i]))
    else:  # res is single resistivity array
        if len(res) == mesh.cellCount():
            if calcOnly:
                fop.mapERTModel(res, 0)

                dMap = pg.core.DataMap()
                fop.calculate(dMap)
                if fop.complex():
                    pg.critical('Implement me')
                else:
                    ret["u"] = dMap.data(scheme)
                    ret["i"] = np.ones(ret.size())

                if returnFields:
                    return pg.Matrix(fop.solution())
                return ret
            else:
                if fop.complex():
                    res = pg.utils.squeezeComplex(res)

                resp = fop.response(res)

                if fop.complex():
                    rhoa, phia = pg.utils.toPolar(resp)
                else:
                    rhoa = resp
        else:
            print(mesh)
            print("res: ", res)
            raise BaseException(
                "Simulate called with wrong resistivity array.")

    if not isArrayData:
        ret['rhoa'] = rhoa

        if phia is not None:
            ret.set('phia', phia)
    else:
        ret.set('rhoa', rhoa[0])
        if phia is not None:
            ret.set('phia', phia[0])

    if returnFields:
        return pg.Matrix(fop.solution())

    if noiseLevel > 0:  # if errors in data noiseLevel=1 just triggers
        if not ret.allNonZero('err'):
            # 1A and #100µV
            ret.set('err', self.estimateError(ret,
                                              relativeError=noiseLevel,
                                              absoluteUError=noiseAbs,
                                              absoluteCurrent=1))
            print("Data error estimate (min:max) ",
                  min(ret('err')), ":", max(ret('err')))

        rhoa *= 1. + pg.randn(ret.size(), seed=seed) * ret('err')
        ret.set('rhoa', rhoa)

        ipError = None
        if phia is not None:
            if scheme.allNonZero('iperr'):
                ipError = scheme('iperr')
            else:
                # np.abs(self.data("phia") + TOLERANCE) * 1e-4 absoluteError
                if noiseLevel > 0.5:
                    noiseLevel /= 100.

                if 'phiErr' in kwargs:
                    ipError = np.ones(ret.size()) * kwargs.pop('phiErr') / 1000
                else:
                    ipError = abs(ret["phia"]) * noiseLevel

                if verbose:
                    print("Data IP abs error estimate (min:max) ",
                          min(ipError), ":", max(ipError))

            phia += pg.randn(ret.size(), seed=seed) * ipError
            ret['iperr'] = ipError
            ret['phia'] = phia

    # check what needs to be setup and returned
    if returnArray:
        if phia is not None:
            return rhoa, phia
        else:
            return rhoa

    return ret
# The function startModel defines a meaningful starting vector. There are
# other methods to set the starting model, such as inv.setModel(), but this
# one is a default for people who use the class and forget about a starting
# model.
# We first create an abscissa vector using numpy (note that pygimli also
# provides an exp function) and generate synthetic data with two arbitrary
# A and X values.
x = np.arange(0, 1, 1e-2)
data = 10.5 * np.exp(-x / 550e-3)

###############################################################################
# We define an (absolute) error level and add Gaussian noise to the data.
error = 0.5
data += pg.randn(*data.shape) * error
relError = error / data

###############################################################################
# Next, an instance of the forward operator is created. We could use it for
# calculating the synthetic data using f.response([10.5, 0.55]) or just
# f([10.5, 0.55]). We create a real-valued (R) inversion, passing the forward
# operator and the data. A verbose boolean flag could be added to provide
# some output during the inversion; another one prints more and saves files
# for debugging.
f = ExpModelling(x)
inv = pg.Inversion(f)

###############################################################################
# We create a real-valued logarithmic transformation and apply it to the
# model. Similar could be done for the data, which are by default treated
# linearly.
def simulate(self, mesh, scheme, slowness=None, vel=None, seed=None,
             secNodes=2, noiseLevel=0.0, noiseAbs=0.0, **kwargs):
    """Simulate traveltime measurements.

    Perform the forward task for a given mesh, a slowness distribution (per
    cell) and return data (traveltime) for a measurement scheme.

    Parameters
    ----------
    mesh : :gimliapi:`GIMLI::Mesh`
        Mesh to calculate for or use the last known mesh.
    scheme: :gimliapi:`GIMLI::DataContainer`
        Data measurement scheme needs 's' for shot and 'g' for geophone
        data token.
    slowness : array(mesh.cellCount()) | array(N, mesh.cellCount())
        Slowness distribution for the given mesh cells can be:

        * a single array of len mesh.cellCount()
        * a matrix of N slowness distributions of len mesh.cellCount()
        * a res map as [[marker0, res0], [marker1, res1], ...]
    vel : array(mesh.cellCount()) | array(N, mesh.cellCount())
        Velocity distribution for the given mesh cells.
        Will overwrite given slowness.
    secNodes: int [2]
        Number of refinement nodes to increase accuracy of the forward
        calculation.
    noiseLevel: float [0.0]
        Add relative noise to the simulated data. noiseLevel*100 in %
    noiseAbs: float [0.0]
        Add absolute noise to the simulated data in ms.
    seed: int [None]
        Seed the random generator for the noise.

    Keyword Arguments
    -----------------
    returnArray: [False]
        Return only the calculated times.
    verbose: [self.verbose]
        Overwrite verbose level.
    **kwargs
        Additional kwargs ...

    Returns
    -------
    t : array(N, data.size()) | DataContainer
        The resulting simulated travel time values.
        Either one column array or matrix in case of slowness matrix.
    """
    verbose = kwargs.pop('verbose', self.verbose)

    fop = self.fop
    fop.data = scheme
    fop.verbose = verbose

    if mesh is not None:
        self.applyMesh(mesh, secNodes=secNodes, ignoreRegionManager=True)

    if vel is not None:
        slowness = 1 / vel

    if slowness is None:
        pg.critical("Need some slowness or velocity distribution for"
                    " simulation.")

    if len(slowness) == self.fop.mesh().cellCount():
        t = fop.response(slowness)
    else:
        print(self.fop.mesh())
        print("slowness: ", slowness)
        pg.critical("Simulate called with wrong slowness array.")

    ret = pg.DataContainer(scheme)
    ret.set('t', t)

    if noiseLevel > 0 or noiseAbs > 0:
        if not ret.allNonZero('err'):
            ret.set('t', t)
            err = noiseAbs + t * noiseLevel
            ret.set('err', err)

        pg.verbose("Absolute error estimates (min:max) {0}:{1}".format(
            min(ret('err')), max(ret('err'))))

        t += pg.randn(ret.size(), seed=seed) * ret('err')
        ret.set('t', t)

    if kwargs.pop('returnArray', False) is True:
        return t

    return ret
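# Usage sketch for the traveltime simulate method above, assuming `tt` is a
# manager instance owning this method and `mesh`, `scheme`, `vel` already
# exist (all hypothetical names): 1 % relative plus a small absolute noise
# part, made reproducible via the seed argument.
data = tt.simulate(mesh, scheme, vel=vel, noiseLevel=0.01, noiseAbs=1e-5,
                   seed=1337)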
res[1] = 10.
res[2] = 50.
model = pg.cat(thk, res)  # paste together to one model

###############################################################################
# We first set up EM forward operator and generate synthetic data with noise
coilspacing = 50.
nf = 10
freq = pg.Vector(nf, 110.)
for i in range(nf - 1):
    freq[i + 1] = freq[i] * 2.

fEM = pg.core.FDEM1dModelling(nlay, freq, coilspacing)
dataEM = fEM(model)
dataEM += pg.randn(len(dataEM), seed=1234) * noiseEM

###############################################################################
# We define model transformations: logarithms and log with upper+lower bounds
transRhoa = pg.trans.TransLog()
transThk = pg.trans.TransLog()
transRes = pg.trans.TransLogLU(1., 1000.)
transEM = pg.trans.Trans()
fEM.region(0).setTransModel(transThk)
fEM.region(1).setTransModel(transRes)

###############################################################################
# We set up the independent EM inversion and run the model.
invEM = pg.core.Inversion(dataEM, fEM, transEM, True, True)
res[2] = 50.
model = pg.cat(thk, res)  # paste together to one model

###############################################################################
# We first set up EM forward operator and generate synthetic data with noise
coilspacing = 50.
nf = 10
freq = pg.Vector(nf, 110.)
for i in range(nf - 1):
    freq[i + 1] = freq[i] * 2.

fEM = pg.core.FDEM1dModelling(nlay, freq, coilspacing)
dataEM = fEM(model)
for i in range(len(dataEM)):
    dataEM[i] += pg.randn(1)[0] * noiseEM

###############################################################################
# We define model transformations: logarithms and log with upper+lower bounds
transRhoa = pg.trans.TransLog()
transThk = pg.trans.TransLog()
transRes = pg.trans.TransLogLU(1., 1000.)
transEM = pg.trans.Trans()
fEM.region(0).setTransModel(transThk)
fEM.region(1).setTransModel(transRes)

###############################################################################
# We set up the independent EM inversion and run the model.
invEM = pg.core.Inversion(dataEM, fEM, transEM, True, True)