def test_NumpyToRMatrix(self):
    """Implemented in custom_rvalue.cpp."""
    # shape must survive the numpy -> RMatrix conversion
    shaped = np.ndarray((5, 4))
    converted = pg.RMatrix(shaped)
    self.assertEqual(converted.rows(), shaped.shape[0])
    self.assertEqual(converted.cols(), shaped.shape[1])

    # values must survive as well: compare row sums
    filled = np.arange(20.).reshape((5, 4))
    converted = pg.RMatrix(filled)
    for row in range(4):
        self.assertEqual(sum(converted[row]), sum(filled[row]))
def resistivityArchie(rBrine, porosity, a=1.0, m=2.0, S=1.0, n=2.0,
                      mesh=None, meshI=None):
    r"""Resistivity of brine-saturated rock after Archie's law.

    .. math::
        \rho = a\rho_{\text{Brine}}\phi^{-m} S_w^{-n}

    * :math:`\rho` - the electrical conductivity of the fluid saturated rock
    * :math:`\rho_{\text{Brine}}` - electrical conductivity of the brine
    * :math:`\phi` - porosity 0.0 --1.0
    * :math:`a` - tortuosity factor. (common 1)
    * :math:`m` - cementation exponent of the rock
        (usually in the range 1.3 -- 2.5 for sandstones)
    * :math:`n` - is the saturation exponent (usually close to 2)

    Parameters
    ----------
    rBrine : array (1D) | matrix-like (2D)
        Brine resistivity, one row per time step for 2D input.
    mesh : :gimliapi:`GIMLI::Mesh`
        Mesh the parameters are defined on (cell based).
    meshI : :gimliapi:`GIMLI::Mesh` [None]
        Optional target mesh; the result is interpolated onto its
        cell centers and empty cells are prolonged.

    Returns
    -------
    Matrix of rock resistivities, one row per row of rBrine.
    """
    rB = None

    if rBrine.ndim == 1:
        rB = pg.RMatrix(1, len(rBrine))
        rB[0] = parseArgToArray(rBrine, mesh.cellCount(), mesh)
    elif rBrine.ndim == 2:
        rB = pg.RMatrix(len(rBrine), len(rBrine[0]))
        for i in range(len(rBrine)):
            rB[i] = rBrine[i]

    porosity = parseArgToArray(porosity, mesh.cellCount(), mesh)
    a = parseArgToArray(a, mesh.cellCount(), mesh)
    m = parseArgToArray(m, mesh.cellCount(), mesh)
    S = parseArgToArray(S, mesh.cellCount(), mesh)
    n = parseArgToArray(n, mesh.cellCount(), mesh)

    # BUGFIX: size the result from rB, not rBrine -- len(rBrine[0]) fails
    # for 1-D input where rBrine[0] is a scalar.
    r = pg.RMatrix(len(rB), len(rB[0]))
    for i in range(len(r)):
        r[i] = rB[i] * a * porosity**(-m) * S**(-n)

    # BUGFIX: former code built rI from meshI.cellCount() BEFORE checking
    # meshI for None and crashed; without a target mesh just return r.
    if meshI is None:
        return r

    rI = pg.RMatrix(len(r), meshI.cellCount())
    pg.interpolate(mesh, r, meshI.cellCenters(), rI)

    for i in range(len(rI)):
        # prolong values into cells the interpolation left empty
        rI[i] = pg.solver.fillEmptyToCellArray(meshI, rI[i])

    return rI
def loadKernel(self, name=''):
    """Load kernel matrix from mrsk or two bmat files."""
    from scipy.io import loadmat  # for reading Matlab mat files

    if name.lower().endswith('.mrsk'):
        # single Matlab container holding kernel and depth discretization
        mat = loadmat(name, struct_as_record=False,
                      squeeze_me=True)['kdata']
        self.K = mat.K
        self.z = np.hstack((0., mat.model.z))
    else:
        # try load real/imag parts (backward compat.)
        realPart = pg.RMatrix(name + 'KR.bmat')
        imagPart = pg.RMatrix(name + 'KI.bmat')
        self.K = np.zeros((realPart.rows(), realPart.cols()),
                          dtype='complex')
        for row in range(realPart.rows()):
            self.K[row] = np.array(realPart[row]) + \
                np.array(imagPart[row]) * 1j
def calcGCells(pos, mesh, rho, nInt=0):
    """Gravity kernel by integrating over mesh cells.

    Builds the sensitivity matrix G (len(pos) x cellCount) and returns
    (G * rho * gamma * 1e5, G), i.e. the anomaly (scaled to mGal-like
    units via 1e5 -- TODO confirm units) plus the raw kernel.

    Parameters
    ----------
    pos : iterable of positions
        Measurement positions.
    mesh : :gimliapi:`GIMLI::Mesh`
        2D mesh with counterclockwise numbered cells.
    rho : array
        Density per cell.
    nInt : int [0]
        0: analytic line integral along cell edges;
        >0: numeric quadrature of this order.
    """
    G = pg.RMatrix(len(pos), mesh.cellCount())
    rules = pg.IntegrationRules()
    for i, p in enumerate(pos):
        for cId, c in enumerate(mesh.cells()):
            Z = 0.
            if nInt == 0:
                # analytic: closed line integral over the cell polygon
                for j in range(c.nodeCount()):
                    A = c.node(j)
                    B = c.node((j + 1) % c.nodeCount())
                    # negative Z as all cells are numbered counterclockwise
                    Z -= 2.0 * pg.lineIntegraldGdz(A.pos() - p, B.pos() - p)
                    # Z += lineIntegralZ(A.pos() - p, B.pos() - p)
            else:
                # numeric quadrature; NOTE(review): `functor` is defined
                # elsewhere in this module -- confirm it is in scope.
                for j, t in enumerate(rules.abscissa(c, nInt)):
                    w = rules.weights(c, nInt)[j]
                    Z += 2.0 * w * functor(c.shape().xyz(t), p)
                # for j, t in enumerate(rules.quaAbscissa(nInt)):
                #     w = rules.quaWeights(nInt)[j]
                #     Z += 2.0 * w * functor(c.shape().xyz(t), p)
                Z *= c.jacobianDeterminant()
                # negative Z because all cells are numbered counterclockwise
            G[i][c.id()] = Z
    # 6.67384e-11: gravitational constant [m^3/(kg s^2)]
    return G * rho * 6.67384e-11 * 1e5, G
def showSparseMatrix(A, full=False):
    """Show the content of a sparse matrix.

    Parameters
    ----------
    A : :gimliapi:`GIMLI::SparseMatrix` | :gimliapi:`GIMLI::SparseMapMatrix`
        Matrix to be shown.
    full : bool [False]
        Show as dense matrix.
    """
    if isinstance(A, pg.RSparseMapMatrix):
        # convert the map representation to CRS and show that
        return showSparseMatrix(pg.SparseMatrix(A), full)

    rows = A.vecRowIdx()
    cols = A.vecColPtr()  # CRS row pointer (see GIMLI naming)
    vals = A.vecVals()

    # hoisted the former per-element `if full` test out of the loops
    if full:
        Sd = pg.RMatrix(A.rows(), A.cols())
        for i in range(A.rows()):
            for j in range(cols[i], cols[i + 1]):
                Sd[i].setVal(vals[j], rows[j])
        print(np.array(Sd))
    else:
        for i in range(A.rows()):
            for j in range(cols[i], cols[i + 1]):
                print(i, rows[j], vals[j])
def __init__(self, verbose=True):
    """Constructor.

    Parameters
    ----------
    verbose : bool [True]
        Forwarded to the base modelling operator.
    """
    super(GravimetryModelling, self).__init__(verbose)
    # unless doing reference counting we need to hold the reference here,
    # otherwise the C++ side may see the matrix garbage-collected
    self._J = pg.RMatrix()
    # set via setSensorPositions() before use
    self.sensorPositions = None
    self.setJacobian(self._J)
def calcERT(ertScheme, rhoa):
    """Invert ERT data, caching mesh and models on disk.

    Tries to load a previously cached inversion result; on any failure
    (deliberate best-effort) the inversion is (re)run and cached.
    Returns (paraDomain mesh, models, models normalized by the first one).
    """
    ert = ERT(verbose=False)
    # cache key encodes scheme size and number of data frames
    solutionName = createCacheName('ERT') + "-" + str(ertScheme.size()) + \
        "-" + str(len(rhoa))
    try:
        ertModels = pg.load(solutionName + '.bmat')
        ertMesh = pg.load(solutionName + '.bms')
    except Exception as e:
        # cache miss or unreadable files: rebuild and store
        print(e)
        print("Building .... ")
        ertModels = ert.invert(ertScheme, values=rhoa, maxiter=10, lambd=50,
                               paraDX=0.5, paraDZ=0.5, nLayers=20,
                               paraDepth=15, verbose=1)
        ertMesh = ert.fop.regionManager().paraDomain()
        ertModels.save(solutionName + '.bmat')
        ertMesh.save(solutionName)
    # ratio of each model to the first (reference) model
    ertRatioModels = pg.RMatrix(ertModels)
    for i in range(len(ertModels)):
        ertRatioModels[i] /= ertModels[0]
    return ertMesh, ertModels, ertRatioModels
def test_RMatrixToNumpy(self):
    """Implemented through automatic iterator """
    source = np.arange(20.).reshape((5, 4))
    mat = pg.RMatrix(source)
    back = np.array(mat)
    self.assertEqual(mat.rows(), back.shape[0])
    self.assertEqual(mat.cols(), back.shape[1])
    for row in range(4):
        self.assertEqual(sum(mat[row]), sum(back[row]))

    # round trip of a square matrix must be lossless
    square = np.arange(16.).reshape((4, 4))
    roundTrip = np.array(pg.RMatrix(square))
    np.testing.assert_equal(square, roundTrip)

    # conversion of an empty (zero-initialized) matrix must not fail
    back = np.array(pg.RMatrix(4, 4))
def numpy2gmat(nmat):
    """Convert numpy.array into pygimli RMatrix.

    TODO implement correct rval
    """
    result = pg.RMatrix()
    for rowIdx in range(len(nmat)):
        result.push_back(nmat[rowIdx])
    return result
def invert(self, sensorPositions, gz, errAbs, verbose=0, **kwargs):
    """Invert gravimetric data for a density model.

    Parameters
    ----------
    sensorPositions : positions of the gravimetric sensors
    gz : array
        Measured anomaly data.
    errAbs : float | array
        Absolute data error.
    **kwargs
        maxiter [10], lambd [10], mesh (required parametric mesh).

    Returns
    -------
    model : resulting density model vector.
    """
    self.fop.setVerbose(verbose)
    self.inv.setMaxIter(kwargs.pop('maxiter', 10))
    self.inv.setLambda(kwargs.pop('lambd', 10))

    self.fop.setSensorPositions(sensorPositions)

    mesh = kwargs.pop('mesh', None)
    if mesh is None:
        # BUGFIX: former code did ``raise('implement me')`` which itself
        # raises TypeError (a str is not an exception)
        raise ValueError("invert requires a 'mesh' keyword argument")

    self.setParaMesh(mesh)

    startModel = pg.RVector(self.fop.regionManager().parameterCount(), 0.0)

    self.inv.setForwardOperator(self.fop)
    self.fop.regionManager().setConstraintType(10)

    # check err here
    self.inv.setData(gz)
    self.inv.setAbsoluteError(errAbs)
    self.inv.setModel(startModel)

    model = self.inv.run()
    # NOTE: removed the former timelapse code after this return -- it was
    # unreachable and referenced undefined names (`data`, `time`).
    return model
def __init__(self, num, nrows, ncols):
    """Build a block-diagonal matrix of `num` equal (nrows x ncols) blocks."""
    super(NDMatrix, self).__init__()  # call inherited init function
    self.Ji = []  # list of individual block matrices
    for blockIdx in range(num):
        block = pg.RMatrix()
        block.resize(nrows, ncols)
        self.Ji.append(block)
        matId = self.addMatrix(block)
        # place each block on the diagonal
        self.addMatrixEntry(matId, nrows * blockIdx, ncols * blockIdx)
    self.recalcMatrixSize()
    print(self.rows(), self.cols())
def _createParameterContraintsLines(mesh, cMat, cWeight=None):
    """Build start/end point lists for drawing constraint connections.

    For every constraint pair in cMat, connects the centers of the two
    parameter regions (cells grouped by marker). If cWeight is given the
    segment is shortened proportionally to the weight.

    Returns (start, end): lists of pg.RVector3.
    """
    C = pg.RMatrix()
    if isinstance(cMat, pg.SparseMapMatrix):
        # no direct accessor: round-trip through a temp file to get columns
        cMat.save('tmpC.matrix')
        pg.loadMatrixCol(C, 'tmpC.matrix')
    else:
        C = cMat

    # group cells by their (parameter) marker
    cellList = dict()
    for c in mesh.cells():
        pID = c.marker()
        if pID not in cellList:
            cellList[pID] = []
        cellList[pID].append(c)

    # geometric center of each parameter region
    paraCenter = dict()
    for pID, vals in list(cellList.items()):
        p = pg.RVector3(0.0, 0.0, 0.0)
        for c in vals:
            p += c.center()
        p /= float(len(vals))
        paraCenter[pID] = p

    nConstraints = C[0].size()
    start = []
    end = []
    # swatch = pg.Stopwatch(True)  # not used
    # each constraint occupies two consecutive column entries (left/right id)
    for i in range(0, int(nConstraints / 2)):
        # print i
        # if i == 3: break;
        idL = int(C[1][i * 2])
        idR = int(C[1][i * 2 + 1])
        p1 = paraCenter[idL]
        p2 = paraCenter[idR]

        if cWeight is not None:
            # shrink the segment toward the centers by (1 - weight)
            pa = pg.RVector3(p1 + (p2 - p1) / 2.0 * (1.0 - cWeight[i]))
            pb = pg.RVector3(p2 + (p1 - p2) / 2.0 * (1.0 - cWeight[i]))
        else:
            pa = p1
            pb = p2
        # print(i, idL, idR, p1, p2)
        start.append(pa)
        end.append(pb)

    # updateAxes_(ax)  # not existing
    return start, end
def createJacobian(self, slowness):
    """Generate Jacobian matrix using fat-ray after Jordi et al. (2016).

    Variant using secondary nodes: travel times are evaluated at
    self.mids (assumed: cell-related node indices into the
    withSecNodes distance vector -- TODO confirm).
    """
    self.J = pg.Matrix(self.data().size(), self.mesh().cellCount())
    self.sensorNodes = [
        self.mesh().findNearestNode(pos)
        for pos in self.data().sensorPositions()
    ]
    Di = self.dijkstra()
    # map model to cells; 1e16 = slowness for background/outside regions
    slowPerCell = self.createMappedModel(slowness, 1e16)
    Di.setGraph(self.createGraph(slowPerCell))
    numN = self.mesh().nodeCount()
    data = self.data()
    numS = data.sensorCount()
    # travel times sensor -> nodes and sensor -> sensor
    Tmat = pg.RMatrix(numS, numN)
    Dmat = pg.RMatrix(numS, numS)
    print(self.mesh())
    print(self.nnodes, max(self.mids))
    for i, node in enumerate(self.sensorNodes):
        Di.setStartNode(node)
        dist0 = Di.distances()
        dist = Di.distances(withSecNodes=True)
        print("dist len ", len(dist0), len(dist))
        # NOTE(review): Tmat was sized numS x numN but is assigned
        # len(self.mids) values -- verify sizes match
        Tmat[i] = dist[self.mids]
        # Tmat[i] = (self.nnodes, len(dist))
        Dmat[i] = Tmat[i][self.sensorNodes]
    for i in range(data.size()):
        iS = int(data("s")[i])
        iG = int(data("g")[i])
        tsr = Dmat[iS][iG]  # shot-receiver travel time
        # detour time of paths through each cell relative to the ray
        dt = Tmat[iS] + Tmat[iG] - tsr
        weight = np.maximum(1 - 2 * self.frequency * dt, 0.0)  # 1 on ray
        wa = weight  # * np.sqrt(self.mesh().cellSizes())
        if np.sum(wa) > 0:  # not if all values are zero
            wa /= np.sum(wa)
        self.J[i] = wa * tsr / slowness
    self.setJacobian(self.J)
def loadmrsproject(mydir):
    """Load MRS project from given directory.

    Expects datacube.dat, KR.bmat/KI.bmat and zkernel.vec inside mydir.
    Returns (KR, KI, zvec, t, datvec).
    """
    if mydir is None:
        mydir = '.'
    if mydir[-1] != '/':
        mydir = mydir + '/'

    # load files from directory
    zvec = pg.RVector(mydir + 'zkernel.vec')
    KR = pg.RMatrix(mydir + 'KR.bmat')
    KI = pg.RMatrix(mydir + 'KI.bmat')

    cube = pg.RMatrix()
    pg.loadMatrixCol(cube, mydir + 'datacube.dat')

    # first column holds the time axis, the rest is data
    t = np.array(cube[0])
    datvec = pg.RVector()
    for col in range(1, len(cube)):
        datvec = pg.cat(datvec, cube[col])
    # print len(cube), len(t)
    return KR, KI, zvec, t, datvec
def loadSIPallData(filename, outnumpy=False):
    """Load SIP data with the columns ab/2, mn/2, rhoa and PHI with the
    corresponding frequencies in the first row.

    Parameters
    ----------
    filename : str
        File to read.
    outnumpy : bool [False]
        Read via numpy.loadtxt instead of pg.loadMatrixCol.

    Returns
    -------
    ab2, mn2, rhoa, PHI, fr
    """
    if outnumpy:
        A = N.loadtxt(filename)
        fr = A[0, 3:]
        ab2 = A[1:, 0]
        mn2 = A[1:, 1]
        rhoa = A[1:, 2]
        PHI = A[1:, 3:]
    else:
        A = pg.RMatrix()
        # BUGFIX: former code loaded the hard-coded path 'sch/dc.ves'
        # instead of the given filename
        pg.loadMatrixCol(A, filename)
        ndata = A.cols()
        ab2 = A[0](1, ndata)
        mn2 = A[1](1, ndata)
        rhoa = A[2](1, ndata)
        PHI = pg.RMatrix()
        fr = []
        # remaining rows: first entry is the frequency, rest are phases
        for i in range(3, A.rows()):
            fr.append(A[i][0])
            PHI.push_back(A[i](1, ndata))
    return ab2, mn2, rhoa, PHI, fr
def rot2DGridToWorld(mesh, start, end):
    """Rotate a 2D mesh into 3D world coordinates.

    Rotates the grid into the plane spanned by start and end
    (consistent with the documented sibling implementation;
    the stray debug print was removed).
    """
    mesh.rotate(pg.degToRad(pg.RVector3(-90.0, 0.0, 0.0)))

    # normal of the canonical x/z plane the 2D grid lives in
    src = pg.RVector3(0.0, 0.0, 0.0).norm(pg.RVector3(0.0, 0.0, -10.0),
                                          pg.RVector3(10.0, 0.0, -10.0))
    # normal of the target plane through start and end
    dest = start.norm(start - pg.RVector3(0.0, 0.0, 10.0), end)

    q = pg.getRotation(src, dest)
    rot = pg.RMatrix(4, 4)
    q.rotMatrix(rot)

    mesh.transform(rot)
    mesh.translate(start)
def createJacobian(self, slowness):
    """Generate Jacobian matrix using fat-ray after Jordi et al. (2016).

    Node travel times are interpolated to cell centers via self.iMat,
    which is rebuilt lazily when the mesh changed.
    """
    self.J = pg.Matrix(self.data().size(), self.mesh().cellCount())
    self.sensorNodes = [
        self.mesh().findNearestNode(pos)
        for pos in self.data().sensorPositions()
    ]
    # rebuild node->cellCenter interpolation matrix only when stale
    if (self.iMat.cols() != self.mesh().nodeCount() or
            self.iMat.rows() != self.mesh().cellCount()):
        self.iMat = self.mesh().interpolationMatrix(
            self.mesh().cellCenters())
    Di = self.dijkstra()
    # map model to cells; 1e16 = slowness for background/outside regions
    slowPerCell = self.createMappedModel(slowness, 1e16)
    Di.setGraph(self.createGraph(slowPerCell))
    numN = self.mesh().nodeCount()
    data = self.data()
    numS = data.sensorCount()
    # travel times sensor -> nodes and sensor -> sensor
    Tmat = pg.RMatrix(numS, numN)
    Dmat = pg.RMatrix(numS, numS)
    for i, node in enumerate(self.sensorNodes):
        Di.setStartNode(node)
        Tmat[i] = Di.distances()  # (0, numN)
        Dmat[i] = Tmat[i][self.sensorNodes]
    for i in range(data.size()):
        iS = int(data("s")[i])
        iG = int(data("g")[i])
        tsr = Dmat[iS][iG]  # shot-receiver travel time
        # detour time per cell relative to the direct ray
        dt = self.iMat * (Tmat[iS] + Tmat[iG]) - tsr
        weight = np.maximum(1 - 2 * self.frequency * dt, 0.0)  # 1 on ray
        wa = weight  # * np.sqrt(self.mesh().cellSizes())
        if np.sum(wa) > 0:  # not if all values are zero
            wa /= np.sum(wa)
        self.J[i] = wa * tsr / slowness
    self.setJacobian(self.J)
def test_MT(self):
    """Multithreaded Jacobian must match the single-threaded result."""
    nPars = 4
    model = pg.RVector(nPars, 1)
    fop = ModellingMT(nPars, verbose=False)

    # single-threaded reference; deep-copy before recomputing
    fop.setMultiThreadJacobian(1)
    fop.createJacobian(model)
    singleJ = pg.RMatrix(fop.jacobian())

    # recompute with one thread per parameter
    fop.setMultiThreadJacobian(nPars)
    fop.createJacobian(model)
    multiJ = fop.jacobian()

    np.testing.assert_array_equal(singleJ * 2.0, multiJ)
def rot2DGridToWorld(mesh, start, end):
    """Rotate a 2D mesh into 3D world coordinates.

    todo:: Complete Documentation. ...rotate a given 2D grid in...
    """
    mesh.rotate(pg.degToRad(pg.RVector3(-90.0, 0.0, 0.0)))

    # normal of the canonical x/z plane and of the target plane
    srcNorm = pg.RVector3(0.0, 0.0, 0.0).norm(
        pg.RVector3(0.0, 0.0, -10.0), pg.RVector3(10.0, 0.0, -10.0))
    destNorm = start.norm(start - pg.RVector3(0.0, 0.0, 10.0), end)

    # quaternion rotating srcNorm onto destNorm, as 4x4 transform
    rot44 = pg.RMatrix(4, 4)
    pg.getRotation(srcNorm, destNorm).rotMatrix(rot44)

    mesh.transform(rot44)
    mesh.translate(start)
def calcGBounds(pos, mesh, rho):
    """Gravity kernel assembled from boundary line integrals.

    Each boundary contributes -Z to its left cell and +Z to its right
    cell. Returns (G * rho * 2 * gamma * 1e5, G).
    """
    G = pg.RMatrix(len(pos), mesh.cellCount())
    for i, p in enumerate(pos):
        for b in mesh.boundaries():
            nodeA = b.node(0)
            nodeB = b.node(1)
            # Z = lineIntegralZ(nodeA.pos() - p, nodeB.pos() - p)
            Z = pg.lineIntegraldGdz(nodeA.pos() - p, nodeB.pos() - p)

            left = b.leftCell()
            if left:
                G[i][left.id()] -= Z
            right = b.rightCell()
            if right:
                G[i][right.id()] += Z
    # 6.67384e-11: gravitational constant
    return G * rho * 2.0 * 6.67384e-11 * 1e5, G
def __init__(self, fvec, tvec, verbose=False):  # save reference in class
    """Constructor with frequency and tau vector."""
    self.f = fvec
    self.nf = len(fvec)
    self.t = tvec
    self.nt = len(tvec)
    mesh = pg.createMesh1D(len(tvec))  # standard 1d discretization
    pg.ModellingBase.__init__(self, mesh, verbose)

    # precompute kernel: WT[i, j] = 2*pi*f_i * tau_j
    T, W = np.meshgrid(tvec, fvec * 2. * pi)
    WT = W * T
    self.A = WT**2 / (WT**2 + 1)  # real-part kernel rows
    self.B = WT / (WT**2 + 1)     # imaginary-part kernel rows

    # stack [A; B] into the Jacobian (constant, model-independent)
    self.J = pg.RMatrix()
    self.J.resize(len(fvec) * 2, len(tvec))
    for row in range(self.nf):
        self.J[row] = self.A[row]
        self.J[row + self.nf] = self.B[row]
    self.setJacobian(self.J)
def createJacobian(self, model):
    """Assemble the (possibly complex) ERT Jacobian as a block matrix.

    Complex case: model = [Re | Im]; the Jacobian is held as a 2x2
    block structure over real/imaginary parts. Real case: single block.
    NOTE(review): heavy debug printing left in place on purpose.
    """
    print('=' * 100)

    if self.complex():
        # split stacked model vector into real and imaginary halves
        modelRe = model[0:int(len(model) / 2)]
        modelIm = model[int(len(model) / 2):len(model)]
        modelC = pg.toComplex(modelRe, modelIm)
        print("Real", min(modelRe), max(modelRe))
        print("Imag", min(modelIm), max(modelIm))

        u = self.prepareJacobian_(modelC)

        if self._J.rows() == 0:
            # first call: build the block structure
            # re(data)/re(mod) = im(data)/im(mod)
            # we need a local copy until we have a gimli internal
            # reference counter FIXTHIS
            M1 = pg.RMatrix()
            M2 = pg.RMatrix()
            self.matrixHeap.append(M1)
            self.matrixHeap.append(M2)
            JRe = self._J.addMatrix(M1)
            JIm = self._J.addMatrix(M2)
            self._J.addMatrixEntry(JRe, 0, 0)
            self._J.addMatrixEntry(JIm, 0, len(modelRe), -1.0)
            self._J.addMatrixEntry(JIm, self.data().size(), 0, 1.0)
            self._J.addMatrixEntry(JRe, self.data().size(), len(modelRe))
        else:
            self._J.clean()

        # temporarily neutralize geometric factors for the sensitivity calc
        k = pg.RVector(self.data()('k'))
        self.data().set('k', k * 0.0 + 1.0)

        dMapResponse = pb.DataMap()
        dMapResponse.collect(self.electrodes(), self.solution())
        respRe = dMapResponse.data(self.data(), False, False)
        respIm = dMapResponse.data(self.data(), False, True)
        # CVector resp(toComplex(respRe, respIm));
        # RVector am(abs(resp) * dataContainer_->get("k"));
        # RVector ph(-phase(resp));
        print("respRe", pg.median(respRe), min(respRe), max(respRe))
        print("respIm", pg.median(respIm), min(respIm), max(respIm))

        JC = pg.CMatrix()
        self.createJacobian_(modelC, u, JC)
        for i in range(JC.rows()):
            # scale to log-like sensitivities and re-apply k
            # JC[i] *= 1.0/(modelC*modelC) * k[i]
            JC[i] /= (modelC * modelC) / k[i]

        self._J.mat(0).copy(pg.real(JC))
        self._J.mat(1).copy(pg.imag(JC))
        # self.createJacobian_(modelRe*0.0+1.0, pg.real(u), self._J.mat(1))
        # self.createJacobian_(modelRe*0.0+1.0, pg.imag(u), self._J.mat(2))
        # self.createJacobian_(modelRe*0.0+1.0, pg.imag(u), self._J.mat(3))

        # diagnostic row sums of the sensitivity blocks
        sumsens0 = pg.RVector(self._J.mat(0).rows())
        sumsens1 = pg.RVector(self._J.mat(0).rows())
        sumsens2 = pg.RVector(self._J.mat(0).rows())
        for i in range(self._J.mat(0).rows()):
            # self._J.mat(0)[i] *= 1./modelRe / respRe[i]
            # self._J.mat(1)[i] *= 1./modelIm / respRe[i]
            # self._J.mat(2)[i] *= 1./modelRe / respIm[i]
            # self._J.mat(3)[i] *= 1./modelIm / respIm[i]
            # self._J.mat(0)[i] *= 1./(modelRe * modelRe) * k[i]
            # self._J.mat(1)[i] *= 1./(modelRe * modelIm) * k[i]
            # self._J.mat(2)[i] *= 1./(modelIm * modelRe) * k[i]
            # self._J.mat(3)[i] *= 1./(modelIm * modelIm) * k[i]
            sumsens0[i] = sum(self._J.mat(0)[i])
            sumsens1[i] = sum(self._J.mat(1)[i])
            sumsens2[i] = abs(sum(JC[i]))
        print(pg.median(sumsens0), min(sumsens0), max(sumsens0))
        print(pg.median(sumsens1), min(sumsens1), max(sumsens1))
        print(pg.median(sumsens2), min(sumsens2), max(sumsens2))

        # restore the original geometric factors
        self.data().set('k', k)
        self._J.recalcMatrixSize()
    else:
        # self.setVerbose(True)
        u = self.prepareJacobian_(model)
        # J = pg.RMatrix()
        if self._J.rows() == 0:
            # first call: single real block at (0, 0)
            print('#' * 100)
            M1 = pg.RMatrix()
            Jid = self._J.addMatrix(M1)
            self._J.addMatrixEntry(Jid, 0, 0)
        else:
            self._J.clean()
        self.createJacobian_(model, u, self._J.mat(0))
        self._J.recalcMatrixSize()
def solvePressureWave(mesh, velocities, times, sourcePos, uSource, verbose):
    r"""
    Solve pressure wave equation.

    Solve pressure wave for a given source function

    .. math::
        \frac{\partial^2 u}{\partial t^2} & = \diverg(a\grad u) + f\\
        finalize equation


    Parameters
    ----------
    mesh : :gimliapi:`GIMLI::Mesh`
        Mesh to solve on

    velocities : array
        velocities for each cell of the mesh

    time : array
        Time base definition

    sourcePos : RVector3
        Source position

    uSource : array
        u(t, sourcePos) source movement of length(times)
        Usually a Ricker wavelet of the desired seismic signal frequency.

    Returns
    -------
    u : RMatrix

        Return

    Examples
    --------
    See TODO write example
    """
    A = pg.RSparseMatrix()
    M = pg.RSparseMatrix()

    # F = pg.RVector(mesh.nodeCount(), 0.0)
    rhs = pg.RVector(mesh.nodeCount(), 0.0)
    u = pg.RMatrix(len(times), mesh.nodeCount())
    v = pg.RMatrix(len(times), mesh.nodeCount())

    sourceID = mesh.findNearestNode(sourcePos)

    if len(uSource) != len(times):
        # BUGFIX: former message concatenated str with int (TypeError) and
        # dumped the whole source array instead of its length
        raise Exception("length of uSource does not fit length of times: " +
                        str(len(uSource)) + " != " + str(len(times)))

    A.fillStiffnessMatrix(mesh, velocities * velocities)
    M.fillMassMatrix(mesh)
    # M.fillMassMatrix(mesh, velocities)

    FV = 0
    if FV:
        # finite-volume variant: cell-based instead of node-based fields
        A, rhs = pygimli.solver.diffusionConvectionKernel(
            mesh, velocities * velocities, sparse=1)
        M = pygimli.solver.identity(len(rhs))

        u = pg.RMatrix(len(times), mesh.cellCount())
        v = pg.RMatrix(len(times), mesh.cellCount())
        sourceID = mesh.findCell(sourcePos).id()

    dt = times[1] - times[0]

    # implicitness of the time stepping; > 0.5 for numerical damping
    theta = 0.51
    # theta = 1.

    S1 = M + dt * dt * theta * theta * A
    S2 = M

    solver1 = pg.LinSolver(S1, verbose=False)
    solver2 = pg.LinSolver(S2, verbose=False)

    swatch = pg.Stopwatch(True)

    # ut = pg.RVector(mesh.nodeCount(), .0)
    # vt = pg.RVector(mesh.nodeCount(), .0)

    # per-stage timing diagnostics
    timeIter1 = np.zeros(len(times))
    timeIter2 = np.zeros(len(times))
    timeIter3 = np.zeros(len(times))
    timeIter4 = np.zeros(len(times))

    progress = pg.utils.ProgressBar(its=len(times), width=40, sign='+')

    for n in range(1, len(times)):
        # inject the source signal at the source node
        u[n - 1, sourceID] = uSource[n - 1]

        # solve for u
        tic = time.time()
        # + * dt*dt * F
        rhs = dt * M * v[n - 1] + \
            (M - dt * dt * theta * (1. - theta) * A) * u[n - 1]
        timeIter1[n - 1] = time.time() - tic

        tic = time.time()
        u[n] = solver1.solve(rhs)
        timeIter2[n - 1] = time.time() - tic

        # solve for v
        tic = time.time()
        rhs = M * v[n - 1] - dt * \
            ((1. - theta) * A * u[n - 1] + theta * A * u[n])  # + dt * F
        timeIter3[n - 1] = time.time() - tic

        tic = time.time()
        v[n] = solver2.solve(rhs)
        timeIter4[n - 1] = time.time() - tic

        # same as above
        # rhs = M * v[n-1] - dt * A * u[n-1] + dt * F
        # v[n] = solver1.solve(rhs)

        t1 = swatch.duration(True)

        if verbose:
            progress(n)

    return u
def _createParameterContraintsLines(mesh, cMat, cWeight=None):
    """Build start/end point lists for drawing constraint connections.

    NOTE(review): cells are grouped by cell index (cID) here, while the
    constraint ids idL/idR look like parameter markers (the commented
    code below and the sibling implementation group by paraMarker) --
    verify this grouping is intended.
    """
    C = pg.RMatrix()
    if isinstance(cMat, pg.SparseMapMatrix):
        # no direct accessor: round-trip through a temp file to get columns
        cMat.save('tmpC.matrix')
        pg.loadMatrixCol(C, 'tmpC.matrix')
    else:
        C = cMat

    paraMarker = mesh.cellMarkers()
    cellList = dict()
    for cID in range(len(paraMarker)):
        if cID not in cellList:
            cellList[cID] = []
        cellList[cID].append(mesh.cell(cID))

    # geometric center per group (one cell per group with cID keying)
    paraCenter = dict()
    for cID, vals in list(cellList.items()):
        p = pg.RVector3(0.0, 0.0, 0.0)
        for c in vals:
            p += c.center()
        p /= float(len(vals))
        paraCenter[cID] = p

    nConstraints = C[0].size()
    start = []
    end = []
    # swatch = pg.Stopwatch(True)  # not used
    # each constraint occupies two consecutive column entries (left/right id)
    for i in range(0, int(nConstraints / 2)):
        # print i
        # if i == 1000: break;
        idL = int(C[1][i * 2])
        idR = int(C[1][i * 2 + 1])
        # leftCells = []
        # rightCells = []
        # for c, index in enumerate(paraMarker):
        #     if idL == index:
        #         leftCells.append(mesh.cell(c))
        #     if idR == index:
        #         rightCells.append(mesh.cell(c))
        # p1 = pg.RVector3(0.0,0.0);
        # for c in leftCells:
        #     p1 += c.center()
        # p1 /= float(len(leftCells))
        # p2 = pg.RVector3(0.0,0.0);
        # for c in rightCells:
        #     p2 += c.center()
        # print cWeight[i]
        # p2 /= float(len(rightCells))
        p1 = paraCenter[idL]
        p2 = paraCenter[idR]

        if cWeight is not None:
            # shrink the segment toward the centers by (1 - weight)
            pa = pg.RVector3(p1 + (p2 - p1) / 2.0 * (1.0 - cWeight[i]))
            pb = pg.RVector3(p2 + (p1 - p2) / 2.0 * (1.0 - cWeight[i]))
        else:
            pa = p1
            pb = p2

        start.append(pa)
        end.append(pb)

    # updateAxes_(ax)  # not existing
    return start, end
def createJacobian(self, model):
    """Create Jacobian matrix.

    2.5D DC sensitivity: sums weighted wavenumber contributions built
    from element-matrix products of the stored sub-potentials.
    """
    # sub-potentials are a by-product of response(); compute if missing
    if self.subPotentials is None:
        self.response(model)

    J = self.jacobian()
    J.resize(self.data.size(), self.regionManager().parameterCount())

    cells = self.mesh().findCellByMarker(0, -1)
    Si = pg.ElementMatrix()
    St = pg.ElementMatrix()

    u = self.subPotentials

    pg.tic()
    if self.verbose():
        print("Calculate sensitivity matrix for model: ",
              min(model), max(model))

    Jt = pg.RMatrix(self.data.size(),
                    self.regionManager().parameterCount())

    # loop over wavenumbers k with integration weights w
    for kIdx, w in enumerate(self.w):
        k = self.k[kIdx]
        w = self.w[kIdx]

        Jt *= 0.
        A = pg.ElementMatrixMap()

        for i, c in enumerate(cells):
            modelIdx = c.marker()

            # 2.5D: u^2 term scaled by k^2 plus the gradient term
            Si.u2(c)
            Si *= k * k
            Si += St.ux2uy2uz2(c)

            # 3D
            # Si.ux2uy2uz2(c); w = w* 2

            A.add(modelIdx, Si)

        for dataIdx in range(self.data.size()):
            a = int(self.data('a')[dataIdx])
            b = int(self.data('b')[dataIdx])
            m = int(self.data('m')[dataIdx])
            n = int(self.data('n')[dataIdx])

            # sensitivity = (u_a - u_b)^T A (u_m - u_n) per parameter
            Jt[dataIdx] = A.mult(u[kIdx][a] - u[kIdx][b],
                                 u[kIdx][m] - u[kIdx][n])

        J += w * Jt

    # scale to apparent-resistivity sensitivities
    m2 = model * model
    k = self.data('k')

    for i in range(J.rows()):
        J[i] /= (m2 / k[i])

    if self.verbose():
        sumsens = np.zeros(J.rows())
        for i in range(J.rows()):
            sumsens[i] = pg.sum(J[i])
        print("sens sum: median = ", pg.median(sumsens),
              " min = ", pg.min(sumsens),
              " max = ", pg.max(sumsens))
def response(self, model):
    """Solve forward task.

    Create apparent resistivity values for a given resistivity
    distribution for self.mesh.
    """
    mesh = self.mesh()

    nDof = mesh.nodeCount()
    nEle = len(self.electrodes)
    nData = self.data.size()

    # -1.0 marks the background/outside region value
    self.resistivity = res = self.createMappedModel(model, -1.0)

    if self.verbose():
        print("Calculate response for model:", min(res), max(res))

    # wavenumber integration range from electrode spacing
    rMin = self.electrodes[0].dist(self.electrodes[1]) / 2.0
    rMax = self.electrodes[0].dist(self.electrodes[-1]) * 2.0

    k, w = self.getIntegrationWeights(rMin, rMax)

    self.k = k
    self.w = w

    rhs = self.createRHS(mesh, self.electrodes)

    # store all potential fields
    u = np.zeros((nEle, nDof))
    self.subPotentials = [pg.RMatrix(nEle, nDof) for i in range(len(k))]

    # solve one Helmholtz-type problem per wavenumber and integrate
    for i, ki in enumerate(k):
        uE = pg.solve(mesh, a=1. / res, b=(ki * ki) / res, f=rhs,
                      bc={'Robin': self.mixedBC},
                      userData={'sourcePos': self.electrodes, 'k': ki},
                      verbose=False, stat=0, debug=False,
                      ret=self.subPotentials[i])
        u += w[i] * uE

    # collect potential matrix,
    # i.e., potential for all electrodes and all injections
    pM = np.zeros((nEle, nEle))

    for i in range(nEle):
        pM[i] = pg.interpolate(mesh, u[i, :], destPos=self.electrodes)

    # collect resistivity values for all 4 pole measurements
    r = np.zeros(nData)

    for i in range(nData):
        iA = int(self.data('a')[i])
        iB = int(self.data('b')[i])
        iM = int(self.data('m')[i])
        iN = int(self.data('n')[i])

        uAB = pM[iA] - pM[iB]
        r[i] = uAB[iM] - uAB[iN]

    # apply geometric factors to get apparent resistivities
    self.lastResponse = r * self.data('k')

    if self.verbose():
        print("Resp: ", min(self.lastResponse), max(self.lastResponse))

    return self.lastResponse
def resistivityArchie(rFluid, porosity, a=1.0, m=2.0, sat=1.0, n=2.0,
                      mesh=None, meshI=None, fill=None, show=False):
    r"""Resistivity of rock for the petrophysical model from Archies law.

    Calculates resistivity of rock for the petrophysical model from
    Archie's law. :cite:`Archie1942`

    .. math::
        \rho = a\rho_{\text{fl}}\phi^{-m} S^{-n}

    * :math:`\rho` - the electrical resistivity of the fluid saturated
      rock in :math:`\Omega\text{m}`
    * :math:`\rho_{\text{fl}}` - rFluid: electrical resistivity of the
      fluid in :math:`\Omega\text{m}`
    * :math:`\phi` - porosity 0.0 --1.0
    * :math:`S` - fluid saturation 0.0 --1.0 [sat]
    * :math:`a` - Tortuosity factor. (common 1)
    * :math:`m` - Cementation exponent of the rock (usually in the
      range 1.3 -- 2.5 for sandstones)
    * :math:`n` - is the saturation exponent (usually close to 2)

    If mesh is not None the resulting values are calculated for each cell of
    the mesh. All parameter can be scalar, array of length mesh.cellCount()
    or callable(pg.cell). If rFluid is non-steady n-step distribution
    than rFluid can be a matrix of size(n, mesh.cellCount())
    If meshI is not None the result is interpolated to meshI.cellCenters()
    and prolonged (if fill ==1).

    Notes
    -----
        We experience some unstable nonlinear behavior.
        Until this is clarified all results are rounded to the precision 1e-6.

    Examples
    --------
    >>> # WRITEME
    """
    phi = porosity
    if isinstance(porosity, list):
        phi = np.array(porosity)

    # mesh-free shortcut: plain Archie formula on the raw arguments
    if mesh is None:
        return rFluid * a * phi**(-m) * sat**(-n)

    rB = None

    # normalize rFluid into a matrix rB with one row per time step
    if isinstance(rFluid, float):
        rB = pg.RMatrix(1, mesh.cellCount())
        rB[0] = pg.solver.parseArgToArray(rFluid, mesh.cellCount(), mesh)
    elif isinstance(rFluid, pg.RVector):
        rB = pg.RMatrix(1, len(rFluid))
        rB[0] = pg.solver.parseArgToArray(rFluid, mesh.cellCount(), mesh)
    elif hasattr(rFluid, 'ndim') and rFluid.ndim == 1:
        rB = pg.RMatrix(1, len(rFluid))
        rB[0] = pg.solver.parseArgToArray(rFluid, mesh.cellCount(), mesh)
    elif hasattr(rFluid, 'ndim') and rFluid.ndim == 2:
        rB = pg.RMatrix(len(rFluid), len(rFluid[0]))
        for i, rFi in enumerate(rFluid):
            rB[i] = rFi

    # broadcast every petrophysical parameter to one value per cell
    phi = pg.solver.parseArgToArray(phi, mesh.cellCount(), mesh)
    a = pg.solver.parseArgToArray(a, mesh.cellCount(), mesh)
    m = pg.solver.parseArgToArray(m, mesh.cellCount(), mesh)
    S = pg.solver.parseArgToArray(sat, mesh.cellCount(), mesh)
    n = pg.solver.parseArgToArray(n, mesh.cellCount(), mesh)

    if show:
        pg.show(mesh, S, label='S')
        pg.show(mesh, phi, label='p')
        pg.wait()

    r = pg.RMatrix(len(rB), len(rB[0]))
    for i, _ in enumerate(r):
        r[i] = rB[i] * a * phi**(-m) * S**(-n)

    r.round(1e-6)

    if meshI is None:
        if len(r) == 1:
            return r[0].copy()
        return r

    rI = pg.RMatrix(len(r), meshI.cellCount())
    if meshI:
        pg.interpolate(mesh, r, meshI.cellCenters(), rI)

    if fill:
        for i, ri_ in enumerate(rI):
            # slope == True produce unstable behavior .. check!!!!!!
            rI[i] = mt.fillEmptyToCellArray(meshI, ri_, slope=False)

    rI.round(1e-6)

    if len(rI) == 1:
        # copy here because of missing refcounter TODO
        return rI[0].array()
    return rI
#!/usr/bin/env python # -*- coding: utf-8 -*- import pygimli as g import pylab as P from pygimli.utils.base import draw1dmodel nlay = 4 lam = 200. errPerc = 3. abmnr = g.RMatrix() g.loadMatrixCol(abmnr, "sond1-100.ves") ab2 = abmnr[0] mn2 = abmnr[1] rhoa = abmnr[2] transRho = g.RTransLogLU(1., 1000.) transThk = g.RTransLog() transRhoa = g.RTransLog() f = g.DC1dModelling(nlay, ab2, mn2) f.region(0).setTransModel(transThk) f.region(1).setTransModel(transRho) paraDepth = max(ab2) / 3 f.region(0).setStartValue(max(ab2) / 3. / nlay / 2.) f.region(1).setStartValue(P.median(rhoa)) model = f.createStartVector() model[nlay] *= 1.5
def calc(out, mesh, density, viscosity):
    """Coupled Stokes-flow / density-advection simulation.

    Alternates solveStokes and solveFiniteVolume for nSteps time steps,
    collecting density fields and velocities, then saves results under
    the `out` prefix. `viscosity` argument is overwritten each step.
    """
    print(mesh)
    # no-slip/no-penetration pattern: 'nan' = component left free
    velBoundary = [[1, [0.0, 'nan']],
                   [2, [0.0, 'nan']],
                   [3, ['nan', 0.0]],
                   [4, ['nan', 0.0]]]
    preBoundary = [[1, 0.0],
                   [2, 0.0],
                   [3, 0.0],
                   [4, 0.0], ]

    densMatrix = pg.RMatrix()  # density history, one row per step
    vels = []                  # velocity history

    swatch = pg.Stopwatch(True)

    class WS():
        pass
    wsfv = WS()  # workspace reused by the Stokes solver

    ax, _ = pg.show(mesh, density)

    v = 1           # selects the viscosity model below
    nSteps = 3000
    dt = 0.1 * v
    dtSteps = 20    # advection sub-steps per Stokes step

    meshC = pg.createGrid(x=np.linspace(-10, 10, 21),
                          y=np.linspace(0, 20, 21))

    vel = None
    pre = None

    for i in range(nSteps):
        print(i, 'dens', min(density), max(density), "t:", dt * i)
        densMatrix.push_back(density)

        # viscosity coupling to density, controlled by v
        if v > 1:
            viscosity = 1.0 * density  # v3
        elif v < 1:
            viscosity = 1.0 / density  # v3
        else:
            viscosity = 1.0

        # buoyancy-driven Stokes flow, warm-started from last step
        vel, pre, pCNorm, divVNorm = solver.solveStokes(
            mesh,
            velBoundary=velBoundary,
            preBoundary=preBoundary,
            viscosity=viscosity,
            density=density,
            pre0=pre,
            vel0=vel,
            f=[density * 0, (density - 1.0) * -9.81],
            maxIter=1000,
            tol=1e-6,
            verbose=1,
            vRelax=0.1,
            pRelax=0.1,
            ws=wsfv)
        vels.append(vel)
        print("stokes:", swatch.duration(True), "div V: ", divVNorm[-1])

        # advect (and slightly diffuse) the density with the new velocity
        dens2 = solver.solveFiniteVolume(
            mesh,
            a=1. / 500,
            b=0.0,
            u0=density,
            vel=vel,
            times=np.linspace(0, dt, dtSteps),
            # uBoundary=[4, 0],
            scheme='PS',
            verbose=0)
        print("Convekt:", swatch.duration(True))

        density = dens2[-1]

        ax.clear()
        pg.show(mesh, density, axes=ax)
        pg.show(mesh, vel, coarseMesh=meshC, axes=ax, color='white')

    mesh.save(out)
    meshC.save(out + 'C')
    densMatrix.save(out + 'density.bmat')
    # NOTE(review): np.save appends '.npy' -- the file will not actually
    # be a .bmat; confirm the intended format
    np.save(out + 'velo.bmat', vels)
def invert(self, data, values=None, verbose=0, **kwargs):
    """
    Invert the given data.

    A parametric mesh for the inversion will be created if non is
    given before.

    Parameters
    ----------
    data : data container with 'rhoa' and 'err'
    values : RVector | ndarray | list of those [None]
        Optional timelapse data; each frame is inverted as a
        difference step relative to the base model.
    verbose : int [0]
        Verbosity for forward operator and inversion.
    **kwargs
        maxiter [10], lambd [10]; rest forwarded to mesh creation.

    Returns
    -------
    model (no values) or allModel matrix (one row per frame).
    """
    self.fop.setVerbose(verbose)
    self.inv.setVerbose(verbose)
    self.inv.setMaxIter(kwargs.pop('maxiter', 10))
    self.inv.setLambda(kwargs.pop('lambd', 10))

    if self.paraMesh is None:
        self.paraMesh = createParaMesh2dGrid(data.sensorPositions(),
                                             **kwargs)
        self.setParaMesh(self.paraMesh)
        if verbose:
            print(self.paraMesh)
            # pg.show(self.paraMesh)

    err = data('err')
    rhoa = data('rhoa')

    # homogeneous start model at the data median
    startModel = pg.RVector(self.fop.regionManager().parameterCount(),
                            pg.median(rhoa))

    self.fop.setData(data)
    self.inv.setForwardOperator(self.fop)

    # check err here
    self.inv.setData(rhoa)
    self.inv.setError(err)
    self.inv.setModel(startModel)

    model = self.inv.run()

    if values is not None:
        # normalize timelapse input to a list of frames
        if isinstance(values, pg.RVector):
            values = [values]
        elif isinstance(values, np.ndarray):
            if values.ndim == 1:
                values = [values]

        allModel = pg.RMatrix(len(values), len(model))

        self.inv.setVerbose(False)
        for i in range(len(values)):
            print(i)
            tic = time.time()
            # single linearized step around (and regularized toward)
            # the base model, using log data ratios
            self.inv.setModel(model)
            self.inv.setReferenceModel(model)
            dData = pg.abs(values[i] / rhoa)

            relModel = self.inv.invSubStep(pg.log(dData))
            allModel[i] = model * pg.exp(relModel)
            print(i, "/", len(values), " : ", time.time() - tic,
                  "s min/max: ", min(allModel[i]), max(allModel[i]))

        return allModel
    return model