Example #1
    def checkData(self):
        """Check data

        w.r.t. shot/geophone identity and zero/negative
        traveltimes, plus check y/z sensor positions
        """
        oldsize = self.dataContainer.size()
        self.dataContainer.markInvalid(pg.abs(self.dataContainer('s') -
                                              self.dataContainer('g')) < 1)
        self.dataContainer.markInvalid(self.dataContainer('t') <= 0.)
        self.dataContainer.removeInvalid()
        newsize = self.dataContainer.size()

        if newsize < oldsize:
            if self.verbose:
                print('Removed ' + str(oldsize - newsize) + ' values.')

        maxyabs = max(pg.abs(pg.y(self.dataContainer.sensorPositions())))
        maxzabs = max(pg.abs(pg.z(self.dataContainer.sensorPositions())))

        if maxzabs > 0 and maxyabs == 0:
            for i in range(self.dataContainer.sensorCount()):
                pos = self.dataContainer.sensorPosition(i).rotateX(-pi / 2)
                self.dataContainer.setSensorPosition(i, pos)

        if self.verbose:
            print(self.dataContainer)
Example #2
    def checkData(self):
        """Check data

        w.r.t. shot/geophone identity and zero/negative
        traveltimes, plus check y/z sensor positions
        """
        oldsize = self.dataContainer.size()
        self.dataContainer.markInvalid(
            pg.abs(self.dataContainer('s') - self.dataContainer('g')) < 1)
        self.dataContainer.markInvalid(self.dataContainer('t') <= 0.)
        self.dataContainer.removeInvalid()
        newsize = self.dataContainer.size()

        if newsize < oldsize:
            if self.verbose:
                print('Removed ' + str(oldsize - newsize) + ' values.')

        maxyabs = max(pg.abs(pg.y(self.dataContainer.sensorPositions())))
        maxzabs = max(pg.abs(pg.z(self.dataContainer.sensorPositions())))

        if maxzabs > 0 and maxyabs == 0:
            for i in range(self.dataContainer.sensorCount()):
                pos = self.dataContainer.sensorPosition(i).rotateX(-pi / 2)
                self.dataContainer.setSensorPosition(i, pos)

        if self.verbose:
            print(self.dataContainer)
Example #3
def drawWaveField(ax, u):
    ui = u / max(pg.abs(u))
    ui = pg.logDropTol(ui, 1e-2)
    cMax = max(pg.abs(ui))

    drawField(ax, mesh, data=ui, cMin=-cMax, cMax=cMax, cmap='RdBu',
              #interpolate=0, shading='gouraud'
              )
Example #4
    def animate(i):
        i = i*5
        if i > len(uI)-1:
            return
        print("Frame:", i, "/", len(uI))
        ui = uI[i]
        ui = ui / max(pg.abs(ui))
        ui = pg.logDropTol(ui, 1e-2)
        cMax = max(pg.abs(ui))

        pg.mplviewer.setMappableData(gci, ui,
                                     cMin=-cMax, cMax=cMax,
                                     logScale=False)
Example #5
def drawWaveField(ax, u):
    ui = u / max(pg.abs(u))
    ui = pg.logDropTol(ui, 1e-2)
    cMax = max(pg.abs(ui))

    drawField(
        ax,
        mesh,
        data=ui,
        cMin=-cMax,
        cMax=cMax,
        cmap='RdBu',
        #interpolate=0, shading='gouraud'
    )
Example #6
    def animate(i):
        i = i * 5
        if i > len(uI) - 1:
            return
        print("Frame:", i, "/", len(uI))
        ui = uI[i]
        ui = ui / max(pg.abs(ui))
        ui = pg.logDropTol(ui, 1e-2)
        cMax = max(pg.abs(ui))

        pg.viewer.mpl.setMappableData(gci,
                                      ui,
                                      cMin=-cMax,
                                      cMax=cMax,
                                      logScale=False)
Example #7
def readTOMfile(filename, ndig=2, roundto=0):
    """Read Reflex tomography (*.TOM) file"""
    t, xT, zT, xR, zR = np.loadtxt(filename, usecols=(0, 2, 3, 4, 5), unpack=1)
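    # Encode each (x, z) position as a complex number x - z*1j so that
    # np.unique below can find the distinct shot/geophone locations and the
    # list comprehensions can map every shot/receiver to a sensor index.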
    if roundto > 0:
        pT = (np.round(xT / roundto) - np.round(zT / roundto) * 1j) * roundto
        pR = (np.round(xR / roundto) - np.round(zR / roundto) * 1j) * roundto
    else:
        pT = xT.round(ndig) - zT.round(ndig) * 1j
        pR = xR.round(ndig) - zR.round(ndig) * 1j
    pU = np.unique(np.hstack((pT, pR)))
    iT = np.array([np.nonzero(pU == pi)[0][0] for pi in pT], dtype=float)
    iR = np.array([np.nonzero(pU == pi)[0][0] for pi in pR], dtype=float)
    data = pg.DataContainer()
    for pp in pU:
        data.createSensor(pg.RVector3(pp.real, pp.imag))

    for tok in ['s', 'g']:
        data.registerSensorIndex(tok)

    data.resize(len(t))
    data.set('t', t)
    data.set('s', iT)
    data.set('g', iR)
    data.markValid(pg.abs(data('s') - data('g')) > 0)
    return data
Example #8
def readTOMfile(filename, ndig=2, roundto=0):
    """Read Reflex tomography (*.TOM) file"""
    t, xT, zT, xR, zR = np.loadtxt(filename, usecols=(0, 2, 3, 4, 5), unpack=1)
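    # Encode each (x, z) position as a complex number x - z*1j so that
    # np.unique below can find the distinct shot/geophone locations and the
    # list comprehensions can map every shot/receiver to a sensor index.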
    if roundto > 0:
        pT = (np.round(xT/roundto) - np.round(zT/roundto) * 1j) * roundto
        pR = (np.round(xR/roundto) - np.round(zR/roundto) * 1j) * roundto
    else:
        pT = xT.round(ndig) - zT.round(ndig) * 1j
        pR = xR.round(ndig) - zR.round(ndig) * 1j
    pU = np.unique(np.hstack((pT, pR)))
    iT = np.array([np.nonzero(pU == pi)[0][0] for pi in pT], dtype=float)
    iR = np.array([np.nonzero(pU == pi)[0][0] for pi in pR], dtype=float)
    data = pg.DataContainer()
    for pp in pU:
        data.createSensor(pg.RVector3(pp.real, pp.imag))

    for tok in ['s', 'g']:
        data.registerSensorIndex(tok)

    data.resize(len(t))
    data.set('t', t)
    data.set('s', iT)
    data.set('g', iR)
    data.markValid(pg.abs(data('s') - data('g')) > 0)
    return data
Example #9
def drawSeismogramm(axes, mesh, u, ids, dt, i=None):
    r"""Extract and show time series from wave field

    Parameters
    ----------
    """
    axes.set_xlim(-20., 20.)
    axes.set_ylim(0., dt * len(u) * 1000)
    axes.set_aspect(1)
    axes.set_ylabel('Time in ms')

    if i is None:
        i = len(u) - 1

    t = np.linspace(0, i * dt * 1000, i + 1)

    for iw, n in enumerate(ids):
        pos = mesh.node(n).pos()
        print(pos)
        axes.plot(pos[0], 0.05, '^', color='black')

        trace = pg.cat(pg.RVector(0), u[:(i + 1), n])
        #        print(i+1, n)
        #        print(trace, (max(pg.abs(trace))))

        #        if max(pg.abs(trace)) > 1e-8:

        trace *= np.exp(0.5 * t)
        trace /= (max(pg.abs(trace)) * 1.5)

        drawWiggle(axes, trace, t=t, xoffset=pos[0])
    axes.invert_yaxis()
Example #10
def drawSeismogramm(axes, mesh, u, ids, dt, i=None):
    r"""Extract and show time series from wave field

    Parameters
    ----------
    """
    axes.set_xlim(-20., 20.)
    axes.set_ylim(0., dt*len(u)*1000)
    axes.set_aspect(1)
    axes.set_ylabel('Time in ms')

    if i is None:
        i = len(u)-1

    t = np.linspace(0, i*dt*1000, i+1)

    for iw, n in enumerate(ids):
        pos = mesh.node(n).pos()
        print(pos)
        axes.plot(pos[0], 0.05, '^', color='black')

        trace = pg.cat(pg.RVector(0), u[:(i+1), n])
#        print(i+1, n)
#        print(trace, (max(pg.abs(trace))))

#        if max(pg.abs(trace)) > 1e-8:

        trace *= np.exp(0.5*t)
        trace /= (max(pg.abs(trace))*1.5)

        drawWiggle(axes, trace, t=t, xoffset=pos[0])
    axes.invert_yaxis()
Example #11
    def response_mt(self, par, i=0):
        """Return response (multi threaded)."""

        verbose = 1
        if i == 0:
            ws = self.ws
        else:
            ws = WorkSpace()

        mesh = pg.Mesh(self.mesh())

        k = self.createMappedModel(par)

        ws.mesh, ws.vel, ws.p, ws.k, ws.velC = solveDarcy(mesh,
                                                          k=k,
                                                          p0=0.75,
                                                          verbose=verbose)

        ws.sat = solveAdvection(ws.mesh,
                                ws.vel,
                                self.timesAdvection,
                                diffusion=pg.abs(ws.velC.T) * 1e-2,
                                verbose=verbose)

        ws.meshERT, ws.scheme, ws.resis, ws.rhoa, ws.rhoaR, ws.derr = \
            solveERT(ws.mesh, ws.sat[self.timesERT], verbose=verbose)

        return ws.rhoaR.flatten()
Example #12
 def animate(i):
     print(out + ": Frame:", i, "/", len(data))
     pg.mplviewer.setMappableData(gci,
                                  pg.abs(data[i]),
                                  cMin=cMin,
                                  cMax=cMax,
                                  logScale=logScale)
Example #13
def animate(i):
    if i > 0:
        tic = time.time()
        i = i * 10
        ax.clear()
        uShow = drawWaveField(ax, u[i,:])
        print(i, time.time()-tic, len(u), min(pg.abs(u[i,:])), min(u[i,:]), max(u[i,:]), dt*i)
Example #14
    def showVAold(self, vals=None, ax=None, usepos=True, name='va'):
        """show apparent velocity as image plot (old style)"""
        va = self.getVA(t=vals)
        data = self.dataContainer
        A = np.ones((data.sensorCount(), data.sensorCount())) * np.nan
        for i in range(data.size()):
            A[int(data('s')[i]), int(data('g')[i])] = va[i]

        if ax is None:
            fig, ax = plt.subplots()
            self.figs[name] = fig

        gci = ax.imshow(A, interpolation='nearest')
        ax.grid(True)
        if usepos:
            xt = np.linspace(0, data.sensorCount() - 1, 7)
            xt = xt.round()
            px = pg.abs(pg.y(self.dataContainer.sensorPositions()))
            ax.set_xticks(xt)
            ax.set_xticklabels([str(int(px[int(xti)])) for xti in xt])
            ax.set_yticks(xt)
            ax.set_yticklabels([str(int(px[int(xti)])) for xti in xt])

        plt.colorbar(gci, ax=ax)
        return va
Example #15
    def showVAold(self, vals=None, ax=None, usepos=True, name='va'):
        """show apparent velocity as image plot (old style)"""
        va = self.getVA(t=vals)
        data = self.dataContainer
        A = np.ones((data.sensorCount(), data.sensorCount())) * np.nan
        for i in range(data.size()):
            A[int(data('s')[i]), int(data('g')[i])] = va[i]

        if ax is None:
            fig, ax = plt.subplots()
            self.figs[name] = fig

        gci = ax.imshow(A, interpolation='nearest')
        ax.grid(True)
        if usepos:
            xt = np.linspace(0, data.sensorCount()-1, 7)
            xt = xt.round()
            px = pg.abs(pg.y(self.dataContainer.sensorPositions()))
            ax.set_xticks(xt)
            ax.set_xticklabels([str(int(px[int(xti)])) for xti in xt])
            ax.set_yticks(xt)
            ax.set_yticklabels([str(int(px[int(xti)])) for xti in xt])

        plt.colorbar(gci, ax=ax)
        return va
Example #16
        def _test_(mesh, show=False):
            vTest = 0.1
            u = pg.solve(mesh, a=1, f=0,
                         bc={'Node': [mesh.findNearestNode([0.0, 0.0]), 0.],
                             'Neumann': [[1, -vTest], [2, vTest]]}, verbose=0)

            # compute the gradient first so v is defined for the plots below
            v = pg.solver.grad(mesh, u)

            if show:
                if mesh.dim() == 1:
                    pg.plt.plot(pg.x(mesh), u)
                elif mesh.dim() == 2:
                    pg.show(mesh, pg.abs(v))
                    pg.show(mesh, v, showMesh=1)
                pg.wait()

            # print("|v|:", min(pg.abs(v)), max(pg.abs(v)), pg.mean(pg.abs(v)))
            np.testing.assert_allclose(pg.abs(v), np.ones(mesh.cellCount())*vTest)
            return v
Example #17
        def _test_(mesh, show=False):
            vTest = 0.1
            u = pg.solve(mesh, a=1, f=0,
                         bc={'Node': [mesh.findNearestNode([0.0, 0.0]), 0.],
                             'Neumann': [[1, -vTest], [2, vTest]]}, verbose=0)

            # compute the gradient first so v is defined for the plots below
            v = pg.solver.grad(mesh, u)

            if show:
                if mesh.dim() == 1:
                    pg.plt.plot(pg.x(mesh), u)
                elif mesh.dim() == 2:
                    pg.show(mesh, pg.abs(v))
                    pg.show(mesh, v, showMesh=1)
                pg.wait()

            # print("|v|:", min(pg.abs(v)), max(pg.abs(v)), pg.mean(pg.abs(v)))
            np.testing.assert_allclose(pg.abs(v), np.ones(mesh.cellCount())*vTest)
            return v
Example #18
def calcApparentResistivities(mesh, meshERT, poro, rhoBrine):
    ert = ERT(verbose=False)

    meshFOP = appendTriangleBoundary(meshERT,
                                     xbound=50, ybound=50, marker=1,
                                     quality=34.0, smooth=False,
                                     markerBoundary=1,
                                     isSubSurface=False, verbose=False)

    swatch = pg.Stopwatch(True)

    print("res 1:", swatch.duration(True))

    resis = resistivityArchie(rBrine=rhoBrine, porosity=poro, S=1.0,
                              mesh=mesh, meshI=meshFOP)

    print("res 2:", swatch.duration(True))

    ertPointsX = [pg.RVector3(x, 0) for x in np.arange(-19, 19.1, 1)]
    ertScheme = ert.createData(ertPointsX, scheme="Dipole Dipole (CC-PP)")

    solutionName = createCacheName('appRes', mesh) + "-" + \
        str(ertScheme.size()) + "-" + str(len(rhoBrine))

    try:
        rhoa = np.load(solutionName + '.bmat.npy')
        ertData = pb.DataContainerERT(solutionName + '.dat')
    except Exception as e:
        print(e)
        print("Building .... ")
        rhoa = np.zeros((len(resis), ertScheme.size()))
        ertScheme.set('k', pb.geometricFactor(ertScheme))
        ertData = ert.simulate(meshFOP, resis[0], ertScheme)

        errPerc = 1
        errVolt = 1e-5
        voltage = ertData('rhoa') / ertData('k')
        ertData.set('err', pg.abs(errVolt / voltage) + errPerc / 100.0)
        print('err min:', min(ertData('err'))*100, 'max:',
              max(ertData('err'))*100)
        ertData.save(solutionName + '.dat', 'a b m n rhoa err k')
        for i in range(0, len(resis)):
            tic = time.time()
            rhoa[i] = ert.fop.response(resis[i])

            rand = pg.RVector(len(rhoa[i]))
            pg.randn(rand)

            rhoa[i] *= (1.0 + rand * ertData('err'))

            print(i, "/", len(resis), " : ", time.time()-tic, "s",
                  "min:", min(resis[i]), "max:", max(resis[i]),
                  "min:", min(rhoa[i]), "max:", max(rhoa[i]))

        np.save(solutionName + '.bmat', rhoa)

    return meshFOP, resis, ertData, rhoa
Example #19
    def checkData(self):
        """ check data w.r.t. shot/geophone identity and zero/negative
        traveltimes, plus check y/z sensor positions """
        oldsize = self.data.size()
        self.data.markInvalid(pg.abs(self.data("s") - self.data("g")) < 1)
        self.data.markInvalid(self.data("t") <= 0.0)
        self.data.removeInvalid()
        newsize = self.data.size()
        if newsize < oldsize:
            print("Removed " + str(oldsize - newsize) + " values.")
        maxyabs = max(pg.abs(pg.y(self.data.sensorPositions())))
        maxzabs = max(pg.abs(pg.z(self.data.sensorPositions())))
        if maxzabs > 0 and maxyabs == 0:
            for i in range(self.data.sensorCount()):
                pos = self.data.sensorPosition(i).rotateX(-pi / 2)
                self.data.setSensorPosition(i, pos)

        print(self.data)
Example #20
    def invert(self, sensorPositions, gz, errAbs,
               verbose=0, **kwargs):
        """
        """
        self.fop.setVerbose(verbose)
        self.inv.setMaxIter(kwargs.pop('maxiter', 10))
        self.inv.setLambda(kwargs.pop('lambd', 10))

        self.fop.setSensorPositions(sensorPositions)
        mesh = kwargs.pop('mesh', None)
        if mesh is None:
            raise NotImplementedError('implement me')

        self.setParaMesh(mesh)

        startModel = pg.RVector(self.fop.regionManager().parameterCount(), 0.0)

        self.inv.setForwardOperator(self.fop)

        self.fop.regionManager().setConstraintType(10)
        # check err here
        self.inv.setData(gz)
        self.inv.setAbsoluteError(errAbs)
        self.inv.setModel(startModel)

        model = self.inv.run()
        return model
        # tl can start here
        values = model
        if values is not None:

            if isinstance(values, pg.RVector):
                values = [values]
            elif isinstance(values, np.ndarray):
                if values.ndim == 1:
                    values = [values]

            allModel = pg.RMatrix(len(values)+1, len(model))
            allModel[0] = model
            self.inv.setVerbose(False)
            for i in range(1, len(values)):
                tic = time.time()
                self.inv.setModel(model)
                self.inv.setReferenceModel(model)
                dData = pg.abs(values[i] - data)

                # relModel = self.inv.invSubStep(pg.log(dData))
                # allModel[i] = model * pg.exp(relModel)

                relModel = self.inv.invSubStep(dData)
                allModel[i] = model + relModel

                print(i, "/", len(values), " : ", time.time()-tic, "s")

            return allModel

        return model
Example #21
def animate(i):
    if i > 0:
        tic = time.time()
        i = i * 10
        ax.clear()
        uShow = drawWaveField(ax, u[i, :])
        print(i,
              time.time() - tic, len(u), min(pg.abs(u[i, :])), min(u[i, :]),
              max(u[i, :]), dt * i)
Example #22
def logDropTol(p, droptol=1e-3):
    """Create logarithmic scaled copy of p."""
    tmp = pg.RVector(p)

    tmp = pg.abs(tmp / droptol)
    tmp.setVal(1.0, pg.find(tmp < 1.0))

    # element-wise equivalent of the two vectorized lines above:
    # for i, v in enumerate(tmp):
    #     tmp[i] = abs(tmp[i] / droptol)
    #     if tmp[i] < 1.0: tmp[i] = 1.0

    tmp = pg.log10(tmp)
    tmp *= pg.sign(p)
    return tmp
Example #23
        def _test_(mesh):
            vTest = 0.1
            u = pg.solve(mesh,
                         a=1,
                         bc={
                             'Node': [mesh.findNearestNode([0.0, 0.0]), 0.],
                             'Neumann': [[1, -vTest], [2, vTest]]
                         })

            v = pg.solver.grad(mesh, u)
            #print("|v|:", min(pg.abs(v)), max(pg.abs(v)), pg.mean(pg.abs(v)))
            np.testing.assert_allclose(pg.abs(v),
                                       np.ones(mesh.cellCount()) * vTest)
            return v
Example #24
def logDropTol(p, droptol=1e-3):
    """
    Example
    -------
    >>> from pygimli.utils import logDropTol
    >>> x = logDropTol((-10,-1,0,1,100))
    >>> print(x.array())
    [-4. -3.  0.  3.  5.]
    """
    tmp = pg.RVector(p)

    tmp = pg.abs(tmp / droptol)
    tmp.setVal(1.0, pg.find(tmp < 1.0))

    tmp = pg.log10(tmp)
    tmp *= pg.sign(p)
    return tmp
Example #25
def drawSeismogramm(ax, mesh, u, dt, ids=None, pos=None, i=None):
    r"""Extract and show time series from wave field

    Parameters
    ----------

    ids: list
        List of node ids for the given mesh.
    pos : list
        List of positions for the given mesh. We will look for the nearest node.

    """
    ax.set_xlim(mesh.xmin(), mesh.xmax())
    ax.set_ylim(0., dt * len(u) * 1000)
    ax.set_aspect(1)
    ax.set_ylabel('Time (ms)')
    ax.set_xlabel('Distance (m)')

    if i is None:
        i = len(u) - 1

    t = np.linspace(0, i * dt * 1000, i + 1)

    if ids is None and pos is not None:
        ids = []
        for p in pos:
            ids.append(mesh.findNearestNode(p))

    xDist = mesh.node(0).pos().distance(mesh.node(1).pos())
    for iw, n in enumerate(ids):
        pos = mesh.node(n).pos()
        ax.plot(pos[0], 0.05, '^', color='black')
        print(pos)
        trace = pg.cat(pg.RVector(0), u[:(i + 1), n])
        #        print(i+1, n)
        #        print(trace, (max(pg.abs(trace))))

        #        if max(pg.abs(trace)) > 1e-8:

        trace *= np.exp(1 / 1000 * t)
        trace /= (max(pg.abs(trace)))
        trace *= 10

        drawWiggle(ax, trace, t=t, xoffset=pos[0])
    ax.invert_yaxis()
Example #26
def logDropTol(p, droptol=1e-3):
    """Create logarithmic scaled copy of p.

    Examples
    --------
    >>> from pygimli.utils import logDropTol
    >>> x = logDropTol((-10, -1, 0, 1, 100))
    >>> print(x.array())
    [-4. -3.  0.  3.  5.]
    """
    tmp = pg.RVector(p)

    tmp = pg.abs(tmp / droptol)
    tmp.setVal(1.0, pg.find(tmp < 1.0))

    tmp = pg.log10(tmp)
    tmp *= pg.sign(p)
    return tmp
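For reference, the doctest above can be reproduced with plain NumPy. This is an illustrative sketch only; npLogDropTol is a hypothetical helper name, not part of pygimli:

import numpy as np

def npLogDropTol(p, droptol=1e-3):
    # sign-preserving log10 scaling: values with |p| < droptol map to 0
    p = np.asarray(p, dtype=float)
    scaled = np.maximum(np.abs(p) / droptol, 1.0)
    return np.sign(p) * np.log10(scaled)

print(npLogDropTol((-10, -1, 0, 1, 100)))  # -> [-4. -3.  0.  3.  5.]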
Example #27
def drawSeismogramm(ax, mesh, u, dt, ids=None, pos=None, i=None):
    r"""Extract and show time series from wave field

    Parameters
    ----------

    ids: list
        List of node ids for the given mesh.
    pos : list
        List of positions for the given mesh. We will look for the nearest node.

    """
    ax.set_xlim(mesh.xmin(), mesh.xmax())
    ax.set_ylim(0., dt*len(u)*1000)
    ax.set_aspect(1)
    ax.set_ylabel('Time (ms)')
    ax.set_xlabel('Distance (m)')

    if i is None:
        i = len(u)-1

    t = np.linspace(0, i*dt*1000, i+1)

    if ids is None and pos is not None:
        ids = []
        for p in pos:
            ids.append(mesh.findNearestNode(p))

    xDist = mesh.node(0).pos().distance(mesh.node(1).pos())
    for iw, n in enumerate(ids):
        pos = mesh.node(n).pos()
        ax.plot(pos[0], 0.05, '^', color='black')
        trace = pg.cat(pg.RVector(0), u[:(i+1), n])
#        print(i+1, n)
#        print(trace, (max(pg.abs(trace))))

#        if max(pg.abs(trace)) > 1e-8:

        trace *= np.exp(1/1000 * t)
        trace /= (max(pg.abs(trace)))
        trace *= 10

        drawWiggle(ax, trace, t=t, xoffset=pos[0])
    ax.invert_yaxis()
Example #28
    def response_mt(self, par, i=0):
        """Return response (multi threaded)."""

        verbose = 1
        if i == 0:
            ws = self.ws
        else:
            ws = WorkSpace()

        mesh = pg.Mesh(self.mesh())

        k = self.createMappedModel(par)

        ws.mesh, ws.vel, ws.p, ws.k, ws.velC = solveDarcy(mesh, k=k, p0=0.75,
                                                          verbose=verbose)

        ws.sat = solveAdvection(ws.mesh, ws.vel, self.timesAdvection,
                                diffusion=pg.abs(ws.velC.T) * 1e-2,
                                verbose=verbose)

        ws.meshERT, ws.scheme, ws.resis, ws.rhoa, ws.rhoaR, ws.derr = \
            solveERT(ws.mesh, ws.sat[self.timesERT], verbose=verbose)

        return ws.rhoaR.flatten()
Example #29
# %%
# as desired for a roughness operator. Therefore, an additional matrix called
# :py:func:`pg.matrix.GeostatisticConstraintsMatrix`
# was implemented where this spur is corrected for.
# It is, like the correlation matrix, created by a mesh, a list of correlation
# lengths I, a dip angle that distorts the x/y plane, and a strike angle
# towards the third direction.
#
C = pg.matrix.GeostatisticConstraintsMatrix(mesh=mesh, I=5)

# %%
# In order to extract a certain column, we generate a vector with a single 1
vec = pg.Vector(mesh.cellCount())
vec[ind] = 1.0
pg.show(mesh, pg.log10(pg.abs(C * vec)), cMin=-6, cMax=0, cMap="magma_r")

# %%
# The constraints have a rather small footprint compared to the correlation
# (note the logarithmic scale) but still extend over the whole mesh, unlike the
# classical constraint matrices that only include relations to neighboring cells.

# %%
# Such a matrix can also be defined for different ranges and a dip angle
Cdip = pg.matrix.GeostatisticConstraintsMatrix(mesh=mesh, I=[10, 3], dip=-25)
pg.show(mesh, pg.log10(pg.abs(Cdip * vec)), cMin=-6, cMax=0, cMap="magma_r")

# %%
# In order to illustrate the role of the constraints, we use a very simple
# mapping forward operator that retrieves the values in the mesh at some given
# positions. The constraints are therefore used as interpolation operators.
Example #30
# towards the third direction.
#
C = pg.matrix.GeostatisticConstraintsMatrix(mesh=mesh, I=5)

# %%
# In order to extract a column, we generate a vector with a single 1, multiply
vec = pg.Vector(mesh.cellCount())
vec[ind] = 1.0
cor = C * vec

# %%
# and plot it using a linear or logarithmic scale
kwLin = dict(cMin=-1, cMax=1, cMap="bwr")
ax, cb = pg.show(mesh, cor, **kwLin)
kwLog = dict(cMin=1e-3, cMax=1, cMap="magma_r", logScale=True)
ax, cb = pg.show(mesh, pg.abs(cor), **kwLog)

# %%
# The constraints have a rather small footprint compared to the correlation
# if one considers values below a certain threshold as insignificant.

# %%
# Such a matrix can also be defined for different ranges and dip angles, e.g.
Cdip = pg.matrix.GeostatisticConstraintsMatrix(mesh=mesh, I=[9, 2], dip=-25)
ax, cb = pg.show(mesh, Cdip * vec, **kwLin)
ax, cb = pg.show(mesh, pg.abs(Cdip * vec), **kwLog)

# %%
# Even on the linear scale, and more clearly on the log scale, one can see the
# regularization footprint in the shape of an ellipse.
Example #31
x = np.arange( -10, 10, 1. );
rho = g.RVector( len( mesh.cellAttributes() ), 1. ) * 2000.0
print(rho)
pnts = g.stdVectorRVector3()

for i in x:
    pnts.append( g.RVector3( i, 0.0001 ) )

gzNum = []
gzNum.append( g.calcGCells( pnts, mesh, rho, 0 )[0] )

#P.plot(x, gzNum[0], label = str(0) )

for i in range( 1, 10 ):
    gzNum.append( g.calcGCells( pnts, mesh, rho, i )[0] )

    err = g.abs(gzNum[i]/gzNum[0]-1.)*100.
    P.semilogy(x, err, label = str(i) )
    
    P.plot(x, gzNum[i], label = str(i) )

P.legend()
#P.plot(x, p1 )


test2d()
#test3d()

P.show()
Example #32
from pybert.manager import ERTManager
from pygimli.physics import Refraction
from pygimli.physics.traveltime import createRAData

mesh = pg.load("mesh.bms")
sensors = np.load("sensors.npy", allow_pickle=True)
rhotrue = np.loadtxt("rhotrue.dat")
veltrue = np.loadtxt("veltrue.dat")

pg.boxprint("Simulate apparent resistivities")

# Create more realistic data set
ertScheme = pb.createData(sensors, "dd", spacings=[1, 2, 4])
k = pb.geometricFactors(ertScheme)
ertScheme.markInvalid(pg.abs(k) > 5000)
ertScheme.removeInvalid()

ert = ERTManager()

# Create suitable mesh for ert forward calculation
# NOTE: In the published results paraMaxCellSize=1.0 was used, which is
# increased here to allow testing on Continuous Integration services.
meshERTFWD = mt.createParaMesh(ertScheme,
                               quality=33.5,
                               paraMaxCellSize=2.0,
                               paraDX=0.2,
                               boundaryMaxCellSize=50,
                               smooth=[1, 10],
                               paraBoundary=30)
pg.show(meshERTFWD)
Example #33
mesh = pg.meshtools.createParaMesh2dGrid(data.sensorPositions())


fop = DCMultiElectrodeModellingC(mesh, data, verbose=True)
print(dir(fop))
print(fop.jacobian())
print(fop.jacobian().rows())

#fop = pb.DCMultiElectrodeModelling(mesh, data)

fop.regionManager().region(1).setBackground(True)
fop.createRefinedForwardMesh(refine=True, pRefine=False)

cData = pb.getComplexData(data)
mag = pg.abs(cData)
phi = -pg.phase(cData)

print(pg.norm(mag-data('rhoa')))
print(pg.norm(phi-data('ip')/1000))

inv = pg.Inversion(pg.cat(mag, phi),
                    fop,
                    verbose=True, dosave=True)


dataTrans = pg.trans.TransCumulative()
datRe = pg.trans.TransLog()
datIm = pg.trans.Trans()
dataTrans.add(datRe, data.size())
dataTrans.add(datIm, data.size())
Example #34
 def animate(i):
     print(out + ": Frame:", i, "/", len(data))
     pg.mplviewer.setMappableData(gci, pg.abs(data[i]), cMin=cMin, cMax=cMax, logScale=logScale)
Example #35
vel = -pg.solver.grad(mesh, p) * np.asarray([K, K, K]).T

#ax, _ = pg.show(mesh, data=K, label='Hydraulic conductivity $K$ in m$/$s',
#cMin=1e-5, cMax=1e-2, nLevs=4, cmap='viridis')
#ax, _ = pg.show(mesh, data=pg.abs(vel), logScale=0, label='Velocity $v$ in m$/$s')
#ax, _ = pg.show(mesh, data=vel, ax=ax, color='black', linewidth=0.5, dropTol=1e-6)

print('Solve Advection-diffusion equation ...')
S = pg.RVector(mesh.cellCount(), 0.0)
# Fill injection source vector for a fixed injection position
sourceCell = mesh.findCell([-19.1, -4.6])
S[sourceCell.id()] = 1.0 / sourceCell.size()  #g/(l s)
# Choose 800 time steps for 6 days in seconds
t = pg.utils.grange(0, 6 * 24 * 3600, n=800)
# Create dispersitivity, depending on the absolute velocity
dispersion = pg.abs(vel) * 1e-2
# Solve for injection time, but we need velocities on cell nodes
vel = mt.cellDataToNodeData(mesh, np.asarray([pg.x(vel), pg.y(vel)]).T).T
c1 = pg.solver.solveFiniteVolume(mesh,
                                 a=dispersion,
                                 f=S,
                                 vel=vel,
                                 times=t,
                                 uB=[1, 0],
                                 scheme='PS',
                                 verbose=0)
# Solve without injection starting with last result
c2 = pg.solver.solveFiniteVolume(mesh,
                                 a=dispersion,
                                 f=0,
                                 vel=vel,
Example #36
        def _test_(mesh, p2=False, show=False):
            """
                \Laplace u = 0
                x = [0, -1], y, z
                int_0^1 u = 0

                du/dn =  1 (xMin) (inflow on -x)
                du/dn = -1 (xMax) (outflow on +x)
                du/dn = 0 (rest)
                u = 0.5 -x          linear solution, i.e., exact with p1

                du/dn = -1 (xMin) (outflow -x)
                du/dn = -1 (xMax) (outflow +x)
                du/dn = 0 (rest)
                u = -1/6 + x -x²    quadratic solution, i.e., exact with p2
            """
            uExact = lambda x, a, b, c: a + b * x + c * x**2
            bc = {'Neumann': {1: 1.0, 2: -1.0}}
            uE = uExact(pg.x(mesh), 0.5, -1.0, 0.0)

            if p2 is True:
                mesh = mesh.createP2()
                bc['Neumann'][1] = -1.0
                uE = uExact(pg.x(mesh), -1 / 6, 1.0, -1.0)

            u = pg.solve(mesh, a=1, f=0, bc=bc, verbose=0)
            v = pg.solver.grad(mesh, u)

            if show:
                if mesh.dim() == 1:
                    idx = np.argsort(pg.x(mesh))
                    fig, ax = pg.plt.subplots()
                    ax.plot(pg.x(mesh)[idx], uE[idx], label='exact')
                    ax.plot(pg.x(mesh)[idx], u[idx], 'o', label='FEM')

                    model, response = pg.frameworks.fit(uExact,
                                                        u[idx],
                                                        x=pg.x(mesh)[idx])
                    print(model)
                    ax.plot(pg.x(mesh)[idx], response, 'x', label='FIT')
                    ax.grid(True)
                    ax.legend()
                elif mesh.dim() == 2:
                    pg.show(mesh, u, label='u')
                    ax, _ = pg.show(mesh, pg.abs(v), label='abs(grad(u))')
                    pg.show(mesh, v, showMesh=1, ax=ax)

                pg.info("int Domain:", pg.solver.intDomain(u, mesh))
                pg.info("int Domain:",
                        pg.solver.intDomain([1.0] * mesh.nodeCount(), mesh),
                        sum(mesh.cellSizes()))

                pg.wait()
            ## test du/dn of solution and compare with Neumann BC
            for m, val in bc['Neumann'].items():
                for b in mesh.boundaries(mesh.boundaryMarkers() == m):
                    ## For non Taylor-Hood elements the gradient is only
                    # known at the cell center, so the accuracy of the
                    # gradient depends on the distance between boundary and
                    # cell center: du/dx = 1 - 2x, i.e., tolerance ~ 2 * dx
                    c = b.leftCell()
                    dx = c.center().dist(b.center())
                    dudn = b.norm(c).dot(v[c.id()])
                    if p2:
                        np.testing.assert_allclose(dudn, val, atol=2 * dx)
                        #print(dudn, val)
                    else:
                        np.testing.assert_allclose(dudn, val)

            np.testing.assert_allclose(pg.solver.intDomain([1.0]*\
                                                    mesh.nodeCount(), mesh),
                                       sum(mesh.cellSizes()))
            np.testing.assert_allclose(pg.solver.intDomain(u, mesh),
                                       0.0,
                                       atol=1e-8)
            np.testing.assert_allclose(np.linalg.norm(u - uE), 0.0, atol=1e-8)

            return v
Example #37
def calcApparentResistivities(mesh, meshERT, poro, rhoBrine):
    ert = ERT(verbose=False)

    meshFOP = appendTriangleBoundary(meshERT,
                                     xbound=50,
                                     ybound=50,
                                     marker=1,
                                     quality=34.0,
                                     smooth=False,
                                     markerBoundary=1,
                                     isSubSurface=False,
                                     verbose=False)

    swatch = pg.Stopwatch(True)

    print("res 1:", swatch.duration(True))

    resis = resistivityArchie(rBrine=rhoBrine,
                              porosity=poro,
                              S=1.0,
                              mesh=mesh,
                              meshI=meshFOP)

    print("res 2:", swatch.duration(True))

    ertPointsX = [pg.RVector3(x, 0) for x in np.arange(-19, 19.1, 1)]
    ertScheme = ert.createData(ertPointsX, scheme="Dipole Dipole (CC-PP)")

    solutionName = createCacheName('appRes', mesh) + "-" + \
        str(ertScheme.size()) + "-" + str(len(rhoBrine))

    try:
        rhoa = np.load(solutionName + '.bmat.npy')
        ertData = pb.DataContainerERT(solutionName + '.dat')
    except Exception as e:
        print(e)
        print("Building .... ")
        rhoa = np.zeros((len(resis), ertScheme.size()))
        ertScheme.set('k', pb.geometricFactor(ertScheme))
        ertData = ert.simulate(meshFOP, resis[0], ertScheme)

        errPerc = 1
        errVolt = 1e-5
        voltage = ertData('rhoa') / ertData('k')
        ertData.set('err', pg.abs(errVolt / voltage) + errPerc / 100.0)
        print('err min:',
              min(ertData('err')) * 100, 'max:',
              max(ertData('err')) * 100)
        ertData.save(solutionName + '.dat', 'a b m n rhoa err k')
        for i in range(0, len(resis)):
            tic = time.time()
            rhoa[i] = ert.fop.response(resis[i])

            rand = pg.RVector(len(rhoa[i]))
            pg.randn(rand)

            rhoa[i] *= (1.0 + rand * ertData('err'))

            print(i, "/", len(resis), " : ",
                  time.time() - tic, "s", "min:", min(resis[i]), "max:",
                  max(resis[i]), "min:", min(rhoa[i]), "max:", max(rhoa[i]))

        np.save(solutionName + '.bmat', rhoa)

    return meshFOP, resis, ertData, rhoa
Example #38
    def invert(self, data, values=None, verbose=0, **kwargs):
        """
        Invert the given data.

        A parametric mesh for the inversion will be created if none was given
        before.

        Parameters
        ----------
        """
        self.fop.setVerbose(verbose)
        self.inv.setVerbose(verbose)
        self.inv.setMaxIter(kwargs.pop('maxiter', 10))
        self.inv.setLambda(kwargs.pop('lambd', 10))

        if self.paraMesh is None:
            self.paraMesh = createParaMesh2dGrid(data.sensorPositions(),
                                                 **kwargs)
            self.setParaMesh(self.paraMesh)
            if verbose:
                print(self.paraMesh)


#                pg.show(self.paraMesh)

        err = data('err')
        rhoa = data('rhoa')

        startModel = pg.RVector(self.fop.regionManager().parameterCount(),
                                pg.median(rhoa))

        self.fop.setData(data)
        self.inv.setForwardOperator(self.fop)

        # check err here
        self.inv.setData(rhoa)
        self.inv.setError(err)
        self.inv.setModel(startModel)

        model = self.inv.run()

        if values is not None:

            if isinstance(values, pg.RVector):
                values = [values]
            elif isinstance(values, np.ndarray):
                if values.ndim == 1:
                    values = [values]

            allModel = pg.RMatrix(len(values), len(model))

            self.inv.setVerbose(False)
            for i in range(len(values)):
                print(i)
                tic = time.time()
                self.inv.setModel(model)
                self.inv.setReferenceModel(model)
                dData = pg.abs(values[i] / rhoa)

                relModel = self.inv.invSubStep(pg.log(dData))
                allModel[i] = model * pg.exp(relModel)
                print(i, "/", len(values), " : ",
                      time.time() - tic, "s min/max: ", min(allModel[i]),
                      max(allModel[i]))

            return allModel
        return model
Example #39
print('Solve Darcy equation ... ')
# Map regions to hydraulic conductivity in $m/s$
kMap = [[1, 1e-8], [2, 5e-3], [3, 1e-4], [4, 8e-4]]
# Map conductivity value per region to each cell in the given mesh
K = pg.solver.parseMapToCellArray(kMap, mesh)
# Dirichlet conditions for hydraulic potential
pBound = [[[1, 2, 3], 0.75], [[5, 6, 7], 0.0]]
# Solve for hydraulic potential
p = pg.solver.solveFiniteElements(mesh, a=K, bc={'Dirichlet': pBound})
# Solve velocity as gradient of hydraulic potential
vel = -pg.solver.grad(mesh, p) * np.asarray([K, K, K]).T

ax, _ = pg.show(mesh, data=K, label='Hydraulic conductivity $K$ in m$/$s',
                cMin=1e-5, cMax=1e-2, nLevs=4, cMap='viridis')
ax, _ = pg.show(mesh, data=pg.abs(vel), logScale=0,
                label='Velocity $v$ in m$/$s')
ax, _ = pg.show(mesh, data=vel, ax=ax, color='black', linewidth=0.5,
                dropTol=1e-6)

print('Solve Advection-diffusion equation ...')
S = pg.RVector(mesh.cellCount(), 0.0)
# Fill injection source vector for a fixed injection position
sourceCell = mesh.findCell([-19.1, -4.6])
S[sourceCell.id()] = 1.0 / sourceCell.size()  # g/(l s)
# Choose 800 time steps for 6 days in seconds
t = pg.utils.grange(0, 6 * 24 * 3600, n=800)
# Create dispersitivity, depending on the absolute velocity
dispersion = pg.abs(vel) * 1e-2
# Solve for injection time, but we need velocities on cell nodes
vel = mt.cellDataToNodeData(mesh, vel)
Example #40
    def invert(self, data, values=None, verbose=0, **kwargs):
        """
        Invert the given data.

        A parametric mesh for the inversion will be created if none was given
        before.

        Parameters
        ----------
        """
        self.fop.setVerbose(verbose)
        self.inv.setVerbose(verbose)
        self.inv.setMaxIter(kwargs.pop('maxiter', 10))
        self.inv.setLambda(kwargs.pop('lambd', 10))

        if self.paraMesh is None:
            self.paraMesh = createParaMesh2dGrid(data.sensorPositions(),
                                                 **kwargs)
            self.setParaMesh(self.paraMesh)
            if verbose:
                print(self.paraMesh)
#                pg.show(self.paraMesh)

        err = data('err')
        rhoa = data('rhoa')

        startModel = pg.RVector(self.fop.regionManager().parameterCount(),
                                pg.median(rhoa))

        self.fop.setData(data)
        self.inv.setForwardOperator(self.fop)

        # check err here
        self.inv.setData(rhoa)
        self.inv.setError(err)
        self.inv.setModel(startModel)

        model = self.inv.run()

        if values is not None:

            if isinstance(values, pg.RVector):
                values = [values]
            elif isinstance(values, np.ndarray):
                if values.ndim == 1:
                    values = [values]

            allModel = pg.RMatrix(len(values), len(model))

            self.inv.setVerbose(False)
            for i in range(len(values)):
                print(i)
                tic = time.time()
                self.inv.setModel(model)
                self.inv.setReferenceModel(model)
                dData = pg.abs(values[i] / rhoa)

                relModel = self.inv.invSubStep(pg.log(dData))
                allModel[i] = model * pg.exp(relModel)
                print(i, "/", len(values), " : ", time.time()-tic,
                      "s min/max: ", min(allModel[i]), max(allModel[i]))

            return allModel
        return model
Example #41
# Dirichlet conditions for hydraulic potential
pBound = [[[1, 2, 3], 0.75], [[5, 6, 7], 0.0]]
# Solve for hydraulic potential
p = pg.solver.solveFiniteElements(mesh, a=K, bc={'Dirichlet': pBound})
# Solve velocity as gradient of hydraulic potential
vel = -pg.solver.grad(mesh, p) * np.asarray([K, K, K]).T

ax, _ = pg.show(mesh,
                data=K,
                label='Hydraulic conductivity $K$ in m$/$s',
                cMin=1e-5,
                cMax=1e-2,
                nLevs=4,
                cMap='viridis')
ax, _ = pg.show(mesh,
                data=pg.abs(vel),
                logScale=0,
                label='Velocity $v$ in m$/$s')
ax, _ = pg.show(mesh,
                data=vel,
                ax=ax,
                color='black',
                linewidth=0.5,
                dropTol=1e-6)

print('Solve Advection-diffusion equation ...')
S = pg.Vector(mesh.cellCount(), 0.0)
# Fill injection source vector for a fixed injection position
sourceCell = mesh.findCell([-19.1, -4.6])
S[sourceCell.id()] = 1.0 / sourceCell.size()  # g/(l s)
# Choose 800 time steps for 6 days in seconds
Example #42
# %%
# as desired for a roughness operator. Therefore, an additional matrix called
# :py:mod:`pg.matrix.GeostatisticConstraintsMatrix`
# was implemented where this spur is corrected for.
# It is, like the correlation matrix, created by a mesh, a list of correlation
# lengths I, a dip angle that distorts the x/y plane, and a strike angle
# towards the third direction.
#
C = pg.matrix.GeostatisticConstraintsMatrix(mesh=mesh, I=5)

# %%
# In order to extract a certain column, we generate a vector with a single 1
vec = pg.Vector(mesh.cellCount())
vec[ind] = 1.0
ax, cb = pg.show(mesh,
                 pg.log10(pg.abs(C * vec)),
                 cMin=-6,
                 cMax=0,
                 cMap="magma_r")

# %%
# The constraints have a rather small footprint compared to the correlation
# (note the logarithmic scale) but still extend over the whole mesh, unlike the
# classical constraint matrices that only include relations to neighboring cells.

# %%
# Such a matrix can also be defined for different ranges and dip angles, e.g.
Cdip = pg.matrix.GeostatisticConstraintsMatrix(mesh=mesh, I=[10, 3], dip=-25)
ax, cb = pg.show(mesh,
                 pg.log10(pg.abs(Cdip * vec)),
                 cMin=-6,
Example #43
mesh = pg.meshtools.createParaMesh2dGrid(data.sensorPositions())


fop = DCMultiElectrodeModellingC(mesh, data, verbose=True)
print(dir(fop))
print(fop.jacobian())
print(fop.jacobian().rows())

#fop = pb.DCMultiElectrodeModelling(mesh, data)

fop.regionManager().region(1).setBackground(True)
fop.createRefinedForwardMesh(refine=True, pRefine=False)

cData = pb.getComplexData(data)
mag = pg.abs(cData)
phi = -pg.phase(cData)

print(pg.norm(mag-data('rhoa')))
print(pg.norm(phi-data('ip')/1000))

inv = pg.RInversion(pg.cat(mag, phi),
                    fop,
                    verbose=True, dosave=True)


dataTrans = pg.RTransCumulative()
datRe = pg.RTransLog()
datIm = pg.RTrans()
dataTrans.add(datRe, data.size())
dataTrans.add(datIm, data.size())
Example #44
geom = plc + cube

mesh = mt.createMesh(geom, area=4)

for bound in mesh.boundaries():
    x = bound.center().x()
    if x == mesh.xmin():
        bound.setMarker(1)
    elif x == mesh.xmax():
        bound.setMarker(2)

kMap = {1: 1e-4, 2: 1e-6}
kArray = pg.solver.parseMapToCellArray(list(kMap.items()), mesh)  # dict does not work
kArray = np.column_stack([kArray] * 3)

bc = {"Dirichlet": {1: 20.0, 2: 10.0}}

h = pg.solver.solveFiniteElements(mesh, kMap, bc=bc)
vel = -pg.solver.grad(mesh, h) * kArray

pg.show(mesh, h, label="Hydraulic head (m)")

ax, _ = pg.show(mesh, hold=True, alpha=0.3)
drawStreamLines(ax, mesh, vel, radius=.1, source_radius=10)
drawSlice(ax,
          mesh,
          normal=[0, 1, 0],
          data=pg.abs(vel),
          label="Absolute velocity")
ax.show()
Example #45
# We know the exact solution so we can compare it to the numerical results.
# Unfortunately, the point source singularity does not allow a good integration
# measure for the accuracy of the resulting field so we just look for the
# differences.
#
uAna = pg.Vector(
    list(
        map(lambda p__: uAnalytical(p__, sourcePosA, k, sigma),
            mesh.positions())))
uAna -= pg.Vector(
    list(
        map(lambda p__: uAnalytical(p__, sourcePosB, k, sigma),
            mesh.positions())))

ax = pg.show(mesh,
             data=pg.abs(uAna - u),
             cMap="Reds",
             orientation='horizontal',
             label='|$u_{exact}$ -$u$|',
             logScale=True,
             cMin=1e-7,
             cMax=1e-1,
             contourLines=False,
             nCols=12,
             nLevs=7,
             showMesh=True)[0]

#print('l2:', pg.pf(pg.solver.normL2(uAna-u)))
print('L2:', pg.pf(pg.solver.normL2(uAna - u, mesh)))
print('H1:', pg.pf(pg.solver.normH1(uAna - u, mesh)))
np.testing.assert_approx_equal(pg.solver.normL2(uAna - u, mesh),
Example #46
x = np.arange(-10, 10, 1.0)
rho = pg.RVector(len(mesh.cellAttributes()), 1.0) * 2000.0
print(rho)
pnts = pg.stdVectorRVector3()

for i in x:
    pnts.append(pg.RVector3(i, 0.0001))

gzNum = []
gzNum.append(pg.calcGCells(pnts, mesh, rho, 0)[0])

# plt.plot(x, gzNum[0], label=str(0))

for i in range(1, 10):
    gzNum.append(pg.calcGCells(pnts, mesh, rho, i)[0])

    err = pg.abs(gzNum[i] / gzNum[0] - 1.0) * 100.0
    plt.semilogy(x, err, label=str(i))

    plt.plot(x, gzNum[i], label=str(i))

plt.legend()
# plt.plot(x, p1 )


test2d()
# test3d()

plt.show()
Example #47
    def estimateError(self,
                      data,
                      absoluteError=0.001,
                      relativeError=0.03,
                      absoluteUError=None,
                      absoluteCurrent=0.1):
        """ Estimate error composed of an absolute and a relative part.
        This is a static method and will not alter any member of the Manager

        Parameters
        ----------
        absoluteError : float [0.001]
            Absolute data error in Ohm m. Need 'rhoa' values in data.

        relativeError : float [0.03]
            relative error level in %/100

        absoluteUError : float [None]
            Absolute potential error in V. Needs 'u' values in the data, or
            they are calculated from 'rhoa', 'k' and the current 'i'
            (absoluteCurrent is used if no 'i' is given).

        absoluteCurrent : float [0.1]
            Current level in A used to reconstruct absolute potentials (V)

        Returns
        -------
        error : Array
        """

        if relativeError >= 0.5:
            print("relativeError set to a value > 0.5 .. assuming this "
                  "is a percentage error level, dividing it by 100")
            relativeError /= 100.0

        if absoluteUError is None:
            if not data.allNonZero('rhoa'):
                pg.critical("We need apparent resistivity values "
                            "(rhoa) in the data to estimate a "
                            "data error.")
            error = relativeError + absoluteError / data('rhoa')
        else:
            u = None
            i = absoluteCurrent
            if data.haveData("i"):
                i = data('i')

            if data.haveData("u"):
                u = data('u')
            else:
                if data.haveData("r"):
                    u = data('r') * i
                elif data.haveData("rhoa"):
                    if data.haveData("k"):
                        u = data('rhoa') / data('k') * i
                    else:
                        pg.critical("We need (rhoa) and (k) in the"
                                    "data to estimate data error.")

                else:
                    pg.critical("We need apparent resistivity values "
                                "(rhoa) or impedances (r) "
                                "in the data to estimate data error.")

            error = pg.abs(absoluteUError / u) + relativeError

        return error
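A minimal numeric sketch of the two error formulas used above, with plain NumPy arrays standing in for the DataContainer fields (the values are made up for illustration):

import numpy as np

rhoa = np.array([100.0, 50.0, 10.0])   # apparent resistivities in Ohm m
relativeError, absoluteError = 0.03, 0.001

# resistivity-based estimate: relative part plus absolute part scaled by rhoa
err_rhoa = relativeError + absoluteError / rhoa   # -> 0.03001, 0.03002, 0.0301

# voltage-based estimate: absolute potential error relative to the voltage u
u = np.array([0.2, 0.05, 0.01])        # potentials in V
absoluteUError = 1e-5
err_u = np.abs(absoluteUError / u) + relativeError   # -> 0.03005, 0.0302, 0.031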
Example #48
print(mesh.cellSizes())

x = np.arange(-10, 10, 1.)
rho = pg.RVector(len(mesh.cellAttributes()), 1.) * 2000.0
print(rho)
pnts = pg.stdVectorRVector3()

for i in x:
    pnts.append(pg.RVector3(i, 0.0001))

gzNum = []
gzNum.append(pg.calcGCells(pnts, mesh, rho, 0)[0])

# plt.plot(x, gzNum[0], label=str(0))

for i in range(1, 10):
    gzNum.append(pg.calcGCells(pnts, mesh, rho, i)[0])

    err = pg.abs(gzNum[i] / gzNum[0] - 1.) * 100.
    plt.semilogy(x, err, label=str(i))

    plt.plot(x, gzNum[i], label=str(i))

plt.legend()
# plt.plot(x, p1 )

test2d()
# test3d()

plt.show()
Example #49
def solveFiniteVolume(mesh,
                      a=1.0,
                      b=0.0,
                      f=0.0,
                      fn=0.0,
                      vel=None,
                      u0=0.0,
                      times=None,
                      uL=None,
                      relax=1.0,
                      ws=None,
                      scheme='CDS',
                      **kwargs):
    r"""Solve partial differential equation with Finite Volumes.

    This function is a syntactic sugar proxy for using the Finite Volume
    functionality of the library core to solve elliptic and parabolic partial
    differential of the following type:

    .. math::

        \frac{\partial u}{\partial t} + \mathbf{v}\cdot\nabla u & = \nabla\cdot(a \nabla u) + b u + f(\mathbf{r},t)\\
        u(\mathbf{r}, t) & = u_B  \quad\mathbf{r}\in\Gamma_{\text{Dirichlet}}\\
        \frac{\partial u(\mathbf{r}, t)}{\partial \mathbf{n}} & = u_{\partial \text{B}}  \quad\mathbf{r}\in\Gamma_{\text{Neumann}}\\
        u(\mathbf{r}, t=0) & = u_0 \quad\text{with} \quad\mathbf{r}\in\Omega

    The Domain :math:`\Omega` and the Boundary :math:`\Gamma` are defined
    through the given mesh with appropriate boundary marker.

    The solution :math:`u(\mathbf{r}, t)` is given for each cell in the mesh.

    TODO:

     * Refactor with solver class and Runge-Kutta solver
     * non steady boundary conditions

    Parameters
    ----------
    mesh: :gimliapi:`GIMLI::Mesh`
        Mesh represents spatial discretization of the calculation domain
    a: value | array | callable(cell, userData)
        Stiffness weighting per cell values.
    b: value | array | callable(cell, userData)
        Scale for mass values b
    f: iterable(cell)
        Load vector
    fn: iterable(cell)
        TODO What is fn
    vel: ndarray (N,dim) | RMatrix(N,dim)
        Velocity field :math:`\mathbf{v}(\mathbf{r}, t=\text{const}) = \{[v_i]_j,\}`
        with :math:`i=[1\ldots 3]` for the mesh dimension
        and :math:`j = [0\ldots N-1]` with N either the amount of cells,
        nodes, or boundaries.
        Velocities per boundary are preferred and will be interpolated
        on demand.
    u0: value | array | callable(cell, userData)
        Starting field
    times: iterable
        Time steps to calculate for.
    ws : Workspace
        This can be an empty class that will be used as a Workspace to store
        and cache data.

        If ws is given: The system matrix is taken from ws or
        calculated once and stored in ws for further usage.

        The system matrix is cached in this Workspace as ws.S
        The LinearSolver with the factorized matrix is cached in
        this Workspace as ws.solver
        The rhs vector is only stored in this Workspace as ws.rhs
    scheme: str [CDS]
        Finite volume scheme:
        :py:mod:`pygimli.solver.diffusionConvectionKernel`
    **kwargs:
        * bc : Boundary Conditions dictionary, see pg.solver
        * uB : Dirichlet boundary conditions DEPRECATED
        * duB : Neumann boundary conditions DEPRECATED

    Returns
    -------
    u: ndarray(nTimes, nCells)
        Solution field for all time steps.
    """
    verbose = kwargs.pop('verbose', False)
    # The Workspace is to hold temporary data or preserve matrix rebuild
    # swatch = pg.core.Stopwatch(True)
    sparse = True

    workspace = pg.solver.WorkSpace()
    if ws:
        workspace = ws

    a = pg.solver.parseArgToArray(a, [mesh.cellCount(), mesh.boundaryCount()])
    b = pg.solver.parseArgToArray(b, mesh.cellCount())
    f = pg.solver.parseArgToArray(f, mesh.cellCount())
    fn = pg.solver.parseArgToArray(fn, mesh.cellCount())

    boundsDirichlet = None
    boundsNeumann = None

    # BEGIN check for Courant-Friedrichs-Lewy
    if vel is not None:

        if isinstance(vel, float):
            print("Warning! .. velocity is float and no vector field")

        # we need velocities for boundaries
        if len(vel) != mesh.boundaryCount():
            if len(vel) == mesh.cellCount():
                vel = pg.meshtools.cellDataToNodeData(mesh, vel)

            if len(vel) == mesh.nodeCount():
                vel = pg.meshtools.nodeDataToBoundaryData(mesh, vel)
            else:
                print("mesh:", mesh)
                print("vel:", vel.shape)

                raise Exception("Cannot determine data format for velocities")

        if times is not None:
            pg.solver.checkCFL(times, mesh, np.max(pg.abs(vel)))

    if not hasattr(workspace, 'S'):

        boundsDirichlet = []
        if 'bc' in kwargs:
            bct = dict(kwargs['bc'])
            if 'Dirichlet' in bct:
                boundsDirichlet += pg.solver.parseArgToBoundaries(
                    bct.pop('Dirichlet'), mesh)

            if 'Node' in bct:
                n = bct.pop('Node')
                boundsDirichlet.append([mesh.node(n[0]), n[1]])

            if 'Neumann' in bct:
                boundsNeumann = pg.solver.parseArgToBoundaries(
                    bct.pop('Neumann'), mesh)

        if 'uB' in kwargs:
            pg.deprecated('use new bc dictionary')
            boundsDirichlet = pg.solver.parseArgToBoundaries(
                kwargs['uB'], mesh)

        if 'duB' in kwargs:
            pg.deprecated('use new bc dictionary')
            boundsNeumann = pg.solver.parseArgToBoundaries(kwargs['duB'], mesh)

        workspace.S, workspace.rhsBCScales = diffusionConvectionKernel(
            mesh=mesh,
            a=a,
            b=b,
            uB=boundsDirichlet,
            duB=boundsNeumann,
            # u0=u0,
            fn=fn,
            vel=vel,
            scheme=scheme,
            sparse=sparse,
            userData=kwargs.pop('userData', None))

        dof = len(workspace.rhsBCScales)
        workspace.ap = np.zeros(dof)

        # for nonlinears
        if uL is not None:
            for i in range(dof):
                val = 0.0
                if sparse:
                    val = workspace.S.getVal(i, i) / relax
                    workspace.S.setVal(i, i, val)
                else:
                    val = workspace.S[i, i] / relax
                    workspace.S[i, i] = val

                workspace.ap[i] = val

        # print('FVM kernel 2:', swatch.duration(True))
    # endif: not hasattr(workspace, 'S'):

    workspace.rhs = np.zeros(len(workspace.rhsBCScales))
    workspace.rhs[0:mesh.cellCount()] = f  # * mesh.cellSizes()

    workspace.rhs += workspace.rhsBCScales

    # for nonlinear: relax progress with scaled last result
    if uL is not None:
        workspace.rhs += (1. - relax) * workspace.ap * uL
    # print('FVM: Prep:', swatch.duration(True))

    if not hasattr(times, '__len__'):

        if sparse and not hasattr(workspace, 'solver'):
            Sm = pg.matrix.SparseMatrix(workspace.S)
            # hold Sm until we have reference counting,
            # losing Sm here will kill LinSolver later
            workspace.Sm = Sm
            workspace.solver = pg.core.LinSolver(Sm, verbose=verbose)

        u = None
        if sparse:
            u = workspace.solver.solve(workspace.rhs)
        else:
            u = np.linalg.solve(workspace.S, workspace.rhs)
        # print('FVM solve:', swatch.duration(True))
        return u[0:mesh.cellCount():1]
    else:
        theta = kwargs.pop('theta', 0.5 + 1e-6)

        if sparse:
            I = pg.solver.identity(len(workspace.rhs))
        else:
            I = np.diag(np.ones(len(workspace.rhs)))

        progress = None
        if verbose:
            from pygimli.utils import ProgressBar
            progress = ProgressBar(its=len(times))

            print("Solve timesteps with Crank-Nicolson.")

        return pg.solver.crankNicolson(times,
                                       theta,
                                       workspace.S,
                                       I,
                                       f=workspace.rhs,
                                       u0=pg.solver.cellValues(mesh, u0),
                                       progress=progress)
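A hedged usage sketch for solveFiniteVolume, following the call pattern of Example #35 above; mesh, dispersion, S, vel and t are assumed to be prepared exactly as in that example:

import pygimli as pg

# advection-diffusion run over the time steps t (Crank-Nicolson internally);
# 'uB' is the deprecated Dirichlet shortcut also used in Example #35
c = pg.solver.solveFiniteVolume(mesh,
                                a=dispersion,   # dispersion per cell
                                f=S,            # injection source per cell
                                vel=vel,        # velocities on nodes/boundaries
                                times=t,
                                uB=[1, 0],
                                scheme='PS',
                                verbose=0)
# c holds one solution vector per time step, one value per cell (see docstring)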