Example #1
    def test_Convert(self):
        """
        """
        i = range(10)
        j = range(10)
        v = np.ones(10)

        # Construct SparseMap Matrix from python arrays
        A = pg.SparseMapMatrix(i, j, v)

        # Construct SparseMap -> CRS (compressed row storage)
        S = pg.SparseMatrix(A)

        # Construct CRS -> SparseMap
        A2 = pg.SparseMapMatrix(S)

        # all should be identity matrices
        np.testing.assert_equal(A2.getVal(1,1), 1.0)
        np.testing.assert_equal(sum(S * np.ones(S.cols())), S.rows())
        np.testing.assert_equal(sum(A2 * np.ones(A2.cols())), A2.rows())


        MAP1 = pg.SparseMapMatrix(r=3, c=15)
        CSR = pg.SparseMatrix(MAP1)
        MAP2 = pg.SparseMapMatrix(CSR)

        v3 = pg.RVector(3)
        v15 = pg.RVector(15)

        np.testing.assert_equal((MAP1*v15).size(), 3)
        np.testing.assert_equal((MAP1.transMult(v3)).size(), 15)

        np.testing.assert_equal((CSR*v15).size(), 3)
        np.testing.assert_equal((CSR.transMult(v3)).size(), 15)

        np.testing.assert_equal(MAP1.cols(), MAP2.cols())
        np.testing.assert_equal(CSR.cols(), MAP1.cols())
        np.testing.assert_equal(CSR.rows(), MAP1.rows())
        np.testing.assert_equal(MAP1.rows(), MAP2.rows())

        # testing SparseMatrix to Numpy
        csr = pg.SparseMapMatrix(r=4, c=5)
        check_rows = [0, 0, 1, 2, 3]
        check_cols = [0, 1, 2, 3, 4]
        check_csr_rows = [0, 1, 2, 3, 4]
        check_col_s_e = [0, 2, 3, 4, 5, 5]
        check_vals = np.array([1.0, 3, np.pi, 1e-12, -1.12345e13])
        for i in range(len(check_rows)):
            csr.addVal(check_rows[i], check_cols[i], check_vals[i])

        r, c, v = pg.utils.sparseMatrix2Array(csr)
        np.testing.assert_allclose(r, check_csr_rows)
        np.testing.assert_allclose(c, check_col_s_e)
        np.testing.assert_allclose(v, check_vals)

        r2, c2, v2 = pg.utils.sparseMatrix2Array(pg.SparseMatrix(csr),
                                                 getInCRS=False)
        np.testing.assert_allclose(r2, check_rows)
        np.testing.assert_allclose(c2, check_cols)
        np.testing.assert_allclose(v2, check_vals)
Example #2
def diff(v):
    """Calculate approximate derivative.

    Calculate approximate derivative from v as d = [v_1-v_0, v_2-v_1, ...]

    Parameters
    ----------
    v : array(N) | pg.R3Vector(N)
        Array of double values or positions

    Returns
    -------
    d : [type(v)](N-1)
        Derivative array

    Examples
    --------
    >>> import pygimli as pg
    >>> from pygimli.utils import diff
    >>> p = pg.R3Vector(4)
    >>> p[0] = [0.0, 0.0]
    >>> p[1] = [0.0, 1.0]
    >>> print(diff(p)[0])
    RVector3: (0.0, 1.0, 0.0)
    >>> print(diff(p)[1])
    RVector3: (0.0, -1.0, 0.0)
    >>> print(diff(p)[2])
    RVector3: (0.0, 0.0, 0.0)
    >>> p = pg.RVector(3)
    >>> p[0] = 0.0
    >>> p[1] = 1.0
    >>> p[2] = 2.0
    >>> print(diff(p))
    2 [1.0, 1.0]
    """
    d = None

    if isinstance(v, np.ndarray):
        if v.ndim == 2:
            if v.shape[1] < 4:
                # v = pg.R3Vector(v.T)
                vt = v.copy()
                v = pg.R3Vector(len(vt))
                for i, vi in enumerate(vt):
                    v.setVal(pg.RVector3(vi), i)
            else:
                v = pg.R3Vector(v)
        else:
            v = pg.RVector(v)
    elif isinstance(v, list):
        v = pg.R3Vector(v)

    if isinstance(v, pg.R3Vector) or isinstance(v, pg.stdVectorRVector3):
        d = pg.R3Vector(len(v) - 1)
    else:
        d = pg.RVector(len(v) - 1)

    for i, _ in enumerate(d):
        d[i] = v[i + 1] - v[i]
    return d
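
# Hedged usage sketch (not part of the original source): diff() also accepts a
# plain numpy 1-D array, which is converted to a pg.RVector internally, e.g.
#
#     >>> print(diff(np.array([0.0, 1.0, 4.0])))
#     2 [1.0, 3.0]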
Example #3
    def test_NumpyToScalar(self):
        """Implemented through automatic iterator """
        x = pg.RVector(2)
        x3 = pg.R3Vector(2)
        w = pg.RVector()

        x += np.float32(1.0)
        np.testing.assert_equal(sum(x + 1.0), 4.0)
        np.testing.assert_equal(sum(x + np.float32(1)), 4.0)
        np.testing.assert_equal(sum(x + np.float64(1)), 4.0)
        np.testing.assert_equal(sum(x - 1.0), 0.0)
        np.testing.assert_equal(sum(x - np.float32(1)), 0.0)
        np.testing.assert_equal(sum(x - np.float64(1)), 0.0)

        # HarmonicModelling(size_t nh, const RVector & tvec);
        pg.HarmonicModelling(np.int32(1), x)
        pg.HarmonicModelling(np.uint32(1), x)
        pg.HarmonicModelling(np.int64(1), x)
        pg.HarmonicModelling(np.uint64(1), x)

        # pg.PolynomialModelling(1, np.int32(1), x3, x);
        # pg.PolynomialModelling(1, np.int64(1), x3, x);
        # pg.PolynomialModelling(1, np.uint32(1), x3, x);
        # pg.PolynomialModelling(1, np.uint64(1), x3, x);

        x = pg.Pos(0.0, 0.0, 0.0)
        x += np.float32(1)

        np.testing.assert_equal(x, pg.Pos(1.0, 1.0, 1.0))
        np.testing.assert_equal(x - 1, pg.Pos(0.0, 0.0, 0.0))
        np.testing.assert_equal(x - np.float32(1), pg.Pos(0.0, 0.0, 0.0))
        np.testing.assert_equal(x - np.float64(1), pg.Pos(0.0, 0.0, 0.0))
Example #4
    def createConstraints(self):
        # First order smoothness matrix
        self._Ctmp = pg.RSparseMapMatrix()

        if self.corr_l is None:
            pg.info("Using smoothing with zWeight = %.2f." % self.zWeight)
            rm = self.RST.fop.regionManager()
            rm.fillConstraints(self._Ctmp)

            # Set zWeight
            rm.setZWeight(self.zWeight)
            self.cWeight = pg.RVector()
            rm.fillConstraintsWeight(self.cWeight)
            self._CW = pg.LMultRMatrix(self._Ctmp, self.cWeight)
        else:
            pg.info("Using geostatistical constraints with " +
                    str(self.corr_l))
            # Geostatistical constraints by Jordi et al., GJI, 2018
            CM = pg.utils.geostatistics.covarianceMatrix(self.mesh,
                                                         I=self.corr_l)
            self._Ctmp = pg.matrix.Cm05Matrix(CM)
            self._CW = self._Ctmp

        # Putting together in block matrix
        self._C = pg.RBlockMatrix()
        cid = self._C.addMatrix(self._CW)
        self._C.addMatrixEntry(cid, 0, 0)
        self._C.addMatrixEntry(cid, self._Ctmp.rows(), self.cellCount)
        self._C.addMatrixEntry(cid, self._Ctmp.rows() * 2, self.cellCount * 2)
        self._C.addMatrixEntry(cid, self._Ctmp.rows() * 3, self.cellCount * 3)
        self.setConstraints(self._C)

        # Identity matrix for interparameter regularization
        self._I = pg.IdentityMatrix(self.cellCount)

        self._G = pg.RBlockMatrix()
        iid = self._G.addMatrix(self._I)
        self._G.addMatrixEntry(iid, 0, 0)
        self._G.addMatrixEntry(iid, 0, self.cellCount)
        self._G.addMatrixEntry(iid, 0, self.cellCount * 2)
        self._G.addMatrixEntry(iid, 0, self.cellCount * 3)

        self.fix_val_matrices = {}
        # Optionally fix phases to starting model globally or in selected cells
        phases = ["water", "ice", "air", "rock matrix"]
        for i, phase in enumerate(
            [self.fix_water, self.fix_ice, self.fix_air, self.fix_poro]):
            name = phases[i]
            vec = pg.RVector(self.cellCount)
            if phase is True:
                pg.info("Fixing %s content globally." % name)
                vec += 1.0
            elif hasattr(phase, "__len__"):
                pg.info("Fixing %s content at selected cells." % name)
                phase = np.asarray(phase, dtype="int")
                vec[phase] = 1.0
            self.fix_val_matrices[name] = pg.matrix.DiagonalMatrix(vec)
            self._G.addMatrix(self.fix_val_matrices[name], self._G.rows(),
                              self.cellCount * i)
Example #5
    def inv2D(self,
              nlay,
              lam=100.,
              resL=1.,
              resU=1000.,
              thkL=1.,
              thkU=100.,
              minErr=1.0):
        """2d LCI inversion class."""
        if isinstance(nlay, int):
            modVec = pg.RVector(nlay * 2 - 1, 30.)
            cType = 0  # no reference model
        else:
            modVec = nlay
            cType = 10  # use this as reference model
            nlay = (len(modVec) + 1) // 2  # integer division for the layer count

        # init forward operator
        self.f2d = self.FOP2d(nlay)

        # transformations
        self.transData = pg.RTrans()
        self.transThk = pg.RTransLogLU(thkL, thkU)
        self.transRes = pg.RTransLogLU(resL, resU)

        for i in range(nlay - 1):
            self.f2d.region(i).setTransModel(self.transThk)

        for i in range(nlay - 1, nlay * 2 - 1):
            self.f2d.region(i).setTransModel(self.transRes)

        # set constraints
        self.f2d.region(0).setConstraintType(cType)
        self.f2d.region(1).setConstraintType(cType)

        # collect data vector
        datvec = pg.RVector(0)

        for i in range(len(self.x)):
            datvec = pg.cat(datvec, self.datavec(i))

        # collect error vector
        if self.ERR is None:
            error = 1.0
        else:
            error = []
            for i in range(len(self.x)):
                err = np.maximum(self.ERR[i][self.activeFreq] * 0.701, minErr)
                error.extend(err)

        # generate starting model by repetition
        model = pg.asvector(np.repeat(modVec, len(self.x)))
        INV = pg.RInversion(datvec, self.f2d, self.transData)
        INV.setAbsoluteError(error)
        INV.setLambda(lam)
        INV.setModel(model)
        INV.setReferenceModel(model)

        return INV
Example #6
    def createDefaultStartModel(self):
        """Build a start model from the mean real and (negated) imaginary data."""
        res = pb.getComplexData(self.data())
        parCount = self.regionManager().parameterCount()
        re = pg.RVector(parCount, pg.mean(pg.real(res)))
        im = pg.RVector(parCount, -pg.mean(pg.imag(res)))
        return pg.cat(re, im)
Example #7
def DebyeDecomposition(fr,
                       phi,
                       maxfr=None,
                       tv=None,
                       verbose=False,
                       zero=False,
                       err=0.25e-3,
                       lam=10.,
                       blocky=False):
    """Debye decomposition of a phase spectrum."""
    if maxfr is not None:
        idx = (fr <= maxfr) & (phi >= 0.)
        phi1 = phi[idx]
        fr1 = fr[idx]
        print("using frequencies from ", N.min(fr1), " to ", N.max(fr1), "Hz")
    else:
        idx = N.ones(len(fr), dtype=bool)  # keep all frequencies (idx is returned below)
        phi1 = phi
        fr1 = fr

    if tv is None:
        tmax = 1. / N.min(fr1) / 2. / N.pi * 4.
        tmin = 1. / N.max(fr1) / 2. / N.pi / 8.
        tvec = N.logspace(N.log10(tmin), N.log10(tmax), 30)
    else:
        tvec = tv

    f = DebyeModelling(fr1, tvec, zero=zero)
    tvec = f.t_
    tm = pg.RTransLog()
    start = pg.RVector(len(tvec), 1e-4)
    if zero:
        f.region(-1).setConstraintType(0)  # smoothness
        f.region(0).setConstraintType(1)  # smoothness
        f.region(1).setConstraintType(0)  # min length
        f.regionManager().setInterRegionConstraint(-1, 0, 1.)
        f.regionManager().setInterRegionConstraint(0, 1, 1.)
        f.region(-1).setTransModel(tm)
        f.region(0).setTransModel(tm)
        f.region(1).setTransModel(tm)
        f.region(-1).setModelControl(1000.)
        f.region(1).setModelControl(1000.)
    else:
        f.regionManager().setConstraintType(1)  # smoothness

    inv = pg.RInversion(pg.asvector(phi1 * 1e-3), f, verbose)
    inv.setAbsoluteError(pg.RVector(len(fr1), err))
    inv.setLambda(lam)
    inv.setModel(start)
    inv.setBlockyModel(blocky)
    if zero:
        inv.setReferenceModel(start)
    else:
        inv.setTransModel(tm)

    mvec = inv.run()
    resp = inv.response()

    return tvec, mvec, N.array(resp) * 1e3, idx
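
# Hedged usage sketch (assumed call pattern, not from the original source):
# fr [Hz] and phi [mrad] are numpy arrays; DebyeModelling, pg and N (numpy)
# come from the surrounding module.
#
#     fr = N.logspace(-1, 3, 25)
#     phi = 5. + 2. * N.exp(-(N.log10(fr) - 1.)**2)   # assumed synthetic phase
#     tvec, mvec, resp, idx = DebyeDecomposition(fr, phi, maxfr=100., lam=30.)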
Example #8
    def testComparison(self):
        a = pg.RVector(10, 1)
        b = pg.RVector(10, 2)

        np.testing.assert_equal(len(a < 1), 10)
        np.testing.assert_equal(len(a > 2), 10)

        np.testing.assert_equal(len(a < b), 10)
        np.testing.assert_equal(len(a > b), 10)
Example #9
def testShapefunctions():
    poly = g.Mesh(3)
    n0 = poly.createNode(0.0, 0.0, 0.0)
    n1 = poly.createNode(1.0, 0.0, 0.0)
    n2 = poly.createNode(0.0, 1.0, 0.0)
    n3 = poly.createNode(0.0, 0.0, 0.5)

    poly.createTetrahedron(n0, n1, n2, n3)
    prism = poly.createP2Mesh()

    u = g.RVector(prism.nodeCount(), 0.0)
    u[5] = 1.0
    prism.addExportData("u", u)

    #n3 = poly.createNode( 1.0, 1.0, 0.0 )
    #poly.createTriangle( n0, n1, n3 )
    #prism = g.createMesh3D( poly, g.asvector( np.linspace( 0, -1, 2 ) ) )
    #prism.exportVTK( "prism" )

    #mesh2 = g.createMesh2D( g.asvector( np.linspace( 0, 1, 10 ) ),
    #g.asvector( np.linspace( 0, 1, 10 ) ) )

    #mesh2 = mesh2.createH2Mesh()

    #g.interpolate( prism, mesh2 )

    #ax = g.viewer.showMesh( mesh2, mesh2.exportData('u'), filled = True, showLater = True )

    mesh3 = g.createMesh3D(g.asvector(np.linspace(0, 1, 11)),
                           g.asvector(np.linspace(0, 1, 11)),
                           g.asvector(np.linspace(0, 1, 11)))

    grads = g.stdVectorRVector3()
    c = prism.cell(0)
    uc = g.RVector(mesh3.nodeCount())

    for n in mesh3.nodes():
        p = c.shape().xyz(n.pos())

        if not c.shape().isInside(p):
            grads.append(g.RVector3(0.0, 0.0, 0.0))
            uc[n.id()] = 0.0
            continue

        uc[n.id()] = c.pot(p, u)
        print(uc[n.id()])
        gr = c.grad(p, u)
        grads.append(gr)

    g.interpolate(prism, mesh3)
    mesh3.addExportData('ua', uc)

    mesh3.exportVTK("prismHex", grads)

    P.show()
Example #10
    def test_ListToRVector(self):
        """Implemented in custom_rvalue.cpp."""
        l = [1.0, 2.0, 3.0, 4.0]
        a = pg.RVector(l)
        self.assertEqual(a.size(), len(l))
        self.assertEqual(pg.sum(a), sum(l))

        l = (0.2, 0.3, 0.4, 0.5, 0.6)
        x = pg.RVector(l)
        self.assertEqual(x.size(), len(l))
Example #11
def iterateBounds(inv, dchi2=0.5, maxiter=100, change=1.02):
    """Find parameter bounds by iterating model parameter.

    Find parameter bounds by iterating model parameter until error
    bound is reached

    Parameters
    ----------
    inv :
        gimli inversion object
    dchi2 :
        allowed variation of chi^2 values [0.5]
    maxiter :
        maximum iteration number for parameter iteration [100]
    change:
        changing factor of parameters [1.02, i.e. 2%]
    """
    f = inv.forwardOperator()

    model = inv.model()
    resp = inv.response()

    nd, nm = len(resp), len(model)
    modelU = np.zeros(nm)
    modelL = np.zeros(nm)
    maxchi2 = inv.chi2() + dchi2

    for im in range(nm):
        model1 = pg.RVector(model)
        chi2 = .0
        it = 0

        while (chi2 < maxchi2) & (it < maxiter):
            it += 1
            model1[im] *= change
            resp1 = f(model1)
            chi2 = inv.getPhiD(resp1) / nd

        modelU[im] = model1[im]

        model2 = pg.RVector(model)
        chi2 = 0.0
        it = 0

        while (chi2 < maxchi2) & (it < maxiter):
            it += 1
            model2[im] /= change
            resp2 = f(model2)
            chi2 = inv.getPhiD(resp2) / nd

        modelL[im] = model2[im]

    return modelL, modelU
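
# Hedged usage sketch (assumes an already-run GIMLi inversion object `inv`,
# e.g. a pg.RInversion as set up in the other examples):
#
#     modelL, modelU = iterateBounds(inv, dchi2=0.5, change=1.02)
#     # modelL/modelU bracket each parameter where chi^2 exceeds inv.chi2() + dchi2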
Example #12
    def test_NumpyToRVector(self):
        """Implemented in custom_rvalue.cpp."""
        x = np.arange(0, 1., 0.2)
        a = pg.RVector(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))

        x = np.arange(0, 1., 0.2, dtype=np.float64)
        a = pg.RVector(x)
        self.assertEqual(a.size(), len(x))
        self.assertEqual(pg.sum(a), sum(x))
Example #13
    def test_Performance(self):
        """
        """
        #pg.setDebug(True)

        sw = pg.Stopwatch(True)
        #print(timeit.repeat('r = grid.cellSizes() * np1', setup=setup, number=1000))
        #print(timeit.repeat('r = c * np1', setup=setup, number=1000))

        print(
            ("np(np)", timeit.repeat('np.array(np1)', setup=setup,
                                     number=5000)))
        print(("pg(pg)",
               timeit.repeat('pg.RVector(pg1)', setup=setup, number=5000)))
        print(("pg(np)",
               timeit.repeat('pg.RVector(np1)', setup=setup, number=5000)))
        print(
            ("np(pg)", timeit.repeat('np.array(pg1)', setup=setup,
                                     number=5000)))
        print(("np * np",
               timeit.repeat('np3 = np1 * np2', setup=setup, number=5000)))
        print(("pg * pg",
               timeit.repeat('pg3 = pg1 * pg2', setup=setup, number=5000)))
        print(("pg * np", timeit.repeat('pg1 * np1', setup=setup,
                                        number=5000)))
        print(("np * pg", timeit.repeat('np1 * pg1', setup=setup,
                                        number=5000)))
        print(("sum(np)", timeit.repeat('sum(np1)', setup=setup, number=300)))
        print(("sum(pg)", timeit.repeat('sum(pg1)', setup=setup, number=300)))
        print(
            ("pg.sum(pg)", timeit.repeat('pg.sum(pg1)',
                                         setup=setup,
                                         number=300)))
        print(
            ("pg.sum(st)", timeit.repeat('pg.sum(st)', setup=setup,
                                         number=300)))

        print(("s", sw.duration(True)))

        N = 10001
        np1 = np.linspace(1.1, 1.2, N)
        np2 = np.linspace(2.1, 2.1, N)

        pg1 = pg.RVector(np1)
        pg2 = pg.RVector(np2)

        # print(sw.duration(True))
        print((sum(np1 * np1)))
        print((sum(pg1 * pg1)))
        print((sum(np1 * pg1)))
        print((sum(pg1 * np1)))
Example #14
    def test_ListToRVector(self):
        """ implemented in custom_rvalue.cpp"""
        l = [1.0, 2.0, 3.0, 4.0]
        a = pg.RVector(l)
        self.assertEqual(a.size(), len(l))
        self.assertEqual(pg.sum(a), sum(l))

        l = (0.2, 0.3, 0.4, 0.5, 0.6)
        x = pg.RVector(l)
        self.assertEqual(x.size(), len(l))

        l = [1, 2, 3]
        x = pg.RVector(l)
        self.assertEqual(x.size(), len(l))
Example #15
    def __init__(self, fop, data, error, startmodel, lam=20, beta=10000,
                 maxIter=50, fwmin=0, fwmax=1, fimin=0, fimax=1, famin=0,
                 famax=1, frmin=0, frmax=1):
        LSQRInversion.__init__(self, data, fop, verbose=True, dosave=True)
        self._error = pg.RVector(error)

        # Set data transformations
        self.logtrans = pg.RTransLog()
        self.trans = pg.RTrans()
        self.dcumtrans = pg.RTransCumulative()
        self.dcumtrans.add(self.trans,
                           self.forwardOperator().RST.dataContainer.size())
        self.dcumtrans.add(self.logtrans,
                           self.forwardOperator().ERT.data.size())
        self.setTransData(self.dcumtrans)

        # Set model transformation
        n = self.forwardOperator().cellCount
        self.mcumtrans = pg.TransCumulative()
        self.transforms = []
        phase_limits = [[fwmin, fwmax], [fimin, fimax],
                        [famin, famax], [frmin, frmax]]
        for i, (lower, upper) in enumerate(phase_limits):
            if lower == 0:
                lower = 0.001
            self.transforms.append(pg.RTransLogLU(lower, upper))
            self.mcumtrans.add(self.transforms[i], n)

        self.setTransModel(self.mcumtrans)

        # Set error
        self.setRelativeError(self._error)

        # Set some defaults

        # Set maximum number of iterations (default is 20)
        self.setMaxIter(maxIter)

        # Regularization strength
        self.setLambda(lam)
        self.setDeltaPhiAbortPercent(0.25)

        fop = self.forwardOperator()
        fop.createConstraints()  # Important!
        ones = pg.RVector(fop._I.rows(), 1.0)
        phiVec = pg.cat(ones, startmodel)
        self.setParameterConstraints(fop._G, phiVec, beta)
        self.setModel(startmodel)
Example #16
    def simulate(mesh, res, scheme, verbose=False, **kwargs):
        """Forward calculation vor given mesh, data and resistivity."""
        fop = ERTModelling(verbose=verbose)
        # fop = ERTManager.createFOP(verbose=verbose)

        fop.setData(scheme)
        fop.setMesh(mesh, ignoreRegionManager=True)

        if not scheme.allNonZero('k'):
            scheme.set('k', pg.RVector(scheme.size(), -1))

        rhoa = None
        isArrayData = None

        if hasattr(res[0], '__iter__'):
            isArrayData = True
            rhoa = np.zeros((len(res), scheme.size()))
            for i, r in enumerate(res):
                rhoa[i] = fop.response(r)
        else:
            rhoa = fop.response(res)

        noiseLevel = kwargs.pop('noiseLevel', 0.0)
        if noiseLevel > 0:
            # 'noiseLevel' was already popped above; popping it again would
            # always return the default, so reuse the stored value here.
            err = noiseLevel + kwargs.pop('noiseAbs', 1e-4) / rhoa
            scheme.set('err', err)
            rhoa *= 1. + pg.randn(scheme.size()) * err

            if not isArrayData:
                scheme.set('rhoa', rhoa)

        if kwargs.pop('returnArray', False):
            return rhoa
        return scheme
Example #17
    def test_Interpolate(self):
        grid = pg.createGrid(x=[0.0, 1.0], y=[0.0, 1.0])
        u = pg.RVector(grid.nodeCount(), 1.)

        # test with pg.interpolate
        queryPos = [0.2, 0.2]
        uI = pg.interpolate(srcMesh=grid,
                            inVec=u,
                            destPos=[queryPos, queryPos])

        np.testing.assert_allclose(uI[0], 1.)

        # test manual interpolation
        c = grid.findCell(queryPos)
        uI = c.pot(queryPos, u)
        np.testing.assert_allclose(uI, 1.)

        # test with manual interpolationMatrix generation
        I = pg.RSparseMapMatrix(1, grid.nodeCount())
        cI = c.N(c.shape().rst(queryPos))
        for i in range(c.nodeCount()):
            I.addVal(0, c.node(i).id(), cI[i])

        uI = I.mult(u)
        np.testing.assert_allclose(uI[0], 1)

        # test with automatic interpolationMatrix generation
        I = grid.interpolationMatrix([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0],
                                      [0.0, 1.0]])
        uI = I * u
        np.testing.assert_allclose(uI, u)

        # api test https://github.com/gimli-org/gimli/issues/131
        x = np.linspace(grid.xmin(), grid.xmax(), 11)
        np.testing.assert_allclose(pg.interpolate(grid, pg.x(grid), x), x)
        np.testing.assert_allclose(
            pg.interpolate(grid, pg.x(grid.positions()), x=x), x)
        np.testing.assert_allclose(
            pg.interpolate(grid, pg.x(grid.positions()), x, x * 0.), x)
        np.testing.assert_allclose(
            pg.interpolate(grid, pg.x(grid.positions()), x=x, y=x * 0), x)
        np.testing.assert_allclose(
            pg.interpolate(grid, pg.x(grid.positions()), x, x * 0, x * 0), x)
        np.testing.assert_allclose(
            pg.interpolate(grid, pg.x(grid.positions()), x=x, y=x * 0,
                           z=x * 0), x)
        x = pg.Vector(x)
        np.testing.assert_allclose(
            pg.interpolate(grid, pg.x(grid.positions()), x), x)
        np.testing.assert_allclose(
            pg.interpolate(grid, pg.x(grid.positions()), x=x), x)
        np.testing.assert_allclose(
            pg.interpolate(grid, pg.x(grid.positions()), x, x * 0.), x)
        np.testing.assert_allclose(
            pg.interpolate(grid, pg.x(grid.positions()), x=x, y=x * 0), x)
        np.testing.assert_allclose(
            pg.interpolate(grid, pg.x(grid.positions()), x, x * 0, x * 0), x)
        np.testing.assert_allclose(
            pg.interpolate(grid, pg.x(grid.positions()), x=x, y=x * 0,
                           z=x * 0), x)
Example #18
def modelCavity1(maxArea=0.0025):
    boundary = []
    boundary.append([-1.0, -1.0])
    boundary.append([-0.5, -1.0])
    boundary.append([-0.5, -0.7])
    boundary.append([0.5, -0.7])
    boundary.append([0.5, -1.0])
    boundary.append([1.0, -1.0])
    boundary.append([1.0, 1.0])
    boundary.append([-1.0, 1.0])

    poly = pg.Mesh(2)
    nodes = [poly.createNode(b) for b in boundary]

    polyCreateDefaultEdges_(poly, boundaryMarker=[4, 4, 4, 4, 4, 2, 3, 1])
    mesh = createMesh(poly, quality=33.4, area=maxArea, smooth=[0, 10])

    # Diffusion coefficient, viscosity

    # pick the lowermost boundary with marker 1 and re-mark it as 7
    b7 = mesh.findBoundaryByMarker(1)[0]
    for b in mesh.findBoundaryByMarker(1):
        if b.center()[1] < b7.center()[1]:
            b7 = b
    b7.setMarker(7)

    velBoundary = [[1, [0.0, 0.0]], [2, [0.0, 0.0]], [3, [1.0, 0.0]],
                   [4, [0.0, 0.0]], [7, [0.0, 0.0]]]

    preBoundary = [[7, 0.0]]

    a = pg.RVector(mesh.cellCount(), 10000.0)
    return mesh, velBoundary, preBoundary, a, 100
Example #19
def grange(start, end, dx=0, n=0, log=False):
    """Create array with possible increasing spacing.

    Create either array from start step-wise filled with dx until end reached
    [start, end] (like np.array with defined end).
    Fill the array from start to end with n steps.
    [start, end] (like np.linespace)
    Fill the array from start to end with n steps but logarithmic increasing,
    dx will be ignored.

    Parameters
    ----------
    start: float
        First value of the resulting array
    end: float
        Last value of the resulting array
    dx: float
        Linear step length, n will be ignored
    n: int
        Amount of steps
    log: bool

    Examples
    --------
    >>> from pygimli.utils import grange
    >>> v1 = grange(start=0, end=10, dx=3)
    >>> v2 = grange(start=0, end=10, n=3)
    >>> print(v1)
    4 [0.0, 3.0, 6.0, 9.0]
    >>> print(v2)
    3 [0.0, 5.0, 10.0]

    Returns
    -------
    ret: :gimliapi:`GIMLI::RVector`
        Return resulting array
    """
    s = float(start)
    e = float(end)
    d = float(dx)

    if dx != 0:
        if end < start and dx > 0:
            # print("grange: decreasing range but increasing dx, swap dx sign")
            d = -d
        if end > start and dx < 0:
            # print("grange: increasing range but decreasing dx, swap dx sign")
            d = -d
        ret = pg.RVector(range(int(floor(abs((e - s) / d)) + 1)))
        ret *= d
        ret += s
        return ret

    elif n > 0:
        if not log:
            return grange(start, end, dx=(e - s) / (n - 1))
        else:
            return pg.increasingRange(start, end, n)
    else:
        raise Exception('Either dx or n has to be given.')
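
# Hedged usage sketch (not part of the original source): logarithmically
# increasing spacing via pg.increasingRange (dx is ignored when log=True).
#
#     v3 = grange(start=1.0, end=100.0, n=5, log=True)
#     print(v3)   # values from 1 to 100 with growing step size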
Example #20
    def response(self, par):
        """Yield response (function value for given coefficients)."""
        y = pg.RVector(self.x_.size(), par[0])

        for i in range(1, self.nc_):
            y += pg.pow(self.x_, i) * par[i]
        return y
Example #21
def sparseMatrix2coo(A):
    """Convert SparseMatrix to scipy.coo_matrix.

    Parameters
    ----------
    A: pg.SparseMapMatrix | pg.SparseMatrix
        Matrix to convert from.

    Returns
    -------
    mat: scipy.sparse.coo_matrix
        Converted matrix.
    """
    from scipy.sparse import coo_matrix
    vals = pg.RVector()
    rows = pg.IndexArray([0])
    cols = pg.IndexArray([0])
    if isinstance(A, pg.SparseMatrix):
        C = pg.RSparseMapMatrix(A)
        C.fillArrays(vals=vals, rows=rows, cols=cols)
        # coo_matrix expects the (data, (row, col)) triplet as a single tuple;
        # np is assumed to be numpy, imported at module level as elsewhere here
        return coo_matrix((np.array(vals), (np.array(rows), np.array(cols))))
    elif isinstance(A, pg.SparseMapMatrix):
        A.fillArrays(vals, rows, cols)
        return coo_matrix((np.array(vals), (np.array(rows), np.array(cols))))

    return coo_matrix(A)
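
# Hedged usage sketch (not from the original source; uses the SparseMapMatrix
# API shown in the tests above):
#
#     A = pg.SparseMapMatrix(r=3, c=3)
#     A.addVal(0, 0, 1.0)
#     A.addVal(1, 2, 2.5)
#     C = sparseMatrix2coo(A)    # scipy.sparse.coo_matrix
#     print(C.toarray())         # dense 3x3 array holding the two entries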
Example #22
def divergence(mesh, F):
    """Return the cell-wise divergence of the vector field F (via divergenceCell)."""
    div = pg.RVector(mesh.cellCount())

    for c in mesh.cells():
        div[c.id()] = divergenceCell(c, F)

    return div
Example #23
def drawSeismogramm(axes, mesh, u, ids, dt, i=None):
    r"""Extract and show time series from wave field

    Parameters
    ----------
    """
    axes.set_xlim(-20., 20.)
    axes.set_ylim(0., dt * len(u) * 1000)
    axes.set_aspect(1)
    axes.set_ylabel('Time in ms')

    if i is None:
        i = len(u) - 1

    t = np.linspace(0, i * dt * 1000, i + 1)

    for iw, n in enumerate(ids):
        pos = mesh.node(n).pos()
        print(pos)
        axes.plot(pos[0], 0.05, '^', color='black')

        trace = pg.cat(pg.RVector(0), u[:(i + 1), n])
        #        print(i+1, n)
        #        print(trace, (max(pg.abs(trace))))

        #        if max(pg.abs(trace)) > 1e-8:

        trace *= np.exp(0.5 * t)
        trace /= (max(pg.abs(trace)) * 1.5)

        drawWiggle(axes, trace, t=t, xoffset=pos[0])
    axes.invert_yaxis()
Example #24
def solveAdvection(mesh, vel, times, diffusion, verbose=False):
    """Solve Diffusion/Advection equation"""
    if verbose:
        print("Solve for concentration movement on", len(times),
              "time steps ...")

    S = pg.RVector(mesh.cellCount(), 0.0)
    injectPos = [-19.1, -4.6]
    sourceCell = mesh.findCell(injectPos)
    S[sourceCell.id()] = 1.0 / sourceCell.size()

    t = times[:int(len(times) / 2)]
    t = np.linspace(t[0], t[-1], len(t))

    c1 = pg.solver.solveFiniteVolume(mesh,
                                     a=diffusion,
                                     f=S,
                                     vel=vel,
                                     times=t,
                                     uB=[1, 0],
                                     scheme='PS',
                                     verbose=0)

    c2 = pg.solver.solveFiniteVolume(mesh,
                                     a=diffusion,
                                     f=0.,
                                     vel=vel,
                                     times=t,
                                     uB=[1, 0],
                                     u0=c1[-1],
                                     scheme='PS',
                                     verbose=0)
    c = np.vstack((c1, c2))

    return c[::]
Example #25
    def test_RVectorOP(self):
        v = pg.RVector(5, 1.0)

        self.assertEqual(sum(v + 1), 10)
        self.assertEqual(sum(v - 2), -5)
        self.assertEqual(sum(v * 2), 10)
        self.assertEqual(sum(v / 1), 5)
        self.assertEqual(sum(1 + v), 10)
        self.assertEqual(sum(-1 - v), -10)
        self.assertEqual(sum(2 * v), 10)
        self.assertEqual(sum(1 / v), 5)
        self.assertEqual(sum(v + 1.0), 10)
        self.assertEqual(sum(v - 2.0), -5)
        self.assertEqual(sum(v * 2.0), 10)
        self.assertEqual(sum(v / 1.0), 5)
        self.assertEqual(sum(1.0 + v), 10)
        self.assertEqual(sum(-1.0 - v), -10)
        self.assertEqual(sum(2.0 * v), 10)
        self.assertEqual(sum(1.0 / v), 5)

        v2 = np.ones(len(v)) * 0.01
        # check pg * np
        self.assertEqual(sum(v * v2), 5 * 0.01)
        # check np * pg
        self.assertEqual(sum(v2 * v), 5 * 0.01)
Example #26
    def test_Interpolate(self):
        grid = pg.createGrid(x=[0.0, 1.0], y=[0.0, 1.0])
        u = pg.RVector(grid.nodeCount(), 1.)

        # test with pg.interpolate
        queryPos = [0.2, 0.2]
        uI = pg.interpolate(srcMesh=grid,
                            inVec=u,
                            destPos=[queryPos, queryPos])

        np.testing.assert_allclose(uI[0], 1.)

        # test manual interpolation
        c = grid.findCell(queryPos)
        uI = c.pot(queryPos, u)
        np.testing.assert_allclose(uI, 1.)

        # test with manual interpolationMatrix generation
        I = pg.RSparseMapMatrix(1, grid.nodeCount())
        cI = c.N(c.shape().rst(queryPos))
        for i in range(c.nodeCount()):
            I.addVal(0, c.node(i).id(), cI[i])

        uI = I.mult(u)
        np.testing.assert_allclose(uI[0], 1)

        # test with automatic interpolationMatrix generation
        I = grid.interpolationMatrix([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0],
                                      [0.0, 1.0]])
        uI = I * u
        np.testing.assert_allclose(uI, u)
Example #27
def showSF(N, label=''):
    fig = P.figure()
    ax1 = fig.add_subplot(1, 2, 1, projection='3d')
    ax2 = fig.add_subplot(1, 2, 2)

    Nx = 21
    Ny = 21
    nLevels = 12

    tix = P.linspace(0.0, 1.0, Nx)
    tiy = P.linspace(0.0, 1.0, Ny)
    (X, Y) = P.meshgrid(tix, tiy)
    z = g.RVector(len(X.flat))

    for i, x in enumerate(X.flat):
        p = g.RVector3(X.flat[i], Y.flat[i])
        z[i] = N(p)

    Z = P.ma.masked_where(z == -99., z)
    Z = Z.reshape(Ny, Nx)

    ax2.contourf(X, Y, Z)
    ax2.set_aspect('equal')
    surf = ax1.plot_surface(X,
                            Y,
                            Z,
                            rstride=1,
                            cstride=1,
                            cmap=P.cm.jet,
                            linewidth=0)

    ax2.set_title(label + N.__str__())
    fig.colorbar(surf)
Example #28
def fitCCC(f,
           amp,
           phi,
           eRho=0.01,
           ePhi=0.001,
           lam=1000.,
           mstart=None,
           taupar=(1e-2, 1e-5, 100),
           cpar=(0.5, 0, 1)):
    """Fit complex spectrum by Cole-Cole model."""
    fCC = ColeColeComplex(f)
    tLog = pg.RTransLog()
    fCC.region(0).setStartValue(max(amp))
    if mstart is None:  # compute from amplitude decay
        mstart = 1. - min(amp) / max(amp)
    fCC.region(1).setParameters(mstart, 0, 1)  # m (start,lower,upper)
    fCC.region(2).setParameters(*taupar)  # tau
    fCC.region(3).setParameters(*cpar)  # c
    data = pg.cat(amp, phi)
    ICC = pg.RInversion(data, fCC, False)  # set up inversion class
    ICC.setTransModel(tLog)
    error = pg.cat(eRho * amp, pg.RVector(len(f), ePhi))
    ICC.setAbsoluteError(error)  # perr + ePhi/data)
    ICC.setLambda(lam)  # start with large damping and cool later
    ICC.setMarquardtScheme(0.8)  # lower lambda by 20%/it., no stop chi=1
    model = np.asarray(ICC.run())  # run inversion
    ICC.echoStatus()
    response = np.asarray(ICC.response())
    return model, response[:len(f)], response[len(f):]
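
# Hedged usage sketch (assumed call pattern; f, amp and phi are numpy arrays of
# equal length holding frequency, amplitude and phase; ColeColeComplex comes
# from the surrounding module):
#
#     model, ampResp, phiResp = fitCCC(f, amp, phi, lam=1000.)
#     # model holds [rho0, m, tau, c]; ampResp/phiResp are the fitted spectra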
Example #29
def randN(n, minVal=0.0, maxVal=1.0):
    """Create RVector of length n with normally distributed random numbers."""
    r = pg.RVector(n)
    pg.randn(r)
    r *= (maxVal-minVal)
    r += minVal
    return r
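
# Hedged usage sketch (not part of the original source):
#
#     r = randN(1000, minVal=0.0, maxVal=2.0)
#     print(pg.mean(r))   # roughly minVal, since the scaled normal stays centred there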
Example #30
    def responseDirect(self, model):
        y = pg.RVector(len(self.x_), model[0])

        for i in range(1, self.nc_):
            y += pg.pow(self.x_, i) * model[i]

        return y