Example #1
    def test_DataContainerFilter(self):
        """
        """
        data = pg.DataContainer()
        data.resize(5)

        data.markValid([0, 4])
        self.assertEqual(data('valid'), [1.0, 0.0, 0.0, 0.0, 1.0])

        data.markInvalid(pg.IndexArray(np.arange(5, dtype="long")))
        self.assertEqual(data('valid'), [0.0, 0.0, 0.0, 0.0, 0.0])
        
        data.markValid(np.arange(5, dtype="long"))
        self.assertEqual(data('valid'), [1.0, 1.0, 1.0, 1.0, 1.0])
        
        data.markInvalid(range(5))
        self.assertEqual(data('valid'), [0.0, 0.0, 0.0, 0.0, 0.0])

        x = np.arange(5, dtype='float')

        data.markValid(pg.Vector(x) > 2.0)
        self.assertEqual(data('valid'), [0.0, 0.0, 0.0, 1.0, 1.0])
        
        data.markValid(pg.BVector(x < 2.0))
        self.assertEqual(data('valid'), [1.0, 1.0, 0.0, 1.0, 1.0])

        data.markInvalid(pg.find(x > 3.0))
        self.assertEqual(data('valid'), [1.0, 1.0, 0.0, 1.0, 0.0])
        
        data.markInvalid(x < 1.0)
        self.assertEqual(data('valid'), [0.0, 1.0, 0.0, 1.0, 0.0])
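A standalone sketch of the same valid-flag bookkeeping, reduced to the essentials (my own minimal example, not part of the test suite); pg.find turns the 'valid' vector back into an index array:

import pygimli as pg

data = pg.DataContainer()
data.resize(4)
data.markValid([1, 3])                 # flag two entries as valid
print(data('valid'))                   # -> 0.0 1.0 0.0 1.0
print(pg.find(data('valid') == 1.0))   # index array of the valid entries: 1, 3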
Example #2
    def test_DataContainerFilter(self):
        """
        """
        data = pg.DataContainer()
        data.resize(5)

        data.markValid([0, 4])
        self.assertEqual(data('valid'), [1.0, 0.0, 0.0, 0.0, 1.0])

        data.markInvalid(pg.IndexArray(np.arange(5, dtype="long")))
        self.assertEqual(data('valid'), [0.0, 0.0, 0.0, 0.0, 0.0])

        data.markValid(np.arange(5, dtype="long"))
        self.assertEqual(data('valid'), [1.0, 1.0, 1.0, 1.0, 1.0])

        data.markInvalid(range(5))
        self.assertEqual(data('valid'), [0.0, 0.0, 0.0, 0.0, 0.0])

        x = np.arange(5, dtype='float')

        data.markValid(pg.Vector(x) > 2.0)
        self.assertEqual(data('valid'), [0.0, 0.0, 0.0, 1.0, 1.0])

        data.markValid(pg.BVector(x < 2.0))
        self.assertEqual(data('valid'), [1.0, 1.0, 0.0, 1.0, 1.0])

        data.markInvalid(pg.find(x > 3.0))
        self.assertEqual(data('valid'), [1.0, 1.0, 0.0, 1.0, 0.0])

        data.markInvalid(x < 1.0)
        self.assertEqual(data('valid'), [0.0, 1.0, 0.0, 1.0, 0.0])
Example #3
    def test_createPartMesh(self):
        mesh = pg.meshtools.createMesh1D(np.linspace(0, 1, 10))
        self.assertEqual(mesh.cellCount(), 9)

        mesh2 = mesh.createMeshByCellIdx(
            pg.find(pg.x(mesh.cellCenters()) < 0.5))
        self.assertEqual(mesh2.cellCount(), 4)
        self.assertEqual(mesh2.cellCenters()[-1][0] < 0.5, True)
Example #4
    def test_createPartMesh(self):
        mesh = pg.createMesh1D(np.linspace(0, 1, 10))
        self.assertEqual(mesh.cellCount(), 9)

        mesh2 = mesh.createMeshByCellIdx(
            pg.find(pg.x(mesh.cellCenters()) < 0.5))
        self.assertEqual(mesh2.cellCount(), 4)
        self.assertEqual(mesh2.cellCenters()[-1][0] < 0.5, True)
Example #5
def parseMapToCellArray(attributeMap, mesh, default=0.0):
    """
    Parse a value map to cell attributes.

    A map should consist of pairs of marker and value.
    A marker is an integer and corresponds to the cell.marker().

    Parameters
    ----------
    mesh : :gimliapi:`GIMLI::Mesh`
        For each cell of mesh a value will be returned.

    attributeMap : list | dict
        List of pairs [marker, value], a list of such pairs
        [[marker, value], ...], or a dictionary with marker keys

    default : float [0.0]
        Fill all unmapped attributes with this default value.

    Returns
    -------
    atts : array
        Array of length mesh.cellCount()
    """

    atts = pg.RVector(mesh.cellCount(), default)

    if isinstance(attributeMap, dict):
        raise Exception("Please implement me!")
    elif hasattr(attributeMap, '__len__'):
        if not hasattr(attributeMap[0], '__len__'):
            # assuming [marker, value]
            attributeMap = [attributeMap]

        for pair in attributeMap:
            if hasattr(pair, '__len__'):
                idx = pg.find(mesh.cellMarkers() == pair[0])
                if len(idx) == 0:
                    print("Warning! parseMapToCellArray: cannot find marker " +
                          str(pair[0]) + " within mesh.")
                else:
                    #print('---------------------')
                    #print(atts, idx, pair[1], type(pair[1]), float(pair[1]))
                    if isinstance(pair[1], (complex, np.complexfloating)):  # np.complex was removed in NumPy 1.24
                        #print('+++++++++++++++++')
                        if not isinstance(atts, pg.CVector):
                            atts = pg.toComplex(atts)
                        atts[idx] = pair[1]
                    else:
                        atts[idx] = float(pair[1])
            else:
                raise Exception("Please provide a list of [int, value] pairs" +
                                str(pair))
    else:
        print("attributeMap: ", attributeMap)
        raise Exception("Cannot interpret attributeMap!")

    return atts
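A minimal usage sketch of my own for parseMapToCellArray (marker numbers and values are invented): tag the two cells of a tiny grid with markers 1 and 2 and map them to values.

import numpy as np
import pygimli as pg

mesh = pg.createGrid(x=np.linspace(0.0, 2.0, 3), y=np.linspace(0.0, 1.0, 2))
for c in mesh.cells():
    c.setMarker(1 if c.center().x() < 1.0 else 2)

# marker 1 -> 100.0, marker 2 -> 10.0; unmapped markers keep `default`
atts = parseMapToCellArray([[1, 100.0], [2, 10.0]], mesh, default=0.0)
print(atts)  # one value per cell, length == mesh.cellCount()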
Example #6
def calcInvBlock(mesh, dens, out='gravInv'):

    # extract block delta density
    densBlock = pg.RVector(dens)
    densMarker2 = dens[pg.find(mesh.cellMarker() == 2)[0]]
#    densBlock[(mesh.cellMarker() == 1)|(mesh.cellMarker() == 3)] = densMarker2
    densBlock[pg.find((mesh.cellMarker() == 1) | (mesh.cellMarker() == 3))] = \
        densMarker2
    densBlock -= densMarker2

    # define measurement positions
    gravPointsX = np.linspace(-20, 20, 41)
    sensorPositions = np.vstack((gravPointsX, np.zeros(len(gravPointsX)))).T

    # solve analytical
    gz = solveGravimetry(mesh, densBlock, pnts=sensorPositions, complete=False)

    # add noise
    errAbs = 0.00001
    dzerr = np.random.randn(len(sensorPositions)) * errAbs
    gz = gz + dzerr

    # create parameter mesh
    paraMesh = pg.createGrid(x=np.linspace(-20, 20, 41),
                             y=np.linspace(-20, 0, 21))

    # init Gravimetry manager (should do meshing, simulation and noise handling)
    Grav = Gravimetry(verbose=True)

    model = Grav.invert(sensorPositions, gz, errAbs, verbose=1, mesh=paraMesh)

    fig, ax = plt.subplots()
    ax.plot(pg.x(sensorPositions), gz, label='gz')
    ax.plot(pg.x(sensorPositions), Grav.inv.response(), label='response')
    ax.legend()
    ax.grid()
    ax.set_xlabel('$x$ [m]')
    ax.set_ylabel(r'$\partial u / \partial z$ [mGal]')
    plt.show(block=False)
    ax.figure.savefig(out, bbox_inches='tight')

    return Grav, densBlock
Example #7
def calcInvBlock(mesh, dens, out='gravInv'):

    # extract block delta density
    densBlock = pg.RVector(dens)
    densMarker2 = dens[pg.find(mesh.cellMarker() == 2)[0]]
    #densBlock[(mesh.cellMarker() == 1) | (mesh.cellMarker() == 3)] = densMarker2
    densBlock[pg.find((mesh.cellMarker() == 1)
                      | (mesh.cellMarker() == 3))] = densMarker2
    densBlock -= densMarker2

    # define measurement positions
    gravPointsX = np.linspace(-20, 20, 41)
    sensorPositions = np.vstack((gravPointsX, np.zeros(len(gravPointsX)))).T

    # solve analytical
    gz = solveGravimetry(mesh, densBlock, pnts=sensorPositions, complete=False)

    # add noise
    errAbs = 0.00001
    dzerr = np.random.randn(len(sensorPositions)) * errAbs
    gz = gz + dzerr

    # create parameter mesh
    paraMesh = pg.createGrid(x=np.linspace(-20, 20, 41),
                             y=np.linspace(-20, 0, 21))

    # init Gravimetry manager (should do meshing, simulation and noise handling)
    Grav = Gravimetry(verbose=True)

    model = Grav.invert(sensorPositions, gz, errAbs, verbose=1, mesh=paraMesh)

    fig, ax = plt.subplots()
    ax.plot(pg.x(sensorPositions), gz, label='gz')
    ax.plot(pg.x(sensorPositions), Grav.inv.response(), label='response')
    ax.legend()
    ax.grid()
    ax.set_xlabel('$x$ [m]')
    ax.set_ylabel(r'$\partial u / \partial z$ [mGal]')
    plt.show(block=False)
    ax.figure.savefig(out, bbox_inches='tight')

    return Grav, densBlock
Example #8
def drawTravelTimeData(ax, data, t=None):
    """Draw first arrival traveltime data into mpl ax a.

    data of type pg.DataContainer must contain sensorIdx 's' and 'g'
    and thus being numbered internally [0..n)
    """
    x = pg.x(data.sensorPositions())
    # z = pg.z(data.sensorPositions())

    shots = pg.unique(pg.sort(data('s')))
    geoph = pg.unique(pg.sort(data('g')))

    startOffsetIDX = 0

    if min(min(shots), min(geoph)) == 1:
        startOffsetIDX = 1

    tShow = data('t')
    if t is not None:
        tShow = t

    ax.set_xlim([min(x), max(x)])
    ax.set_ylim([max(tShow), -0.002])
    ax.figure.show()

    for shot in shots:
        gIdx = pg.find(data('s') == shot)
        sensorIdx = [int(i__ - startOffsetIDX) for i__ in data('g')[gIdx]]
        ax.plot(x[sensorIdx], tShow[gIdx], 'x-')

    yPixel = ax.transData.inverted().transform_point((1, 1))[1] - \
        ax.transData.inverted().transform_point((0, 0))[1]
    xPixel = ax.transData.inverted().transform_point((1, 1))[0] - \
        ax.transData.inverted().transform_point((0, 0))[0]

    # draw shot points
    ax.plot(x[[int(i__ - startOffsetIDX) for i__ in shots]],
            np.zeros(len(shots)) + 8. * yPixel,
            'gv',
            markersize=8)

    # draw geophone points
    ax.plot(x[[int(i__ - startOffsetIDX) for i__ in geoph]],
            np.zeros(len(geoph)) + 3. * yPixel,
            'r^',
            markersize=8)

    ax.grid()
    ax.set_ylim([max(tShow), +16. * yPixel])
    ax.set_xlim([min(x) - 5. * xPixel, max(x) + 5. * xPixel])

    ax.set_xlabel('x-Coordinate [m]')
    ax.set_ylabel('Traveltime [ms]')
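A self-contained call sketch with invented sensor spacing and traveltimes; it assumes data.set also accepts the unregistered token 't' and plain numpy arrays (pygimli normally converts them):

import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg

data = pg.DataContainer()
data.registerSensorIndex('s')
data.registerSensorIndex('g')
for xs in [0.0, 10.0, 20.0, 30.0]:
    data.createSensor([xs, 0.0])

data.resize(3)
data.set('s', pg.RVector(3, 0))                 # one shot at the first sensor
data.set('g', pg.utils.grange(1, 3, 1))         # geophones at the remaining sensors
data.set('t', np.array([0.005, 0.012, 0.020]))  # first-arrival times [s]

fig, ax = plt.subplots()
drawTravelTimeData(ax, data)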
Example #9
    def checkData(self, data=None):
        """Return data from container.

        THINKABOUT: Data will be changed, or should the manager keep a copy?
        """
        data = data or pg.DataContainerERT(self.data)
        if isinstance(data, pg.DataContainer):
            if not data.allNonZero('k'):
                pg.warn("Data file contains no geometric factors (token='k').")
                data['k'] = createGeometricFactors(data, verbose=True)
            if self.fop.complex():
                if not data.haveData('rhoa'):
                    pg.critical('DataContainer has no "rhoa" values.')
                if not data.haveData('ip'):
                    pg.critical('DataContainer has no "ip" values.')

                # pg.warn('check sign of phases')
                rhoa = data['rhoa']
                phia = -data['ip'] / 1000  # 'ip' is defined for neg mrad.
                # we should think about some 'phia' in rad

                return pg.utils.squeezeComplex(pg.utils.toComplex(rhoa, phia))

            else:
                if not data.haveData('rhoa'):
                    if data.allNonZero('r'):
                        pg.info("Creating apparent resistivies from "
                                "impedences rhoa = r * k")
                        data['rhoa'] = data['r'] * data['k']
                    elif data.allNonZero('u') and data.allNonZero('i'):
                        pg.info("Creating apparent resistivies from "
                                "voltage and currrent rhoa = u/i * k")
                        data['rhoa'] = data['u'] / data['i'] * data['k']
                    else:
                        pg.critical("Datacontainer have neither: "
                                    "apparent resistivies 'rhoa', "
                                    "or impedances 'r', "
                                    "or voltage 'u' along with current 'i'.")

                if any(data['rhoa'] < 0) and \
                        isinstance(self.inv.dataTrans, pg.core.TransLog):
                    print(pg.find(data['rhoa'] < 0))
                    print(data['rhoa'][data['rhoa'] < 0])
                    pg.critical("Found negative apparent resistivities. "
                                "These can't be processed with logarithmic "
                                "data transformation. You should consider to "
                                "filter them out using "
                                "data.remove(data['rhoa'] < 0).")

                return data['rhoa']

        return data
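A small numeric sketch (values invented) of the two fallbacks coded above, rhoa = r * k and rhoa = u / i * k:

import numpy as np

k = np.array([10.0, 12.5])   # geometric factors
u = np.array([0.20, 0.10])   # measured voltages [V]
i = np.array([0.10, 0.10])   # injected currents [A]

r = u / i                    # transfer resistances (impedances) [Ohm]
print(r * k)                 # apparent resistivities rhoa = r * k
print(u / i * k)             # identical result via rhoa = u/i * k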
Example #10
def parseMapToCellArray(attributeMap, mesh, default=0.0):
    """
    Parse a value map to cell attributes.

    A map should consist of pairs of marker and value.
    A marker is an integer and corresponds to the cell.marker().

    Parameters
    ----------
    mesh : :gimliapi:`GIMLI::Mesh`
        For each cell of mesh a value will be returned.

    attributeMap : list | dict
        List of pairs [marker, value], a list of such pairs
        [[marker, value], ...], or a dictionary with marker keys

    default : float [0.0]
        Fill all unmapped attributes with this default value.

    Returns
    -------
    atts : array
        Array of length mesh.cellCount()
    """

    atts = pg.RVector(mesh.cellCount(), default)

    if isinstance(attributeMap, dict):
        raise Exception("Please implement me!")
    elif hasattr(attributeMap, '__len__'):
        if not hasattr(attributeMap[0], '__len__'):
            # assuming [marker, value]
            attributeMap = [attributeMap]

        for pair in attributeMap:
            if hasattr(pair, '__len__'):
                idx = pg.find(mesh.cellMarker() == pair[0])
                if len(idx) == 0:
                    print("Warning! parseMapToCellArray: cannot find marker " +
                          str(pair[0]) + " within mesh.")
                else:
                    #print(atts, idx, pair[1], float(pair[1]))
                    atts[idx] = float(pair[1])
            else:
                raise Exception("Please provide a list of [int, value] pairs!" +
                                str(pair))
    else:
        print("attributeMap: ", attributeMap)
        raise Exception("Cannot interpret attributeMap!")

    return atts
Example #11
def drawTravelTimeData(axes, data, t=None):
    """
        Draw first-arrival traveltime data into the given matplotlib axes.
        data of type DataContainer must contain the sensor indices 's' and 'g'
        and is thus numbered internally in [0..n).
    """

    x = pg.x(data.sensorPositions())
#    z = pg.z(data.sensorPositions())

    shots = pg.unique(pg.sort(data('s')))
    geoph = pg.unique(pg.sort(data('g')))

    startOffsetIDX = 0

    if min(min(shots), min(geoph)) == 1:
        startOffsetIDX = 1

    tShow = data('t')
    if t is not None:
        tShow = t

    axes.set_xlim([min(x), max(x)])
    axes.set_ylim([max(tShow), -0.002])
    axes.figure.show()

    for shot in shots:
        gIdx = pg.find(data('s') == shot)
        sensorIdx = [int(i__ - startOffsetIDX) for i__ in data('g')[gIdx]]
        axes.plot(x[sensorIdx], tShow[gIdx], 'x-')

    yPixel = axes.transData.inverted().transform_point((1, 1))[1] - \
        axes.transData.inverted().transform_point((0, 0))[1]
    xPixel = axes.transData.inverted().transform_point((1, 1))[0] - \
        axes.transData.inverted().transform_point((0, 0))[0]

    # draw shot points
    axes.plot(x[[int(i__ - startOffsetIDX) for i__ in shots]],
           np.zeros(len(shots)) + 8. * yPixel, 'gv', markersize=8)

    # draw geophone points
    axes.plot(x[[int(i__ - startOffsetIDX) for i__ in geoph]],
           np.zeros(len(geoph)) + 3. * yPixel, 'r^', markersize=8)

    axes.grid()
    axes.set_ylim([max(tShow), +16. * yPixel])
    axes.set_xlim([min(x) - 5. * xPixel, max(x) + 5. * xPixel])

    axes.set_xlabel('x-Coordinate [m]')
    axes.set_ylabel('Traveltime [ms]')
Example #12
def logDropTol(p, droptol=1e-3):
    """Create logarithmic scaled copy of p."""
    tmp = pg.RVector(p)

    tmp = pg.abs(tmp / droptol)
    tmp.setVal(1.0, pg.find(tmp < 1.0))

    #for i, v in enumerate(tmp):
        #tmp[i] = abs(tmp[i] / droptol)
        #if tmp[i] < 1.0: tmp[i] = 1.0

    tmp = pg.log10(tmp)
    tmp *= pg.sign(p)
    return tmp
Example #13
def logDropTol(p, droptol=1e-3):
    """
    Example
    -------
    >>> from pygimli.utils import logDropTol
    >>> x = logDropTol((-10,-1,0,1,100))
    >>> print(x.array())
    [-4. -3.  0.  3.  5.]
    """
    tmp = pg.RVector(p)

    tmp = pg.abs(tmp / droptol)
    tmp.setVal(1.0, pg.find(tmp < 1.0))

    tmp = pg.log10(tmp)
    tmp *= pg.sign(p)
    return tmp
Example #14
def logDropTol(p, droptol=1e-3):
    """Create logarithmic scaled copy of p.

    Examples
    --------
    >>> from pygimli.utils import logDropTol
    >>> x = logDropTol((-10, -1, 0, 1, 100))
    >>> print(x.array())
    [-4. -3.  0.  3.  5.]
    """
    tmp = pg.RVector(p)

    tmp = pg.abs(tmp / droptol)
    tmp.setVal(1.0, pg.find(tmp < 1.0))

    tmp = pg.log10(tmp)
    tmp *= pg.sign(p)
    return tmp
Example #15
    def test_RVectorIndexRW(self):

        v = pg.Vector(5, 2.0)
        np.testing.assert_array_equal(v, [2, 2, 2, 2, 2])

        v += 1.0
        np.testing.assert_array_equal(v, [3, 3, 3, 3, 3])

        v += 1
        np.testing.assert_array_equal(v, [4, 4, 4, 4, 4])

        v[1] = 1.0
        np.testing.assert_array_equal(v, [4, 1, 4, 4, 4])

        v[1] += 1.0
        np.testing.assert_array_equal(v, [4, 2, 4, 4, 4])

        v[[1,2]] = 2.0
        np.testing.assert_array_equal(v, [4, 2, 2, 4, 4])

        v[pg.IVector(1,3)] = 3.0
        np.testing.assert_array_equal(v, [4, 2, 2, 3, 4])

        v[pg.IVector(5,2)] = 1.0
        np.testing.assert_array_equal(v, [4, 2, 1, 3, 4])

        v[pg.find(v==4.0)] = 5.0
        np.testing.assert_array_equal(v, [5, 2, 1, 3, 5])

        v[v==5.0] = 4.0
        np.testing.assert_array_equal(v, [4, 2, 1, 3, 4])

        v[v==4.0] = 5.0
        np.testing.assert_array_equal(v, [5, 2, 1, 3, 5])

        #this will work only if we overwrite __iadd__
        #v[v==4.0] += 1.0
        #np.testing.assert_array_equal(v, [6, 2, 1, 3, 6])

        v.setVal(1.0, 1)
        np.testing.assert_array_equal(v, [5, 1, 1, 3, 5])
Example #16
    def test_RVectorIndexRW(self):

        v = pg.RVector(5, 2.0)
        np.testing.assert_array_equal(v, [2, 2, 2, 2, 2])

        v += 1.0
        np.testing.assert_array_equal(v, [3, 3, 3, 3, 3])

        v += 1
        np.testing.assert_array_equal(v, [4, 4, 4, 4, 4])

        v[1] = 1.0
        np.testing.assert_array_equal(v, [4, 1, 4, 4, 4])

        v[1] += 1.0
        np.testing.assert_array_equal(v, [4, 2, 4, 4, 4])

        v[[1,2]] = 2.0
        np.testing.assert_array_equal(v, [4, 2, 2, 4, 4])

        v[pg.IVector(1,3)] = 3.0
        np.testing.assert_array_equal(v, [4, 2, 2, 3, 4])

        v[pg.IVector(5,2)] = 1.0
        np.testing.assert_array_equal(v, [4, 2, 1, 3, 4])

        v[pg.find(v==4.0)] = 5.0
        np.testing.assert_array_equal(v, [5, 2, 1, 3, 5])

        v[v==5.0] = 4.0
        np.testing.assert_array_equal(v, [4, 2, 1, 3, 4])

        v[v==4.0] = 5.0
        np.testing.assert_array_equal(v, [5, 2, 1, 3, 5])

        #this will work only if we overwrite __iadd__
        #v[v==4.0] += 1.0
        #np.testing.assert_array_equal(v, [6, 2, 1, 3, 6])

        v.setVal(1.0, 1)
        np.testing.assert_array_equal(v, [5, 1, 1, 3, 5])
Example #17
def parseArgPairToBoundaryArray(pair, mesh):
    """
    Parse boundary related pair argument to
    [ :gimliapi:`GIMLI::Boundary`, value|callable ] list.

    Parameters
    ----------

    pair : tuple

        - [marker, arg]
        - [[marker, ...], arg]
        - [boundary, arg]
        - [[boundary,...], arg]
        - [node, arg]

        arg will be parsed by
        :py:mod:`pygimli.solver.solver.generateBoundaryValue`
        and distributed to each boundary.
        Callable functions will be executed at runtime.

    mesh : :gimliapi:`GIMLI::Mesh`
        Used to find boundaries by marker

    Returns
    -------

    boundaries : list()
        [ :gimliapi:`GIMLI::Boundary`, value|callable ]
    """
    boundaries = []
    bounds = []

    #    print('+'*30, pair)
    if isinstance(pair[0], int):
        bounds = mesh.findBoundaryByMarker(pair[0])
    if isinstance(pair[0], list):
        # [[,,..], ]
        for b in pair[0]:
            for bi in mesh.boundaries(pg.find(mesh.boundaryMarkers() == b)):
                bounds.append(bi)

    elif isinstance(pair[0], pg.stdVectorBounds):
        bounds = pair[0]
    elif isinstance(pair[0], pg.Boundary):
        boundaries.append(pair)
        return boundaries
    elif isinstance(pair[0], pg.Node):
        boundaries.append(pair)
        return boundaries

    for b in bounds:
        val = None
        if hasattr(pair[1], '__call__'):
            # don't execute the callable here in init,
            # we want to call them at runtime
            val = pair[1]
        else:
            val = generateBoundaryValue(b, pair[1])
        boundaries.append([b, val])

    return boundaries
Example #18
            [vel_layered, vel_gradient])):
    pg.boxprint(case)
    if case is "gradient":
        ana = analyticalSolutionGradient
    elif case is "layered":
        ana = analyticalSolution2Layer
    for boundary in mesh.boundaries():
        boundary.setMarker(0)

    xmin, xmax = mesh.xmin(), mesh.xmax()
    mesh.createNeighbourInfos()

    # In order to use the Dijkstra, we extract the surface positions >0
    mx = pg.x(mesh.positions())
    my = pg.y(mesh.positions())
    fi = pg.find((my == 0.0))
    px = np.sort(mx(fi))

    # A data container with index arrays named s (shot) and g (geophones) is
    # created and filled with the positions and shot/geophone indices.
    data = pg.DataContainer()
    data.registerSensorIndex('s')
    data.registerSensorIndex('g')

    for i, pxi in enumerate(px):
        data.createSensor([pxi, 0.0])
        if pxi == 0:
            source = i

    ndata = len(px) - 1
    data.resize(ndata)
Example #19
def fillEmptyToCellArray(mesh, vals, slope=True):
    """
    Prolongate empty cell values to complete cell attributes.

    It is possible that you have zero values that need to be filled with
    appropriate attributes. This function tries to fill the empty values by
    successive prolongation of the non-zero values.

    Parameters
    ----------
    mesh : :gimliapi:`GIMLI::Mesh`
        For each cell of mesh a value will be returned.

    vals : array
        Array of size cellCount().

    Returns
    -------
    atts : array
        Array of length mesh.cellCount()
    """
    atts = pg.RVector(mesh.cellCount(), 0.0)
    oldAtts = mesh.cellAttributes()
    mesh.setCellAttributes(vals)
    mesh.createNeighbourInfos()
    # std::vector< Cell * >
    # empties = []

    if slope:
        # search all cells with empty neighbours
        ids = pg.find(mesh.cellAttributes() != 0.0)

        for c in mesh.cells(ids):
            for i in range(c.neighbourCellCount()):
                nc = c.neighbourCell(i)

                if nc:
                    if nc.attribute() == 0.0:
                        # c.setAttribute(99999)

                        b = pg.findCommonBoundary(c, nc)
                        # search along a slope
                        pos = b.center() - b.norm() * 1000.
                        sf = pg.RVector()
                        startCell = c

                        while startCell:

                            startCell.shape().isInside(pos, sf, False)
                            nextC = startCell.neighbourCell(sf)
                            if nextC:
                                if nextC.attribute() == 0.0:
                                    nextC.setAttribute(c.attribute())
                                else:
                                    break

                            startCell = nextC

    mesh.fillEmptyCells(mesh.findCellByAttribute(0.0), background=-1)
    atts = mesh.cellAttributes()
    mesh.setCellAttributes(oldAtts)
    return atts
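A short usage sketch of my own for this variant: only the left half of a small grid carries values and the zero half is filled from it (grid size and the value 5.0 are arbitrary):

import numpy as np
import pygimli as pg

mesh = pg.createGrid(x=np.linspace(0, 1, 6), y=np.linspace(0, 1, 6))
vals = pg.RVector(mesh.cellCount(), 0.0)
vals[pg.find(pg.x(mesh.cellCenters()) < 0.5)] = 5.0  # known left half

filled = fillEmptyToCellArray(mesh, vals, slope=False)
print(min(filled), max(filled))  # the zeros were filled from the non-zero cells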
Example #20
def test2d():
    mesh = pg.Mesh("mesh/world2d.bms")
    print(mesh)

    xMin = mesh.boundingBox().min()[0]
    xMax = mesh.boundingBox().max()[0]
    x = np.arange(xMin, xMax, 1.0)

    mesh.createNeighbourInfos()
    rho = pg.RVector(len(mesh.cellAttributes()), 1.0) * 2000.0
    rho.setVal(0.0, pg.find(mesh.cellAttributes() == 1.0))

    swatch = pg.Stopwatch(True)
    pnts = []
    spnts = pg.stdVectorRVector3()

    for i in x:
        pnts.append(pg.RVector3(i, 0.0001))
        spnts.append(pg.RVector3(i, 0.0001))

    #    gzC, GC = calcGCells(pnts, mesh, rho, 1)
    gzC = pg.calcGCells(spnts, mesh, rho, 1)
    print ("calcGCells", swatch.duration(True))
    #    gzB, GB = calcGBounds(pnts, mesh, rho)
    #    gzB = pg.calcGBounds(spnts, mesh, rho)
    #    print("calcGBounds", swatch.duration(True))

    gZ_Mesh = gzC

    ax1, ax2 = getaxes()

    # sphere analytical solution
    gAna = analyticalCircle2D(spnts, radius=2.0, pos=pg.RVector3(0.0, -5.0), dDensity=2000)
    gAna2 = analyticalCircle2D(spnts, radius=2.0, pos=pg.RVector3(5.0, -5.0), dDensity=2000)

    gAna = gAna + gAna2

    ax1.plot(x, gAna, "-x", label="analytical")
    ax1.plot(x, gZ_Mesh, label="WonBevis1987-mesh")

    print (gAna / gZ_Mesh)

    #    rho=GB[0]/mesh.cellSizes()

    drawModel(ax2, mesh, rho)
    for i in (0, 1):
        drawSelectedMeshBoundaries(ax2, mesh.findBoundaryByMarker(i), color=(1.0, 1.0, 1.0, 1.0), linewidth=0.3)

    # sphere polygone
    radius = 2.0
    depth = 5.0
    poly1 = pg.stdVectorRVector3()
    poly2 = pg.stdVectorRVector3()
    nSegment = 124
    for i in range(nSegment):
        xp = np.sin((i + 1) * (2.0 * np.pi) / nSegment)
        yp = np.cos((i + 1) * (2.0 * np.pi) / nSegment)
        poly1.append(pg.RVector3(xp * radius, yp * radius - depth))
        poly2.append(pg.RVector3(xp * radius + 5.0, yp * radius - depth))

    gZ_Poly = calcPolydgdz(spnts, poly1, 2000)
    gZ_Poly += calcPolydgdz(spnts, poly2, 2000)

    ax1.plot(x, gZ_Poly, label="WonBevis1987-Poly")
    ax2.plot(pg.x(poly1), pg.y(poly1), color="red")
    ax2.plot(pg.x(poly2), pg.y(poly2), color="red")

    ax2.plot(pg.x(spnts), pg.y(spnts), marker="x", color="black")

    # test some special case
    for i, p in enumerate(poly1):
        poly1[i] = pg.RVector3(poly1[i] - pg.RVector3(5.0, -6.0))

    ax2.plot(pg.x(poly1), pg.y(poly1), color="green")

    gz = calcPolydgdz(spnts, poly1, 2000)
    ax1.plot(x, gz, label="Special Case", color="green")
    ax1.set_ylabel("dg/dz [mGal]")
    ax2.set_ylabel("Tiefe [m]")

    ax1.legend()
    ax2.set_xlim([x[0], x[-1]])
    ax2.grid()
Example #21
def drawShapes(ax, mesh, u):
    '''Draw the interpolation and gradient field of u on the first cell of mesh.'''
    
    ax.set_aspect('equal')

    Nx = 21
    Ny = 21
    nLevels = 12

    tix = np.linspace(-1.0, 1.0, Nx)
    tiy = np.linspace(-1.0, 1.0, Ny)
    (X,Y) = np.meshgrid(tix, tiy)

    uc = g.RVector(len(X.flat))
    pnts = []
    
    c = mesh.cell(0)
    
    imax = g.find(u == max(u))[0]
    
    for i in range(c.nodeCount()):
        print(c.rst(i))
    print(imax)
    print(c.createShapeFunctions()[imax])
    print("dx", c.createShapeFunctions()[imax].derive(0))
    print("dy", c.createShapeFunctions()[imax].derive(1))
    
    # draw nodes
    for i in range(c.nodeCount()):
        col = 'black'
        if i == imax:
            col = 'red'
            
        ax.plot(c.node(i).pos()[0], c.node(i).pos()[1], '.', markersize = 12, linewidth=0, color=col)
        
    # draw boundary
    drawMeshBoundaries(ax, mesh)
    ptns = []
    grads = []

    swatch = g.Stopwatch(True)
    for i, x in enumerate(X.flat):
        p = c.shape().xyz(g.RVector3(X.flat[i], Y.flat[i]))
        
        X.flat[i] = p[0]
        Y.flat[i] = p[1]
        
        #ax.plot(p[0], p[1], '.', zorder=10, color='black', markersize = 1)
        
        if not c.shape().isInside(p): 
            uc[i] = -99.0
            continue
            
        uc[i] = c.pot(p, u) 
        
        gr = c.grad(p, u)
        ptns.append(p)
        grads.append(gr)

    print(swatch.duration(True))
    
    for i, p in enumerate(ptns):
        #print p, grads[i]
        ax.quiver(p[0], p[1], grads[i][0], grads[i][1], zorder=10)

    Z = np.ma.masked_where(uc == -99., uc)
    Z = Z.reshape(Ny, Nx)
    cs = ax.contourf(X, Y, Z, nLevels)
Example #22
def rrmswitherr(a, b, err, errtol=1):
    """Compute root mean square of values with error above a threshold"""
    fi = pg.find(err < errtol)
    return pg.rrms(a[fi], b[fi])
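A hypothetical call with made-up numbers; the last value has a large error and is therefore excluded from the misfit (assuming pg.rrms accepts the numpy slices, which pygimli normally converts):

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])        # e.g. measured data
b = np.array([1.1, 1.9, 3.1, 40.0])       # e.g. model response
err = np.array([0.02, 0.03, 0.05, 2.0])   # relative errors, last one unusable

print(rrmswitherr(a, b, err, errtol=1))   # rrms over the three reliable values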
Example #23
def harmfit(y,
            x=None,
            error=None,
            nc=42,
            resample=None,
            lam=0.1,
            window=None,
            verbose=False,
            dosave=False,
            lineSearch=True,
            robust=False,
            maxiter=20):
    """HARMFIT - GIMLi based curve-fit by harmonic functions
        Parameters
        ----------
        y : 1d-array - values to be fitted

        x : 1d-array(len(y)) - data abscissa data. default: [0 .. len(y))

        error : 1d-array(len(y)) error of y. default (absolute error = 0.01)

        nc : int - Number of harmonic coefficients

        resample : 1d-array - resample y to x using fitting coeffients

        window : int - just fit data inside window bounds

        Returns
        -------
        response : 1d-array(len(resample) or len(x)) - smoothed values

        coefficients : 1d-array - fitting coefficients
    """
    if x is None:
        x = np.arange(len(y))
#    else:
#        if not isinstance(x, pg.RVector):
#            x = pg.asvector(x)
#
#    if not isinstance(y, pg.RVector):
#        y = pg.asvector(y)

    xToFit = None
    yToFit = None

    if window is not None:
        idx = pg.find((x >= window[0]) & (x < window[1]))
        #        idx = getIndex(x , lambda v: v > window[0] and v < window[1])

        xToFit = x(idx)
        yToFit = y(idx)

        if error is not None:
            error = error(idx)
    else:
        xToFit = x
        yToFit = y

#    print xToFit
#    print yToFit
    fop = pg.HarmonicModelling(nc, xToFit, verbose)
    inv = pg.RInversion(yToFit, fop, verbose, dosave)
    if error is not None:
        inv.setAbsoluteError(error)
    else:
        inv.setAbsoluteError(0.01)

    inv.setMarquardtScheme(0.8)
    if error is not None:
        inv.stopAtChi1(True)
    inv.setLambda(lam)
    inv.setMaxIter(maxiter)
    inv.setLineSearch(lineSearch)
    inv.setRobustData(robust)
    # inv.setConstraintType(0)

    coeff = inv.run()
    print(inv.chi2())

    if resample is not None:
        if not isinstance(resample, pg.RVector):
            resample = pg.asvector(resample)

        ret = fop.response(coeff, resample)

        if window is not None:
            # print pg.find((resample < window[0]) | (resample >= window[1]))
            ret.setVal(
                0.0, pg.find((resample < window[0]) | (resample >= window[1])))
#            idx = getIndex(resample,
#                           lambda v: v <= window[0] or v >= window[1])
#            for i in idx: ret[i] = 0.0
        return ret, coeff
    else:
        return inv.response(), coeff
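A hypothetical smoothing run on synthetic data (all numbers invented); as the commented-out conversions above suggest, it relies on the bindings converting the numpy arrays to pygimli vectors:

import numpy as np

x = np.linspace(0.0, 10.0, 200)
y = np.sin(x) + np.random.randn(len(x)) * 0.1   # noisy curve

yFit, coeff = harmfit(y, x=x, nc=8, lam=0.01, verbose=False)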
Example #24
# The second problem for pure Neumann domains is the non-uniqueness of
# the partial differential equation (there are only partial derivatives of the
# electric potential so an arbitrary value might be added, i.e. calibrated).
#
# Therefore we add a calibration node with marker -1000, where the potential is
# fixed, somewhere on the boundary and far from the electrodes.

plc.createNode([0.75, 0.25, 0.5], marker=-1000)

###############################################################################
# For sufficient numerical accuracy it is generally a good idea to refine the
# mesh in the vicinity of the electrode positions.
# We force the local mesh refinement by an additional node at 1/2 mm
# distance in -z-direction.

for s in plc.positions(pg.find(plc.nodeMarkers() == -99)):
    plc.createNode(s - [0.0, 0.0, 1e-3 / 2])

# Also refine the reference node
plc.createNode([0.5, 0.5, -0.5 - 1e-3 / 2])

###############################################################################
# Create the tetrahedron mesh (calling the tetgen mesh generator)

mesh = mt.createMesh(plc)

###############################################################################
# First we want to simulate our ERT response for a homogeneous resistivity
# of 1 :math:`\Omega`m. Usually, simulate will calculate apparent resistivities
# (rhoa) and put them into the returned DataContainerERT.
# However, for the calculation of rhoa, geometric factors (k) are expected in
Example #25
def fillEmptyToCellArray(mesh, vals):
    """
    Prolongate empty cell values to complete cell attributes.

    It is possible that you have zero values that need to be filled with
    appropriate attributes. This function tries to fill the empty values by
    successive prolongation of the non-zero values.

    Parameters
    ----------
    mesh : :gimliapi:`GIMLI::Mesh`
        For each cell of mesh a value will be returned.

    vals : array
        Array of size cellCount().

    Returns
    -------
    atts : array
        Array of length mesh.cellCount()
    """
    atts = pg.RVector(mesh.cellCount(), 0.0)
    oldAtts = mesh.cellAttributes()
    mesh.setCellAttributes(vals)
    mesh.createNeighbourInfos()
    # std::vector< Cell * >
    #empties = []

    #! search all cells with empty neighbours
    ids = pg.find(mesh.cellAttributes() != 0.0)

    for c in mesh.cells(ids):
        for i in range(c.neighbourCellCount()):
            nc = c.neighbourCell(i)

            if nc:
                if nc.attribute() == 0.0:
                    #c.setAttribute(99999)

                    b = pg.findCommonBoundary(c, nc)
                    ### search along a slope
                    pos = b.center() - b.norm()*1000.
                    sf = pg.RVector()
                    startCell = c

                    while startCell:

                        startCell.shape().isInside(pos, sf, False)
                        nextC = startCell.neighbourCell(sf)
                        if nextC:
                            if nextC.attribute()==0.0:
                                nextC.setAttribute(c.attribute())
                            else:
                                break

                        startCell = nextC

    mesh.fillEmptyCells(mesh.findCellByAttribute(0.0), background=-1 )
    atts = mesh.cellAttributes()
    mesh.setCellAttributes(oldAtts)
    return atts
Example #26
    def test_IndexAccess(self):
        # (double) array/vector
        an = np.arange(10.)
        ag = pg.Vector(an)

        # bn = nd.array(bool)
        bn = (an > 4.)
        self.assertEqual(type(bn), np.ndarray)
        self.assertEqual(bn.dtype, 'bool')
        self.assertEqual(sum(bn), 5)

        # bg = BVector
        bg = (ag > 4.)
        self.assertEqual(type(bg), pg.BVector)
        self.assertEqual(sum(bg), 5)

        # BVector(nd.array(bool))
        self.assertEqual(len(bg), len(pg.BVector(bn)))
        self.assertEqual(sum(bg), sum(pg.BVector(bn)))
        self.assertEqual(bg[0], pg.BVector(bn)[0])
        np.testing.assert_array_equal(bg, pg.BVector(bn))

        # In = nd.array(int)
        In = np.nonzero(bn)[0]
        self.assertEqual(type(In), np.ndarray)
        self.assertEqual(In.dtype, 'int64')
        self.assertEqual(len(In), 5)
        self.assertEqual(In[0], 5)

        # np.nonzero(bg)
        np.testing.assert_array_equal(In, np.nonzero(bg)[0])

        # Ig = IndexArray
        Ig = pg.find(bg)
        self.assertEqual(type(Ig), pg.core.IndexArray)
        self.assertEqual(len(Ig), 5)
        self.assertEqual(Ig[0], 5)

        # pg.find(nd.array(bool))
        np.testing.assert_array_equal(Ig, pg.find(bn))

        ## Indexoperators ##
        # ndarray [nd.array(bool)] == ndarray [nd.array(int)]
        np.testing.assert_equal(an[bn], an[In])
        self.assertEqual(len(an[bn]), 5)
        self.assertEqual(an[bn][0], 5)

        # ndarray[IndexArray] == ndarray [nd.array(int)]
        np.testing.assert_equal(an[Ig], an[In])

        # ndarray[BVector] == ndarray [nd.array(bool)]
        np.testing.assert_array_equal(an[np.array(bg, dtype='bool')], an[bn])
        np.testing.assert_array_equal(an[np.array(bg)], an[bn])
        np.testing.assert_array_equal(an[bg.array()], an[bn])
        np.testing.assert_array_equal(an[an>5], [6, 7, 8, 9])

        np.testing.assert_array_equal(ag[bg], ag[Ig])
        self.assertEqual(len(ag[bg]), 5)
        self.assertEqual(ag[bg][0], 5)

        # RVector [BVector] ==  RVector [nd.array(bool)]
        np.testing.assert_array_equal(ag[bg], ag[bn])
        np.testing.assert_equal(sum(ag[bg]), sum(ag[bn]))

        # RVector [IndexArray] ==  RVector [nd.array(int)]
        np.testing.assert_array_equal(ag[Ig], ag[In])
        # RVector(BVector) ==  RVector(nd.array(bool))
        # RVector(IndexArray) ==  RVector(nd.array(int))

        I = pg.core.IndexArray([0,1,1,0])
        np.testing.assert_array_equal(pg.sum(I), 2)
        np.testing.assert_array_equal(sum(I), 2)
        np.testing.assert_array_equal(np.sum(I), 2)
Example #27
# -*- coding: utf-8 -*-

import numpy as np
import pygimli as pg

# (double) array/vector
an = np.arange(10.)
ag = pg.RVector(an)

# boolean array/vector
bn = (an > 4.)
bg = (ag > 4.)

# index vectors from bools
fn = np.nonzero(bn)[0]
fg = pg.find(bg)

# pure numpy indexing
print((an[bn]))
print((an[fn]))

# pure pygimli indexing
print((ag(bg)))
print((ag(fg)))

# bracket indexing works with all of the index types above
print((ag[bg]))
print((ag[fg]))
print((ag[fn]))
print((ag[bn]))
Example #28
    def test_IndexAccess(self):
        # (double) array/vector
        an = np.arange(10.)
        ag = pg.RVector(an)

        # bn = nd.array(bool)
        bn = (an > 4.)
        self.assertEqual(type(bn), np.ndarray)
        self.assertEqual(bn.dtype, 'bool')
        self.assertEqual(sum(bn), 5)

        # bg = BVector
        bg = (ag > 4.)
        self.assertEqual(type(bg), pg.BVector)
        self.assertEqual(sum(bg), 5)

        # BVector(nd.array(bool))
        self.assertEqual(len(bg), len(pg.BVector(bn)))
        self.assertEqual(sum(bg), sum(pg.BVector(bn)))
        self.assertEqual(bg[0], pg.BVector(bn)[0])
        np.testing.assert_array_equal(bg, pg.BVector(bn))
         
        # In = nd.array(int)
        In = np.nonzero(bn)[0]
        self.assertEqual(type(In), np.ndarray)
        self.assertEqual(In.dtype, 'int')
        self.assertEqual(len(In), 5)
        self.assertEqual(In[0], 5)
        
        # np.nonzero(bg)
        np.testing.assert_array_equal(In, np.nonzero(bg)[0])

        # Ig = IndexArray
        Ig = pg.find(bg)
        self.assertEqual(type(Ig), pg.IndexArray)
        self.assertEqual(len(Ig), 5)
        self.assertEqual(Ig[0], 5)
        
        # pg.find(nd.array(bool))
        np.testing.assert_array_equal(Ig, pg.find(bn))
        
        ## Indexoperators ##
        # ndarray [nd.array(bool)] == ndarray [nd.array(int)]
        np.testing.assert_equal(an[bn], an[In])
        self.assertEqual(len(an[bn]), 5)
        self.assertEqual(an[bn][0], 5)
        
        # ndarray[IndexArray] == ndarray [nd.array(int)]
        np.testing.assert_equal(an[Ig], an[In])
        
        # ndarray[BVector] == ndarray [nd.array(bool)]
        np.testing.assert_array_equal(an[np.array(bg, dtype='bool')], an[bn])
        np.testing.assert_array_equal(an[np.array(bg)], an[bn])
        np.testing.assert_array_equal(an[bg.array()], an[bn])
        ## this fails because it is interpreted as an[[0,0,0,1,1,1]] .. 
        #np.testing.assert_equal(an[bg], an[bn])
        

        # RVector [BVector] == RVector [IndexArray]
        np.testing.assert_array_equal(ag[bg], ag[Ig])
        self.assertEqual(len(ag[bg]), 5)
        self.assertEqual(ag[bg][0], 5)

        # RVector [BVector] ==  RVector [nd.array(bool)]
        np.testing.assert_array_equal(ag[bg], ag[bn])
        np.testing.assert_equal(sum(ag[bg]), sum(ag[bn]))
        
        # RVector [IndexArray] ==  RVector [nd.array(int)]
        np.testing.assert_array_equal(ag[Ig], ag[In])
Example #29
def harmfit(y, x=None, error=None, nc=42, resample=None, lam=0.1,
            window=None, verbose=False, dosave=False,
            lineSearch=True, robust=False, maxiter=20):
    """HARMFIT - GIMLi based curve-fit by harmonic functions

    Parameters
    ----------
    y : 1d-array - values to be fitted

    x : 1d-array(len(y)) - data abscissa values. default: [0 .. len(y))

    error : 1d-array(len(y)) - error of y. default (absolute error = 0.01)

    nc : int - number of harmonic coefficients

    resample : 1d-array - resample y to x using the fitted coefficients

    window : [float, float] - only fit data inside the window bounds

    Returns
    -------
    response : 1d-array(len(resample) or len(x)) - smoothed values

    coefficients : 1d-array - fitting coefficients
    """
    if x is None:
        x = np.arange(len(y))

    xToFit = None
    yToFit = None

    if window is not None:
        idx = pg.find((x >= window[0]) & (x < window[1]))
#        idx = getIndex(x , lambda v: v > window[0] and v < window[1])

        xToFit = x(idx)
        yToFit = y(idx)

        if error is not None:
            error = error(idx)
    else:
        xToFit = x
        yToFit = y

    fop = pg.HarmonicModelling(nc, xToFit, verbose)
    inv = pg.RInversion(yToFit, fop, verbose, dosave)

    if error is not None:
        inv.setAbsoluteError(error)
    else:
        inv.setAbsoluteError(0.01)

    inv.setMarquardtScheme(0.8)
    if error is not None:
        inv.stopAtChi1(True)
    inv.setLambda(lam)
    inv.setMaxIter(maxiter)
    inv.setLineSearch(lineSearch)
    inv.setRobustData(robust)
    # inv.setConstraintType(0)

    coeff = inv.run()

    if resample is not None:

        ret = fop.response(coeff, resample)

        if window is not None:
            # print pg.find((resample < window[0]) | (resample >= window[1]))
            ret.setVal(0.0, pg.find((resample < window[0]) |
                                    (resample >= window[1])))
#            idx = getIndex(resample,
#                           lambda v: v <= window[0] or v >= window[1])
#            for i in idx: ret[i] = 0.0
        return ret, coeff
    else:
        return inv.response(), coeff
Example #30
pg.show(mesh, times, cMap='Spectral', fillContour=True, ax=ax)
drawStreamLines(ax, mesh, -times, nx=50, ny=50)

###############################################################################
# We compare the result with the analytical solution along the x axis
x = np.arange(0., 140., 0.5)
tFMM = pg.interpolate(mesh, times, x, x * 0., x * 0.)
tAna = analyticalSolution2Layer(x, zlay, v[0], v[1])
print("min(dt)={} ms  max(dt)={} ms".format(min(tFMM - tAna) * 1000,
                                            max(tFMM - tAna) * 1000))

###############################################################################
# In order to use the Dijkstra algorithm, we extract the surface positions (x >= 0)
mx = pg.x(mesh.positions())
my = pg.y(mesh.positions())
fi = pg.find((my == 0.0) & (mx >= 0))
px = np.sort(mx(fi))

###############################################################################
# A data container with index arrays named s (shot) and g (geophones) is
# created and filled with the positions and shot/geophone indices.
data = pg.DataContainer()
data.registerSensorIndex('s')
data.registerSensorIndex('g')
for pxi in px:
    data.createSensor(pg.RVector3(pxi, 0.0))

ndata = len(px) - 1
data.resize(ndata)
data.set('s', pg.RVector(ndata, 0))  # only one shot at first sensor
data.set('g', pg.utils.grange(1, ndata, 1))  # all others are geophones
Example #31
def fillEmptyToCellArray(mesh, vals, slope=True):
    """
    Prolongate empty cell values to complete cell attributes.

    It is possible to have zero values that need to be filled with appropriate
    attributes. This function tries to fill empty values successively by
    prolongation of the non-zeros.

    Parameters
    ----------
    mesh : :gimliapi:`GIMLI::Mesh`
        For each cell of mesh a value will be returned.

    vals : array
        Array of size cellCount().

    Returns
    -------
    atts : array
        Array of length mesh.cellCount()

    Examples
    --------
    >>> import pygimli as pg
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>>
    >>> # Create a mesh with 3 layers and an outer region for extrapolation
    >>> layers = pg.meshtools.createWorld([0,-50],[100,0], layers=[-15,-35])
    >>> inner = pg.meshtools.createMesh(layers, area=3)
    >>> mesh = pg.meshtools.appendTriangleBoundary(inner, xbound=120, ybound=50,
    ...                                            area=20, marker=0)
    >>>
    >>> # Create data for the inner region only
    >>> layer_vals = [20,30,50]
    >>> data = np.array(layer_vals)[inner.cellMarkers() - 1]
    >>>
    >>> # The following fails since len(data) != mesh.cellCount(), extrapolate
    >>> # pg.show(mesh, data)
    >>>
    >>> # Create data vector, where zeros fill the outer region
    >>> data_with_outer = np.array([0] + layer_vals)[mesh.cellMarkers()]
    >>>
    >>> # Actual extrapolation
    >>> extrapolated_data = pg.meshtools.fillEmptyToCellArray(mesh,
    ...                                  data_with_outer, slope=False)
    >>> extrapolated_data_with_slope = pg.meshtools.fillEmptyToCellArray(mesh, 
    ...                                 data_with_outer, slope=True)
    >>>
    >>> # Visualization
    >>> fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(10,8), sharey=True)
    >>> _ = pg.show(mesh, data_with_outer, ax=ax1, cMin=0)
    >>> _ = pg.show(mesh, extrapolated_data, ax=ax2, cMin=0)
    >>> _ = pg.show(mesh, extrapolated_data_with_slope, ax=ax3, cMin=0)
    >>> _ = ax1.set_title("Original data")
    >>> _ = ax2.set_title("Extrapolated with slope=False")
    >>> _ = ax3.set_title("Extrapolated with slope=True")
    """
    # atts = pg.Vector(mesh.cellCount(), 0.0)  # not used
    # oldAtts = mesh.cellAttributes()  # not used
    mesh.setCellAttributes(vals)
    mesh.createNeighborInfos()
    # std::vector< Cell * >
    # empties = []

    if slope:
        # search all cells with empty neighbors
        ids = pg.find(mesh.cellAttributes() != 0.0)

        for c in mesh.cells(ids):
            for i in range(c.neighborCellCount()):
                nc = c.neighborCell(i)

                if nc:
                    if nc.attribute() == 0.0:
                        # c.setAttribute(99999)

                        b = pg.core.findCommonBoundary(c, nc)
                        # search along a slope
                        pos = b.center() - b.norm() * 1000.
                        sf = pg.Vector()
                        startCell = c

                        while startCell:

                            startCell.shape().isInside(pos, sf, False)
                            nextC = startCell.neighborCell(sf)
                            if nextC:
                                if nextC.attribute() == 0.0:
                                    nextC.setAttribute(c.attribute())
                                else:
                                    break

                            startCell = nextC

    vals = mesh.cellAttributes()
    mesh.prolongateEmptyCellsValues(vals, background=-9e99)
    mesh.setCellAttributes(vals)
    return vals
Example #32
def test2d():    
    mesh = g.Mesh( 'mesh/world2d.bms' )
    print(mesh)

    xMin = mesh.boundingBox( ).min()[0]
    xMax = mesh.boundingBox( ).max()[0]
    x = P.arange( xMin, xMax, 1. )

    mesh.createNeighbourInfos()
    rho = g.RVector( len( mesh.cellAttributes() ), 1. ) * 2000.0 
    rho.setVal( 0.0, g.find( mesh.cellAttributes() == 1.0 ) )

    swatch = g.Stopwatch( True )
    pnts = []
    spnts = g.stdVectorRVector3()
    
    for i in x:
        pnts.append( g.RVector3( i, 0.0001 ) )
        spnts.append( g.RVector3( i, 0.0001 ) )
    
    #gzC, GC = calcGCells( pnts, mesh, rho, 1 )
    gzC = g.calcGCells( spnts , mesh, rho, 1 )
    print "calcGCells",  swatch.duration( True )
    #gzB, GB = calcGBounds( pnts, mesh, rho )
    gzB = g.calcGBounds( spnts , mesh, rho )
    print "calcGBounds", swatch.duration( True )

    gZ_Mesh = gzC

    
    ax1, ax2 = getaxes()

    # sphere analytical solution
    gAna  = analyticalCircle2D( spnts, radius = 2.0, pos = g.RVector3( 0.0, -5.0 ), dDensity = 2000 )
    gAna2 = analyticalCircle2D( spnts, radius = 2.0, pos = g.RVector3( 5.0, -5.0 ), dDensity = 2000 )
    
    gAna = gAna + gAna2

    ax1.plot( x, gAna, '-x', label= 'analytical' )
    ax1.plot( x, gZ_Mesh, label= 'WonBevis1987-mesh' )

    print(gAna / gZ_Mesh)

    #rho=GB[0]/mesh.cellSizes()

    gci = drawModel( ax2, mesh, rho )

    drawSelectedMeshBoundaries( ax2, mesh.findBoundaryByMarker( 0 )
                                , color = ( 1.0, 1.0, 1.0, 1.0 )
                                , linewidth = 0.3 )
    drawSelectedMeshBoundaries( ax2, mesh.findBoundaryByMarker( 1 )
                                , color = ( 1.0, 1.0, 1.0, 1.0 )
                                , linewidth = 0.3 )
       
    # sphere polygone
    radius = 2.
    depth = 5.
    poly1 = g.stdVectorRVector3()
    poly2 = g.stdVectorRVector3()
    nSegment=124
    for i in range( nSegment ):
        xp = np.sin( (i+1) * ( 2. * np.pi ) / nSegment )
        yp = np.cos( (i+1) * ( 2. * np.pi ) / nSegment )
        poly1.append( g.RVector3( xp * radius, yp * radius - depth ) )
        poly2.append( g.RVector3( xp * radius + 5., yp * radius - depth ) )

    gZ_Poly  = calcPolydgdz( spnts, poly1, 2000 )
    gZ_Poly += calcPolydgdz( spnts, poly2, 2000 )

    ax1.plot( x, gZ_Poly, label= 'WonBevis1987-Poly' )
    ax2.plot( g.x( poly1 ), g.y( poly1 ), color = 'red' )
    ax2.plot( g.x( poly2 ), g.y( poly2 ), color = 'red' )

    ax2.plot( g.x( spnts ), g.y( spnts ), marker = 'x', color = 'black' )

    # test some special case
    for i, p in enumerate( poly1 ):
        poly1[i] = g.RVector3( poly1[i] - g.RVector3( 5.0, -6. ) )
    
    ax2.plot( g.x( poly1 ), g.y( poly1 ), color = 'green' )

    gz  = calcPolydgdz( spnts, poly1, 2000 )
    ax1.plot( x, gz, label= 'Special Case', color = 'green' )
    ax1.set_ylabel( 'dg/dz [mGal]' )
    ax2.set_ylabel( 'Depth [m]' )

    ax1.legend()
    ax2.set_xlim( [ x[0], x[-1] ] )
    ax2.grid()
Example #33
drawStreamLines(ax, mesh, -times, nx=50, ny=50)

###############################################################################
# We compare the result with the analytical solution along the x axis
x = np.arange(0., 140., 0.5)
tFMM = pg.interpolate(mesh, times, x, x * 0., x * 0.)
tAna = analyticalSolution2Layer(x)
print("min(dt)={} ms  max(dt)={} ms".format(
    min(tFMM - tAna) * 1000,
    max(tFMM - tAna) * 1000))

###############################################################################
# In order to use the Dijkstra algorithm, we extract the surface positions (x >= 0)
mx = pg.x(mesh.positions())
my = pg.y(mesh.positions())
fi = pg.find((my == 0.0) & (mx >= 0))
px = np.sort(mx(fi))

###############################################################################
# A data container with index arrays named s (shot) and g (geophones) is
# created and filled with the positions and shot/geophone indices.
data = pg.DataContainer()
data.registerSensorIndex('s')
data.registerSensorIndex('g')
for pxi in px:
    data.createSensor(pg.RVector3(pxi, 0.0))

ndata = len(px) - 1
data.resize(ndata)
data.set('s', pg.RVector(ndata, 0))  # only one shot at first sensor
data.set('g', pg.utils.grange(1, ndata, 1))  # all others are geophones
Example #34
def drawShapes(ax, mesh, u):
    '''Draw the interpolation and gradient field of u on the first cell of mesh.'''

    ax.set_aspect('equal')

    Nx = 21
    Ny = 21
    nLevels = 12

    tix = np.linspace(-1.0, 1.0, Nx)
    tiy = np.linspace(-1.0, 1.0, Ny)
    X, Y = np.meshgrid(tix, tiy)

    uc = pg.RVector(len(X.flat))

    c = mesh.cell(0)

    imax = pg.find(u == max(u))[0]

    for i in range(c.nodeCount()):
        print(c.rst(i))
    print(imax)
    print(c.createShapeFunctions()[imax])
    print("dx", c.createShapeFunctions()[imax].derive(0))
    print("dy", c.createShapeFunctions()[imax].derive(1))

    # draw nodes
    for i in range(c.nodeCount()):
        col = 'black'
        if i == imax:
            col = 'red'

        ax.plot(c.node(i).pos()[0], c.node(i).pos()[1], '.', markersize=12,
                linewidth=0, color=col)

    # draw boundary
    drawMeshBoundaries(ax, mesh)
    ptns = []
    grads = []

    swatch = pg.Stopwatch(True)
    for i, x in enumerate(X.flat):
        p = c.shape().xyz(pg.RVector3(X.flat[i], Y.flat[i]))

        X.flat[i] = p[0]
        Y.flat[i] = p[1]

#        ax.plot(p[0], p[1], '.', zorder=10, color='black', markersize = 1)

        if not c.shape().isInside(p):
            uc[i] = -99.0
            continue

        uc[i] = c.pot(p, u)

        gr = c.grad(p, u)
        ptns.append(p)
        grads.append(gr)

    print(swatch.duration(True))

    for i, p in enumerate(ptns):
        ax.quiver(p[0], p[1], grads[i][0], grads[i][1], zorder=10)

    Z = np.ma.masked_where(uc == -99., uc)
    Z = Z.reshape(Ny, Nx)
    ax.contourf(X, Y, Z, nLevels)
Example #35
def drawStreamLine_(ax, mesh, c, data, dataMesh=None, linewidth=1.0,
                    dropTol=0.0, **kwargs):
    """Draw a single streamline.

    Draw a single streamline into a given mesh for given data, starting at
    the center of cell c.
    The streamline is extended until it reaches a cell that
    already contains a streamline.

    TODO
        linewidth and color depends on absolute velocity
        or background color saturation

    Parameters
    ----------

    ax : matplotlib.ax
        ax to draw into

    mesh : :gimliapi:`GIMLI::Mesh`
        2d Mesh to draw the streamline

    c : :gimliapi:`GIMLI::Cell`
        start cell

    data : iterable float | [float, float]
        If data is an array (per cell or node) gradients are calculated
        otherwise the data will be interpreted as vector field.

    dataMesh : :gimliapi:`GIMLI::Mesh` [None]

        Optional mesh for the data. If you want high resolution
        data to plot on coarse draw mesh.

    linewidth : float [1.0]

        Streamline linewidth

    dropTol : float [0.0]

        Don't draw stream lines with velocity lower than drop tolerance.

    """
    x, y, v = streamline(mesh, data, startCoord=c.center(), dLengthSteps=5,
                         dataMesh=dataMesh, maxSteps=10000, verbose=False,
                         coords=[0, 1])

    if 'color' not in kwargs:
        kwargs['color'] = 'black'

    lines = None

    if len(x) > 2:
        points = np.array([x, y]).T.reshape(-1, 1, 2)

        segments = np.concatenate([points[:-1], points[1:]], axis=1)

        lwidths = pg.RVector(len(v), linewidth)
        lwidths[pg.find(pg.RVector(v) < dropTol)] = 0.0

        lines = mpl.collections.LineCollection(
            segments, linewidths=lwidths, **kwargs)
        ax.add_collection(lines)

        # probably the limits are wrong without plot call
        # lines = ax.plot(x, y, **kwargs)
        # updateAxes_(ax, lines)
        # ax.plot(x, y, '.-', color='black', **kwargs)
    if len(x) > 3:
        xmid = int(len(x) / 2)
        ymid = int(len(y) / 2)
        dx = x[xmid + 1] - x[xmid]
        dy = y[ymid + 1] - y[ymid]
        c = mesh.findCell([x[xmid], y[ymid]])
        # dLength = c.center().dist(c.node(0).pos()) / 4.  # NOT USED

        if v[xmid] > dropTol:
            # ax.arrow(x[xmid], y[ymid], dx, dy,
            #          #width=dLength / 3.,
            #          width=0,
            #          head_width=0.01,
            #          head_length=0.02
            #         #  head_width=dLength / 3.,
            #         #  head_length=dLength / 3.,
            #          head_starts_at_zero=True,
            #          length_includes_head=False,
            #          lw=4,
            #          ls=None,
            #          **kwargs)

            dx90 = -dy
            dy90 = dx
            aLen = 3
            aWid = 1
            xy = list(zip([x[xmid] + dx90*aWid, x[xmid] + dx*aLen,
                           x[xmid] - dx90*aWid],
                          [y[ymid] + dy90*aWid, y[ymid] + dy*aLen,
                           y[ymid] - dy90*aWid]))

            arrow = mpl.patches.Polygon(xy, ls=None, lw=0, closed=True,
                                        **kwargs)
            # arrow = mpl.collections.PolyCollection(xy, lines=None,
            #                                        closed=True, **kwargs)
            ax.add_patch(arrow)

    return lines
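A tiny sketch of just the dropTol masking used above, with invented segment velocities (assuming pg.RVector accepts a plain list, otherwise wrap it in numpy):

import pygimli as pg

v = [0.0, 0.2, 0.5, 0.05]                      # velocity per streamline segment
lwidths = pg.RVector(len(v), 1.0)              # default linewidth per segment
lwidths[pg.find(pg.RVector(v) < 0.1)] = 0.0    # hide segments below dropTol=0.1
print(lwidths)                                 # -> 0.0 1.0 1.0 0.0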
Example #36
def drawStreamLine_(ax, mesh, c, data, dataMesh=None, linewidth=1.0,
                    dropTol=0.0, **kwargs):
    """Draw a single streamline.

    Draw a single streamline into a given mesh for given data, starting at
    the center of cell c.
    The streamline is extended until it reaches a cell that
    already contains a streamline.

    TODO
        linewidth and color depends on absolute velocity
        or background color saturation

    Parameters
    ----------

    ax : matplotlib.ax
        ax to draw into

    mesh : :gimliapi:`GIMLI::Mesh`
        2d Mesh to draw the streamline

    c : :gimliapi:`GIMLI::Cell`
        start cell

    data : iterable float | [float, float]
        If data is a scalar array (one value per cell or node), gradients
        are calculated; otherwise the data is interpreted as a vector field.

    dataMesh : :gimliapi:`GIMLI::Mesh` [None]
        Optional mesh for the data, e.g., if high-resolution data should be
        plotted on a coarser drawing mesh.

    linewidth : float [1.0]
        Streamline linewidth.

    dropTol : float [0.0]
        Don't draw streamlines with velocity lower than the drop tolerance.

    """
    x, y, v = streamline(mesh, data, startCoord=c.center(), dLengthSteps=5,
                         dataMesh=dataMesh, maxSteps=10000, verbose=False,
                         coords=[0, 1])

    if 'color' not in kwargs:
        kwargs['color'] = 'black'

    lines = None

    if len(x) > 2:
        points = np.array([x, y]).T.reshape(-1, 1, 2)

        segments = np.concatenate([points[:-1], points[1:]], axis=1)

        lwidths = pg.RVector(len(v), linewidth)
        lwidths[pg.find(pg.RVector(v) < dropTol)] = 0.0

        lines = mpl.collections.LineCollection(
            segments, linewidths=lwidths, **kwargs)
        ax.add_collection(lines)

        # probably the limits are wrong without plot call
        # lines = ax.plot(x, y, **kwargs)
        # updateAxes_(ax, lines)
        # ax.plot(x, y, '.-', color='black', **kwargs)
    if len(x) > 3:
        xmid = int(len(x) / 2)
        ymid = int(len(y) / 2)
        dx = x[xmid + 1] - x[xmid]
        dy = y[ymid + 1] - y[ymid]
        c = mesh.findCell([x[xmid], y[ymid]])
        # dLength = c.center().dist(c.node(0).pos()) / 4.  # NOT USED

        if v[xmid] > dropTol:
            # ax.arrow(x[xmid], y[ymid], dx, dy,
            #          #width=dLength / 3.,
            #          width=0,
            #          head_width=0.01,
            #          head_length=0.02
            #         #  head_width=dLength / 3.,
            #         #  head_length=dLength / 3.,
            #          head_starts_at_zero=True,
            #          length_includes_head=False,
            #          lw=4,
            #          ls=None,
            #          **kwargs)

            dx90 = -dy
            dy90 = dx
            aLen = 3
            aWid = 1
            xy = list(zip([x[xmid] + dx90*aWid, x[xmid] + dx*aLen,
                           x[xmid] - dx90*aWid],
                          [y[ymid] + dy90*aWid, y[ymid] + dy*aLen,
                           y[ymid] - dy90*aWid]))

            arrow = mpl.patches.Polygon(xy, ls=None, lw=0, closed=True,
                                        **kwargs)
            # arrow = mpl.collections.PolyCollection(xy, lines=None,
            #                                        closed=True, **kwargs)
            ax.add_patch(arrow)

    return lines
Пример #37
0
def test2d():
    mesh = pg.Mesh('mesh/world2d.bms')
    print(mesh)

    xMin = mesh.boundingBox().min()[0]
    xMax = mesh.boundingBox().max()[0]
    x = np.arange(xMin, xMax, 1.)

    mesh.createNeighbourInfos()
    rho = pg.RVector(len(mesh.cellAttributes()), 1.) * 2000.0
    rho.setVal(0.0, pg.find(mesh.cellAttributes() == 1.0))

    swatch = pg.Stopwatch(True)
    pnts = []
    spnts = pg.stdVectorRVector3()

    for i in x:
        pnts.append(pg.RVector3(i, 0.0001))
        spnts.append(pg.RVector3(i, 0.0001))

#    gzC, GC = calcGCells(pnts, mesh, rho, 1)
    gzC = pg.calcGCells(spnts, mesh, rho, 1)
    print("calcGCells", swatch.duration(True))
    #    gzB, GB = calcGBounds(pnts, mesh, rho)
    #    gzB = pg.calcGBounds(spnts, mesh, rho)
    #    print("calcGBounds", swatch.duration(True))

    gZ_Mesh = gzC

    ax1, ax2 = getaxes()

    # sphere analytical solution
    gAna = analyticalCircle2D(spnts,
                              radius=2.0,
                              pos=pg.RVector3(0.0, -5.0),
                              dDensity=2000)
    gAna2 = analyticalCircle2D(spnts,
                               radius=2.0,
                               pos=pg.RVector3(5.0, -5.0),
                               dDensity=2000)

    gAna = gAna + gAna2

    ax1.plot(x, gAna, '-x', label='analytical')
    ax1.plot(x, gZ_Mesh, label='WonBevis1987-mesh')

    print(gAna / gZ_Mesh)

    #    rho=GB[0]/mesh.cellSizes()

    drawModel(ax2, mesh, rho)
    for i in (0, 1):
        drawSelectedMeshBoundaries(ax2,
                                   mesh.findBoundaryByMarker(i),
                                   color=(1.0, 1.0, 1.0, 1.0),
                                   linewidth=0.3)

    # sphere polygon
    radius = 2.
    depth = 5.
    poly1 = pg.stdVectorRVector3()
    poly2 = pg.stdVectorRVector3()
    nSegment = 124
    for i in range(nSegment):
        xp = np.sin((i + 1) * (2. * np.pi) / nSegment)
        yp = np.cos((i + 1) * (2. * np.pi) / nSegment)
        poly1.append(pg.RVector3(xp * radius, yp * radius - depth))
        poly2.append(pg.RVector3(xp * radius + 5., yp * radius - depth))

    gZ_Poly = calcPolydgdz(spnts, poly1, 2000)
    gZ_Poly += calcPolydgdz(spnts, poly2, 2000)

    ax1.plot(x, gZ_Poly, label='WonBevis1987-Poly')
    ax2.plot(pg.x(poly1), pg.y(poly1), color='red')
    ax2.plot(pg.x(poly2), pg.y(poly2), color='red')

    ax2.plot(pg.x(spnts), pg.y(spnts), marker='x', color='black')

    # test some special case
    for i, p in enumerate(poly1):
        poly1[i] = pg.RVector3(poly1[i] - pg.RVector3(5.0, -6.))

    ax2.plot(pg.x(poly1), pg.y(poly1), color='green')

    gz = calcPolydgdz(spnts, poly1, 2000)
    ax1.plot(x, gz, label='Special Case', color='green')
    ax1.set_ylabel('dg/dz [mGal]')
    ax2.set_ylabel('Depth [m]')

    ax1.legend()
    ax2.set_xlim([x[0], x[-1]])
    ax2.grid()
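
# Cross-check sketch (assumption: analyticalCircle2D above implements the textbook
# gravity effect of an infinite horizontal cylinder, g_z = 2*pi*G*dRho*R**2*z/(x**2+z**2),
# here written in SI units). The helper below only needs numpy and can serve as an
# independent sanity check of the analytical curve; names and values are illustrative.
import numpy as np

def cylinderGz(x, radius, depth, dRho, G=6.674e-11):
    """Vertical gravity effect [m/s^2] of an infinite horizontal cylinder at given depth."""
    return 2.0 * np.pi * G * dRho * radius**2 * depth / (x**2 + depth**2)

xp = np.arange(-20.0, 20.0, 1.0)
print(cylinderGz(xp, radius=2.0, depth=5.0, dRho=2000.0).max())  # peak above the axis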
Пример #38
0
def harmfit(y, x=None, error=None, nCoefficients=42, resample=None,
            window=None, verbose=False, dosave=False,
            lineSearch=True, robust=False, maxiter=20):
    """
        HARMFIT - GIMLi based curve-fit by harmonic functions
        y .. 1d-array(len(y)) values to be fitted
        x .. 1d-array(len(y)) abscissa data spacing. if not given: 1 * [0 .. len(y))
        error .. 1d-array(len(y))  data error of y (fit y into the range of error). if not given (absolute err = 1%)
        nCoefficients .. int Number of harmonic coefficients
        resample .. 1d-array(len(resample)) resample y based on fitting coeffients
        window .. just fit data inside window bounds
        return response, coefficients

        response .. 1d-array(len(y)) if no resample given, else 1d-array(len(resample))
        coefficients .. coefficients for harmic functions that fit y
    """
    if x is None:
        x = pg.asvector(np.arange(len(y)))
    else:
        if not isinstance(x, pg.RVector):
            x = pg.asvector(x)

    if not isinstance(y, pg.RVector):
        y = pg.asvector(y)

    xToFit = None
    yToFit = None

    if window is not None:
        idx = pg.find((x >= window[0]) & (x < window[1]))
        #idx = getIndex(x , lambda v: v > window[0] and v < window[1])

        xToFit = x(idx)
        yToFit = y(idx)

        if error is not None:
            error = error(idx)
    else:
        xToFit = x
        yToFit = y

#    print xToFit
#    print yToFit
    fop = pg.HarmonicModelling(nCoefficients, xToFit, verbose)
    inv = pg.RInversion(yToFit, fop, verbose, dosave)
    if error is not None:
        if not isinstance(error, pg.RVector):
            error = pg.asvector(error)

        inv.setRelativeError(error)
    else:
        inv.setAbsoluteError(0.01)

    inv.setLambda(0.0)
    inv.setLocalRegularization(True)
    inv.setMaxIter(maxiter)
    inv.setLineSearch(lineSearch)
    inv.setRobustData(robust)
    # inv.setConstraintType(0)

    coeff = inv.run()

    if resample is not None:
        if not isinstance(resample, pg.RVector):
            resample = pg.asvector(resample)

        ret = fop.response(coeff, resample)

#        print ret

        if window is not None:
            #            print resample
            #            print window[0], window[1]
            # print pg.find((resample < window[0]) | (resample >= window[1]))
            ret.setVal(
                0.0, pg.find(
                    (resample < window[0]) | (
                        resample >= window[1])))
#            idx = getIndex(resample, lambda v: v <= window[0] or v >= window[1])
#            for i in idx: ret[i] = 0.0
#        print ret
        # sys.exit
        return ret, coeff
    else:
        return inv.response(), coeff
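
# Usage sketch (assumptions: pygimli is imported as pg, numpy as np, and harmfit is
# defined as above; the coefficient count and error level are illustrative only):
# fit a noisy signal and resample the fitted curve on a finer abscissa.
import numpy as np

x = np.linspace(0.0, 10.0, 50)
y = np.sin(x) + 0.5 * np.cos(3.0 * x) + np.random.randn(len(x)) * 0.05

xFine = np.linspace(0.0, 10.0, 200)
response, coeff = harmfit(y, x=x, error=np.full(len(y), 0.05),
                          nCoefficients=12, resample=xFine)
print(len(response), len(coeff))  # 200 resampled values plus the fitted coefficients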
Пример #39
0
def drawStreamLine_(ax,
                    mesh,
                    c,
                    data,
                    dataMesh=None,
                    linewidth=1.0,
                    dropTol=0.0,
                    **kwargs):
    """Draw a single streamline.

    Draw a single streamline into a given mesh for given data, starting at
    the center of cell c.
    The streamline is extended until it reaches a cell that already
    contains a streamline.

    TODO
        linewidth and color should depend on absolute velocity
        or background color saturation

    Parameters
    ----------
    ax : matplotlib.ax
        ax to draw into
    mesh : :gimliapi:`GIMLI::Mesh`
        2d mesh
    c : :gimliapi:`GIMLI::Cell`
        Start point is c.center()
    data : iterable float | [float, float]
        If data is a scalar array (one value per cell or node), gradients
        are calculated; otherwise the data is interpreted as a vector field
        given per node or per cell center.
    dataMesh : :gimliapi:`GIMLI::Mesh` [None]
        Optional mesh for the data. If you want high resolution
        data to plot on coarse draw mesh.
    linewidth : float [1.0]
        Streamline linewidth
    dropTol : float [0.0]
        Don't draw stream lines with velocity lower than drop tolerance.

    Keyword Arguments
    -----------------
    **kwargs
        arrowSize: int
            Size of the arrow's head.
        arrowColor: str
            Color of the arrow's head.
        Additional kwargs are forwarded to mpl.LineCollection and mpl.Polygon.
    """
    x, y, v = streamline(mesh,
                         data,
                         startCoord=c.center(),
                         dLengthSteps=5,
                         dataMesh=dataMesh,
                         maxSteps=10000,
                         verbose=False,
                         coords=[0, 1])

    if 'color' not in kwargs:
        kwargs['color'] = 'black'

    arrowSize = kwargs.pop('arrowSize', 12)
    arrowColor = kwargs.pop('arrowColor', 'black')

    lines = None

    if len(x) > 2:
        points = np.array([x, y]).T.reshape(-1, 1, 2)

        segments = np.concatenate([points[:-1], points[1:]], axis=1)

        lwidths = pg.Vector(len(v), linewidth)
        lwidths[pg.find(pg.Vector(v) < dropTol)] = 0.0

        lines = mpl.collections.LineCollection(segments,
                                               linewidths=lwidths,
                                               **kwargs)
        ax.add_collection(lines)

        # probably the limits are wrong without plot call
        # lines = ax.plot(x, y, **kwargs)
        # updateAxes_(ax, lines)
        # ax.plot(x, y, '.-', color='black', **kwargs)
    if len(x) > 3:
        xmid = int(len(x) / 2)
        ymid = int(len(y) / 2)
        dx = x[xmid + 1] - x[xmid]
        dy = y[ymid + 1] - y[ymid]
        c = mesh.findCell([x[xmid], y[ymid]])

        if v[xmid] > dropTol:

            absArrowSize = True
            if absArrowSize:
                ax.annotate(
                    '',
                    xytext=(x[xmid] - dx, y[ymid] - dy),
                    xy=(x[xmid], y[ymid]),
                    arrowprops=dict(arrowstyle="-|>", color=arrowColor),
                    size=arrowSize,
                    **kwargs,
                )
            else:
                ax.arrow(x[xmid],
                         y[ymid],
                         dx,
                         dy,
                         shape='full',
                         lw=0,
                         length_includes_head=True,
                         fc=arrowColor,
                         head_width=.35,
                         **kwargs)

            # dx90 = -dy
            # dy90 = dx
            # aLen = 3
            # aWid = 1
            # xy = list(zip([x[xmid] + dx90*aWid, x[xmid] + dx*aLen,
            #                x[xmid] - dx90*aWid],
            #               [y[ymid] + dy90*aWid, y[ymid] + dy*aLen,
            #                y[ymid] - dy90*aWid]))

            # arrow = mpl.patches.Polygon(xy, ls=None, lw=0, closed=True,
            #                             **kwargs)
            #ax.add_patch(arrow)

    return lines
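
# Usage sketch (assumptions: pygimli and matplotlib are installed and drawStreamLine_
# plus its streamline() helper are importable as defined above; mesh size and data are
# illustrative only): draw one streamline of the gradient field of a per-node scalar.
import matplotlib.pyplot as plt
import pygimli as pg
import pygimli.meshtools as mt

mesh = mt.createMesh(mt.createRectangle(start=[0.0, 0.0], end=[10.0, 10.0]), area=0.5)
u = pg.x(mesh.positions())  # scalar per node, so gradients are traced
fig, ax = plt.subplots()
drawStreamLine_(ax, mesh, mesh.cell(0), u, linewidth=1.0, dropTol=0.0)
ax.set_xlim(mesh.xmin(), mesh.xmax())
ax.set_ylim(mesh.ymin(), mesh.ymax())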
Пример #40
0
def rmswitherr(a, b, err, errtol=1):
    """Compute (abs-)root mean square of values with error above a threshold"""
    fi = pg.find(err < errtol)
    return sqrt(pg.mean(pg.pow(a[fi] - b[fi], 2)))
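
# Usage sketch (assumptions: pygimli is imported as pg, numpy as np, and rmswitherr is
# defined as above): only entries whose error is below errtol contribute to the misfit,
# so the outlier in the last position is ignored.
import numpy as np
import pygimli as pg

a = pg.Vector(np.array([1.0, 2.0, 3.0, 4.0]))
b = pg.Vector(np.array([1.1, 2.1, 2.9, 10.0]))   # last value is far off ...
err = pg.Vector(np.array([0.1, 0.1, 0.1, 5.0]))  # ... but its error exceeds errtol
print(rmswitherr(a, b, err, errtol=1))           # RMS over the first three entries only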
Пример #41
0
def rrmsWithErr(a, b, err, errtol=1):
    """Compute root mean square of values with error above a threshold"""
    fi = pg.find(err < errtol)
    return rms((a[fi]-b[fi])/a[fi])
Пример #42
0
def showMesh(mesh,
             data=None,
             hold=False,
             block=False,
             colorBar=None,
             label=None,
             coverage=None,
             ax=None,
             savefig=None,
             showMesh=False,
             showBoundary=None,
             markers=False,
             **kwargs):
    """2D Mesh visualization.

    Create an axis object and plot a 2D mesh with given node or cell data.
    Returns the axis and the color bar. The type of data determine the
    appropriate draw method.

    Parameters
    ----------

    mesh : :gimliapi:`GIMLI::Mesh`
        2D or 3D GIMLi mesh

    data : iterable [None]
        Optionally data to visualize.

        . None (draw mesh only)
            forward to :py:mod:`pygimli.mplviewer.drawMesh`
            or if no cells are given:
            forward to :py:mod:`pygimli.mplviewer.drawPLC`

        . [[marker, value], ...]
            List of Cellvalues per cell marker
            forward to :py:mod:`pygimli.mplviewer.drawModel`

        . float per cell -- model, patch
            forward to :py:mod:`pygimli.mplviewer.drawModel`

        . float per node -- scalar field
            forward to :py:mod:`pygimli.mplviewer.drawField`

        . iterable of type [float, float] -- vector field
            forward to :py:mod:`pygimli.mplviewer.drawStreams`

        . pg.R3Vector -- vector field
            forward to :py:mod:`pygimli.mplviewer.drawStreams`

        . pg.stdVectorRVector3 -- sensor positions
            forward to :py:mod:`pygimli.mplviewer.drawSensors`


    hold : bool [false]
        Set interactive plot mode for matplotlib.
        If set to false [default], your script will open a window with the
        figure and draw your content.
        If set to true, nothing happens until you either force another show
        with hold=False, call plt.show(), or call pg.wait().
        If the figure should be shown while blocking your script, set block=True.

    block : bool [false]
        Force show drawing your content and block the script until you
        close the current figure.

    colorBar : bool [None], Colorbar
        Create and show a colorbar. If colorBar is a valid colorbar then only
        its values will be updated.

    label : str
        Set colorbar label. If set colorbar is toggled to True. [None]

    coverage : iterable [None]
        Weight data by the given coverage array and fadeout the color.

    ax : matplotlib.Axes [None]
        Instead of creating a new and empty axis, draw into the given one.
        Useful for combining plots.

    savefig: string
        Filename for saving the figure directly to disk.
        Since the matplotlib PDF output can be rather large, epstopdf is
        called if savefig ends with the .eps suffix.

    showMesh : bool [False]
        Additionally shows the mesh itself.

    showBoundary : bool [None]
        Shows all boundaries with marker != 0. A value of None means
        automatic: True for cell data and False for node data.

    markers : bool [False]
        Show mesh and boundary markers.

    **kwargs :
        * xlabel : str [None]
            Add label to the x axis

        * ylabel : str [None]
            Add label to the y axis

        * all remaining
            Will be forwarded to the draw functions and matplotlib methods,
            respectively.

    Examples
    --------
    >>> import pygimli as pg
    >>> import pygimli.meshtools as mt
    >>> world = mt.createWorld(start=[-10, 0], end=[10, -10],
    ...                        layers=[-3, -7], worldMarker=False)
    >>> mesh = mt.createMesh(world, quality=32, area=0.2, smooth=[1, 10])
    >>> _ = pg.viewer.showMesh(mesh, markers=True)

    Returns
    -------
    ax : matplotlib.axes

    colorbar : matplotlib.colorbar
    """
    pg.renameKwarg('cmap', 'cMap', kwargs)

    if ax is None:
        ax = plt.subplots()[1]

    # print('1*'*50)
    # print(locale.localeconv())

    # plt.subplots() resets locale setting to system default .. this went
    # horrible wrong for german 'decimal_point': ','
    pg.checkAndFixLocaleDecimal_point(verbose=False)

    # print('2*'*50)
    # print(locale.localeconv())

    if block:
        hold = True

    if hold:
        lastHoldStatus = pg.mplviewer.utils.holdAxes__
        pg.mplviewer.hold(val=1)

    gci = None
    validData = False

    if markers:
        kwargs["boundaryMarker"] = True
        if mesh.cellCount() > 0:
            uniquemarkers, uniqueidx = np.unique(np.array(mesh.cellMarkers()),
                                                 return_inverse=True)
            label = "Cell markers"
            kwargs["cMap"] = plt.cm.get_cmap("Set3", len(uniquemarkers))
            kwargs["logScale"] = False
            kwargs["cMin"] = -0.5
            kwargs["cMax"] = len(uniquemarkers) - 0.5
            data = np.arange(len(uniquemarkers))[uniqueidx]

    if data is None:
        showMesh = True
        if showBoundary is None:
            showBoundary = True
    elif isinstance(data, pg.stdVectorRVector3):
        drawSensors(ax, data, **kwargs)
    elif isinstance(data, pg.R3Vector):
        drawStreams(ax, mesh, data, **kwargs)
    else:
        #print('-----------------------------')
        #print(data, type(data))
        #print('-----------------------------')

        ### data=[[marker, val], ....]
        if isinstance(data, list) and \
            isinstance(data[0], list) and isinstance(data[0][0], int):
            data = pg.solver.parseMapToCellArray(data, mesh)

        if hasattr(data[0], '__len__') and not \
            isinstance(data, np.ma.core.MaskedArray):

            if len(data) == 2:  # [u,v] x N
                data = np.array(data).T

            if data.shape[1] == 2:
                drawStreams(ax, mesh, data, **kwargs)

            elif data.shape[1] == 3:  # probably N x [u,v,w]
                # if sum(data[:, 0]) != sum(data[:, 1]):
                # drawStreams(ax, mesh, data, **kwargs)
                drawStreams(ax, mesh, data[:, 0:2], **kwargs)
            else:
                pg.warn("No valid stream data:", data.shape, data.ndim)
                showMesh = True
        elif min(data) == max(data):  # or pg.haveInfNaN(data):
            pg.warn("No valid data: ", min(data), max(data),
                    pg.haveInfNaN(data))
            showMesh = True
        else:
            validData = True
            try:
                if len(data) == mesh.cellCount():
                    gci = drawModel(ax, mesh, data, **kwargs)
                    if showBoundary is None:
                        showBoundary = True

                elif len(data) == mesh.nodeCount():
                    gci = drawField(ax, mesh, data, **kwargs)

                cMap = kwargs.pop('cMap', None)
                if cMap is not None:
                    gci.set_cmap(cmapFromName(cMap))

            except BaseException as e:
                print("Exception occured: ", e)
                print("Data: ", min(data), max(data), pg.haveInfNaN(data))
                print("Mesh: ", mesh)
                drawMesh(ax, mesh, **kwargs)

    if mesh.cellCount() == 0:
        showMesh = False
        if mesh.boundaryCount() == 0:
            pg.mplviewer.drawPLC(ax,
                                 mesh,
                                 showNodes=True,
                                 fillRegion=False,
                                 showBoundary=False,
                                 **kwargs)
            showBoundary = False
            #ax.plot(pg.x(mesh), pg.y(mesh), '.', color='black')
        else:
            pg.mplviewer.drawPLC(ax, mesh, **kwargs)

    if showMesh:
        if gci is not None and hasattr(gci, 'set_antialiased'):
            gci.set_antialiased(True)
            gci.set_linewidth(0.3)
            gci.set_edgecolor("0.1")
        else:
            pg.mplviewer.drawSelectedMeshBoundaries(ax,
                                                    mesh.boundaries(),
                                                    color="0.1",
                                                    linewidth=0.3)
            #drawMesh(ax, mesh, **kwargs)

    if showBoundary is True or showBoundary == 1:
        b = mesh.boundaries(mesh.boundaryMarkers() != 0)
        pg.mplviewer.drawSelectedMeshBoundaries(ax,
                                                b,
                                                color=(0.0, 0.0, 0.0, 1.0),
                                                linewidth=1.4)

    if kwargs.pop('fitView', True):
        ax.set_xlim(mesh.xmin(), mesh.xmax())
        ax.set_ylim(mesh.ymin(), mesh.ymax())
        ax.set_aspect('equal')

    cbar = None

    if label is not None and colorBar is None:
        colorBar = True

    if colorBar and validData:
        # , **kwargs) # causes problems!
        labels = ['cMin', 'cMax', 'nLevs', 'cMap', 'logScale']
        subkwargs = {key: kwargs[key] for key in labels if key in kwargs}
        subkwargs['label'] = label

        if colorBar is True or colorBar == 1:
            cbar = createColorBar(gci,
                                  orientation=kwargs.pop(
                                      'orientation', 'horizontal'),
                                  size=kwargs.pop('size', 0.2),
                                  pad=kwargs.pop('pad', None))
            updateColorBar(cbar, **subkwargs)
        elif colorBar is not False:
            cbar = updateColorBar(colorBar, **subkwargs)

        if markers:
            ticks = np.arange(len(uniquemarkers))
            #print('show.ticks ********************', ticks)
            cbar.set_ticks(ticks)
            labels = []
            for marker in uniquemarkers:
                labels.append(str((marker)))
            #print('show.labels ********************', labels)
            cbar.set_ticklabels(labels)

    if coverage is not None:
        if len(data) == mesh.cellCount():
            addCoverageAlpha(gci, coverage)
        else:
            raise BaseException('toImplement')
            # addCoverageAlpha(gci, pg.cellDataToPointData(mesh, coverage))

    if showMesh:
        drawMesh(ax, mesh, **kwargs)

    if showBoundary is True or showBoundary == 1:
        b = mesh.boundaries(pg.find(mesh.boundaryMarkers() != 0))
        pg.mplviewer.drawSelectedMeshBoundaries(ax,
                                                b,
                                                color=(0.0, 0.0, 0.0, 1.0),
                                                linewidth=1.4)

    if not hold or block is not False and plt.get_backend() != "Agg":
        if data is not None:
            if len(data) == mesh.cellCount():
                cb = CellBrowser(mesh, data, ax=ax)

        plt.show(block=block)
        try:
            plt.pause(0.01)
        except BaseException as _:

            pass

    if hold:
        pg.mplviewer.hold(val=lastHoldStatus)

    if savefig:
        print('saving: ' + savefig + ' ...')

        if '.' not in savefig:
            savefig += '.pdf'

        ax.figure.savefig(savefig, bbox_inches='tight')
        # rc params savefig.format=pdf

        if '.eps' in savefig:
            try:
                print("trying eps2pdf ... ")
                os.system('epstopdf ' + savefig)
            except BaseException as _:
                pass
        print('..done')

    return ax, cbar
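
# Usage sketch (assumptions: pygimli is imported as pg, pygimli.meshtools as mt, and
# showMesh is defined as above; the marker-to-value map is illustrative only): plot
# per-cell data with a labelled colorbar and overlay the mesh edges.
import pygimli as pg
import pygimli.meshtools as mt

world = mt.createWorld(start=[-10, 0], end=[10, -10], layers=[-3, -7])
mesh = mt.createMesh(world, quality=32, area=0.5)
res = pg.solver.parseMapToCellArray([[1, 10.0], [2, 50.0], [3, 200.0]], mesh)
ax, cbar = showMesh(mesh, data=res, label='Resistivity [Ohm m]', showMesh=True)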
Пример #43
0
    def test_IndexAccess(self):
        # (double) array/vector
        an = np.arange(10.)
        ag = pg.RVector(an)

        # bn = nd.array(bool)
        bn = (an > 4.)
        self.assertEqual(type(bn), np.ndarray)
        self.assertEqual(bn.dtype, 'bool')
        self.assertEqual(sum(bn), 5)

        # bg = BVector
        bg = (ag > 4.)
        self.assertEqual(type(bg), pg.BVector)
        self.assertEqual(sum(bg), 5)

        # BVector(nd.array(bool))
        self.assertEqual(len(bg), len(pg.BVector(bn)))
        self.assertEqual(sum(bg), sum(pg.BVector(bn)))
        self.assertEqual(bg[0], pg.BVector(bn)[0])
        np.testing.assert_array_equal(bg, pg.BVector(bn))

        # In = nd.array(int)
        In = np.nonzero(bn)[0]
        self.assertEqual(type(In), np.ndarray)
        self.assertEqual(In.dtype, 'int')
        self.assertEqual(len(In), 5)
        self.assertEqual(In[0], 5)

        # np.nonzero(bg)
        np.testing.assert_array_equal(In, np.nonzero(bg)[0])

        # Ig = IndexArray
        Ig = pg.find(bg)
        self.assertEqual(type(Ig), pg.IndexArray)
        self.assertEqual(len(Ig), 5)
        self.assertEqual(Ig[0], 5)

        # pg.find(nd.array(bool))
        np.testing.assert_array_equal(Ig, pg.find(bn))

        ## Indexoperators ##
        # ndarray [nd.array(bool)] == ndarray [nd.array(int)]
        np.testing.assert_equal(an[bn], an[In])
        self.assertEqual(len(an[bn]), 5)
        self.assertEqual(an[bn][0], 5)

        # ndarray[IndexArray] == ndarray [nd.array(int)]
        np.testing.assert_equal(an[Ig], an[In])

        # ndarray[BVector] == ndarray [nd.array(bool)]
        np.testing.assert_array_equal(an[np.array(bg, dtype='bool')], an[bn])
        np.testing.assert_array_equal(an[np.array(bg)], an[bn])
        np.testing.assert_array_equal(an[bg.array()], an[bn])
        ## this fails because it is interpreted as an[[0,0,0,1,1,1]] ..
        #np.testing.assert_equal(an[bg], an[bn])

        # RVector [BVector] == RVector [IndexArray]
        np.testing.assert_array_equal(ag[bg], ag[Ig])
        self.assertEqual(len(ag[bg]), 5)
        self.assertEqual(ag[bg][0], 5)

        # RVector [BVector] ==  RVector [nd.array(bool)]
        np.testing.assert_array_equal(ag[bg], ag[bn])
        np.testing.assert_equal(sum(ag[bg]), sum(ag[bn]))

        # RVector [IndexArray] ==  RVector [nd.array(int)]
        np.testing.assert_array_equal(ag[Ig], ag[In])
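
# Takeaway sketch (based on the equivalences checked above; assumes pygimli as pg and
# numpy as np): indexing an ndarray directly with a pg.BVector is misread as integer
# indexing, so convert the mask explicitly or go through pg.find().
import numpy as np
import pygimli as pg

an = np.arange(10.)
ag = pg.RVector(an)
bg = ag > 4.                          # pg.BVector mask
print(an[np.array(bg, dtype=bool)])   # ndarray indexed via the converted mask
print(an[pg.find(bg)])                # ndarray indexed via a pg.IndexArray
print(ag[bg])                         # RVector accepts a BVector mask directly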
Пример #44
0
def rmsWithErr(a, b, err, errtol=1):
    """Compute (abs-)root mean square of values with error above a threshold"""
    fi = pg.find(err < errtol)
    return rms(a[fi] - b[fi])
Пример #45
0
def drawShapes( ax, mesh, u ):
    #ax.set_aspect( 'equal' )
    N = 11
    
    mesh3 = g.createMesh3D( g.asvector( np.linspace( 0, 1, N ) ), 
                            g.asvector( np.linspace( 0, 1, N ) ), 
                            g.asvector( np.linspace( 0, 1, N ) ) )
                            
    uc = g.RVector( mesh3.nodeCount(  ) )
    
    grads = g.stdVectorRVector3( )
    pnts = g.stdVectorRVector3( )
    
    c = mesh.cell( 0 )
    imax=g.find( u == max( u ) )[0]
    
    N = c.createShapeFunctions()[ imax ] 
    print(imax, N)
#    print imax, c.shape().createShapeFunctions()[imax]
    for i in range( c.nodeCount() ):
        print( c.rst( i ), N( c.rst( i ) ) )
    
    # draw nodes
    for i in range( c.nodeCount() ):
        col = 'black'
        if i == imax:
            col = 'red'
            
        #ax.plot( [c.node( i ).pos()[0], c.node( i ).pos()[0] ], 
                 #[c.node( i ).pos()[1], c.node( i ).pos()[1] ],
                 #[c.node( i ).pos()[2], c.node( i ).pos()[2] ],
                #'.', markersize = 15, linewidth=0, color=col )
    
    newNode = []
    
    for i in range( mesh3.nodeCount(  ) ):
        p = c.shape().xyz( mesh3.node( i ).pos() )
        newNode.append( p )
        
        #ax.plot( p[0], p[1], '.', zorder=10, color='black', markersize = 1 )
        
        if not c.shape().isInside( p ): 
            uc[ i ] = -99.0
            grads.append( g.RVector3( 0.0, 0.0 ) )
            continue
          
        uc[ i ] = c.pot( p, u ) 
        gr = c.grad( p, u ).normalise()
        grads.append( gr )
        
        #ax.plot( [ p[ 0 ], p[ 0 ] + gr[ 0 ]*0.1 ], 
                 #[ p[ 1 ], p[ 1 ] + gr[ 1 ]*0.1 ], 
                 #[ p[ 2 ], p[ 2 ] + gr[ 2 ]*0.1 ], '-', color='black' )
    
        #pnts.append( p )
            
    #print len(pnts)
    #Z = np.ma.masked_where( uc == -99., uc )
    ##ax.plot( g.x(pnts), g.y(pnts), g.z(pnts), '.' )
    
    for i, n in enumerate( mesh3.nodes() ):
        n.setPos( newNode[ i ] )
        
    mesh3.addExportData( 'u', uc.setVal( 0.0, g.find( uc == -99 ) ) )
    name = 'cell' + str( c.nodeCount() ) + '-' + str( imax )
    print "write ", name
    mesh3.exportVTK( name, grads )