mesh_fwd = mt.createMesh(geom, quality=34, area=0.25)
# Map cell region markers to velocities in m/s via fancy indexing
# (assumes markers are 0, 1, 2 -- TODO confirm against `geom`).
model = np.array([2000., 2300, 1700])[mesh_fwd.cellMarkers()]
pg.show(mesh_fwd, model,
        label=pg.unit('vel'), cMap=pg.cmap('vel'), nLevs=3, logScale=False)

###############################################################################
# Next, we create an empty DataContainer and fill it with sensor positions and
# all possible shot-receiver pairs for the two-borehole scenario using the
# product function in the itertools module (Python standard library).

from itertools import product
numbers = np.arange(len(depth))
# Shots are sensors 0..n-1, geophones are sensors n..2n-1; every shot is
# paired with every geophone (assumes `sensors` holds both boreholes in
# that order -- TODO confirm).
rays = list(product(numbers, numbers + len(numbers)))

# Empty container
scheme = pg.DataContainer()

# Add sensors
for sen in sensors:
    scheme.createSensor(sen)

# Add measurements
rays = np.array(rays)
scheme.resize(len(rays))
scheme["s"] = rays[:, 0]  # shot sensor index per measurement
scheme["g"] = rays[:, 1]  # geophone sensor index per measurement
scheme["valid"] = np.ones(len(rays))  # mark every pair as usable
scheme.registerSensorIndex("s")
scheme.registerSensorIndex("g")

###############################################################################
Esempio n. 2
0
            tsr = self.dataMatrix[iS][iG]  # shot-receiver travel time
            dt = self.timeMatrix[iS] + self.timeMatrix[iG] - tsr  # difference
            weight = np.maximum(1 - 2 * self.frequency * dt, 0.0)  # 1 on ray
            if self.debug:
                print(pg.sum(pg.sign(weight)))
            wa = weight * cellSizes
            self.jacobian()[i] = wa / np.sum(wa) * tsr / slowness
            # TODO: check "invalid value in true divide" warning

    def createDefaultStartModel(self):
        """Build a homogeneous starting model used when none is supplied.

        Returns a constant slowness vector (0.001 s/m) sized to the
        forward operator's parameter count.
        """
        n_params = self.fop.regionManager().parameterCount()
        return pg.RVector(n_params, 0.001)

if __name__ == '__main__':
    # Set up FMM modelling operator and run a synthetic model
    mydata = pg.DataContainer('example_topo.sgt', 's g')
    print(mydata)
    # Parameter mesh with inversion region surrounded by a boundary region.
    mymesh = pg.meshtools.createParaMesh(mydata,
                                         boundary=0,
                                         paraBoundary=5,
                                         paraDepth=20,
                                         quality=34.5,
                                         paraMaxCellSize=5)
    mymesh.createNeighbourInfos()
    print(mymesh)
    # Slowness start model: velocity increasing with depth from vTop to vBot.
    slo = createGradientModel2D(mydata, mymesh, vTop=1000, vBot=2000)
    fwd = TravelTimeFMM(mymesh, mydata, frequency=500)  #
    fwd.createRefinedForwardMesh(False)
    resp = fwd.response(slo)
    # Store the synthetic travel times back into the data container.
    mydata.set('t', resp)
    print("ready with response, starting jacobian")
Esempio n. 3
0
    def simulate(mesh, slowness, scheme, verbose=False, **kwargs):
        """Simulate a traveltime measurement.

        Perform the forward task for a given mesh,
        a slowness distribution (per cell) and return data
        (Traveltime) for a measurement scheme.
        This is a static method since it does not interfere with the Managers
        inversion approaches.

        Parameters
        ----------
        mesh : :gimliapi:`GIMLI::Mesh`
            Mesh to calculate for.

        slowness : array(mesh.cellCount()) | array(N, mesh.cellCount())
            slowness distribution for the given mesh cells can be:

            * a single array of len mesh.cellCount()
            * a matrix of N slowness distributions of len mesh.cellCount()
            * a res map as [[marker0, res0], [marker1, res1], ...]

        scheme : :gimliapi:`GIMLI::DataContainer`
            data measurement scheme

        **kwargs :
            * noiseLevel : float [0]
                relative noise level; if > 0 and no 'err' entries exist,
                an error estimate is built and gaussian noise is added
            * noiseAbs : float [1e-4]
                absolute error used for the error estimate
            * returnArray : bool [False]
                return only the traveltime array instead of a DataContainer

        Returns
        -------
        t : array(N, data.size()) | DataContainer
            The resulting simulated travel time values.
            Either one column array or matrix in case of slowness matrix.
            A DataContainer is returned otherwise.

        Raises
        ------
        ValueError
            If the slowness array does not match the mesh cell count.
        """
        fop = Refraction.createFOP(verbose=verbose)

        fop.setData(scheme)
        fop.setMesh(mesh, ignoreRegionManager=True)

        if len(slowness) == mesh.cellCount():
            if max(slowness) > 1.:
                # Heuristic: slowness in s/m is << 1, so larger values are
                # almost certainly velocities; use their reciprocal.
                print('Warning: slowness values larger than 1 (' +
                      str(max(slowness)) + ').. assuming that are velocity '
                      'values .. building reciprocity')
                t = fop.response(1. / slowness)
            else:
                t = fop.response(slowness)
        else:
            print(mesh)
            print("slowness: ", slowness)
            # ValueError instead of the former BaseException: it still reaches
            # any "except BaseException" caller but is also catchable via
            # "except Exception" and names the actual problem.
            raise ValueError("Simulate called with wrong slowness array.")

        ret = pg.DataContainer(scheme)
        ret.set('t', t)

        noiseLevel = kwargs.pop('noiseLevel', 0)

        if noiseLevel > 0:
            if not ret.allNonZero('err'):
                # 't' is already set above; only the error estimate is needed.
                ret.set(
                    'err',
                    pg.physics.Refraction.estimateError(
                        ret,
                        absoluteError=kwargs.pop('noiseAbs', 1e-4),
                        relativeError=noiseLevel))

            if verbose:
                print("Data error estimates (min:max) ", min(ret('err')), ":",
                      max(ret('err')))

            # Add gaussian noise scaled by the per-datum error.
            t += pg.randn(ret.size()) * ret('err')
            ret.set('t', t)

        if kwargs.pop('returnArray', False):
            return t

        return ret
Esempio n. 4
0
def prepare(electrode_groups,
            measurements,
            first_arrivals,
            mesh_cut_tool_param=None,
            use_only_verified=False):
    """
    Prepares data for GIMLI inversion.

    :param electrode_groups: electrode groups searched via _find_el
    :param measurements: measurement records (.data, .source_id,
        .receiver_start, .receiver_stop, .channel_start, .file)
    :param first_arrivals: picked first-arrival times, looked up via _find_fa
    :param mesh_cut_tool_param: optional cut-tool parameters; when given,
        sources/receivers outside the local unit-cube inversion region are
        dropped
    :param use_only_verified: if True, only verified picks are used
    :return: pg.DataContainer with sensors and 's', 'g', 't' arrays
    """
    electrodes = []
    sensor_ids = []
    # Accumulate into plain lists; pd.Series.append inside the loop was
    # quadratic and has been removed in pandas >= 2.0.
    s_list = []
    g_list = []
    t_list = []

    data = pg.DataContainer()
    data.registerSensorIndex("s")
    data.registerSensorIndex("g")

    if mesh_cut_tool_param is not None:
        base_point, gen_vecs = cut_point_cloud.cut_tool_to_gen_vecs(
            mesh_cut_tool_param, only_inv=True)
        inv_tr_mat = cut_point_cloud.inv_tr(gen_vecs)

    def in_region(e_id):
        """True if electrode e_id maps into the local unit cube."""
        e = _find_el(electrode_groups, e_id)
        nl = cut_point_cloud.tr_to_local(base_point, inv_tr_mat,
                                         np.array([e.x, e.y, e.z]))
        return 0 <= nl[0] <= 1 and 0 <= nl[1] <= 1 and 0 <= nl[2] <= 1

    def sensor_index(e_id):
        """Return the container sensor id for e_id, creating it on demand."""
        for j, e in enumerate(electrodes):
            if e.id == e_id:
                return sensor_ids[j]
        e = _find_el(electrode_groups, e_id)
        if e is None:
            print("chyba")
        electrodes.append(e)
        s_id = data.createSensor([e.x, e.y, e.z])
        sensor_ids.append(s_id)
        return s_id

    for ms in measurements:
        if ms.data is None:
            continue

        fa_list = []

        # receivers (channel order may be ascending or descending)
        if ms.receiver_stop >= ms.receiver_start:
            receivers = list(range(ms.receiver_start, ms.receiver_stop + 1))
        else:
            receivers = list(range(ms.receiver_start, ms.receiver_stop - 1,
                                   -1))

        # remove measurements outside inversion region
        if mesh_cut_tool_param is not None:
            if not in_region(ms.source_id):
                continue
            # BUG FIX: the original collected positional indices into a set
            # but then filtered by receiver *id* ("r not in ind_to_rem"),
            # so out-of-region receivers were usually kept. Filter directly
            # on the region test instead.
            receivers = [r for r in receivers if in_region(r)]

        receivers_used = []
        for meas_id, e_id in enumerate(receivers):
            fa = _find_fa(first_arrivals, ms.file,
                          meas_id + ms.channel_start - 1)
            if fa is None:
                print("chyba")
                continue
            if fa.use and (fa.verified or not use_only_verified):
                receivers_used.append(e_id)
                fa_list.append(fa.time if fa.verified else fa.time_auto)

        if not receivers_used:
            continue

        # Register receiver sensors first, then the source, to keep the
        # original sensor numbering in the container.
        receiver_sensors = [sensor_index(e_id) for e_id in receivers_used]
        source_sensor = sensor_index(ms.source_id)

        s_list.extend([source_sensor] * len(receivers_used))
        g_list.extend(receiver_sensors)
        t_list.extend(fa_list)

    n = len(s_list)
    data.resize(n)
    if n > 0:
        data.set('s', pd.Series(s_list))
        data.set('g', pd.Series(g_list))
        data.set('t', pd.Series(t_list))

    return data
Esempio n. 5
0
# Compare FMM travel times against the analytical two-layer solution.
tAna = analyticalSolution2Layer(x, zlay, v[0], v[1])
print("min(dt)={} ms  max(dt)={} ms".format(
    min(tFMM - tAna) * 1000,
    max(tFMM - tAna) * 1000))

###############################################################################
# In order to use the Dijkstra, we extract the surface positions >0
mx = pg.x(mesh.positions())
my = pg.y(mesh.positions())
fi = pg.find((my == 0.0) & (mx >= 0))
px = np.sort(mx(fi))

###############################################################################
# A data container with index arrays named s (shot) and g (geophones) is
# created and filled with the positions and shot/geophone indices.
data = pg.DataContainer()
data.registerSensorIndex('s')
data.registerSensorIndex('g')
for pxi in px:
    data.createSensor(pg.RVector3(pxi, 0.0))

# One measurement per geophone; the shot sits at sensor 0.
ndata = len(px) - 1
data.resize(ndata)
data.set('s', pg.RVector(ndata, 0))  # only one shot at first sensor
data.set('g', pg.utils.grange(1, ndata, 1))  # all others and geophones
fop = pg.TravelTimeDijkstraModelling(mesh, data)
# Cell attributes are used as the slowness model here.
tDijkstra = fop.response(mesh.cellAttributes())

###############################################################################
# We plot the calculated and measured travel times and relative differences
fig, ax = plt.subplots()
Esempio n. 6
0
 def loadData(self, filename):
     """Load data from file.

     Reads a DataContainer with shot ('s') and geophone ('g') sensor
     tokens and keeps the file stem in self.basename for later output.
     """
     # TODO check for file formats and import if necessary
     data = pg.DataContainer(filename, sensorTokens='s g')
     # basename = filename without its last extension
     self.basename = filename[:filename.rfind('.')]
     self.setDataContainer(data)
                                    ybound=500,
                                    quality=34,
                                    isSubSurface=True)
meshERT.save("meshERT_%d.bms" % case)

# ERT inversion
ert = ERTManager()
ert.setMesh(meshERT)

resinv = ert.invert(ertData, lam=30, zWeight=zWeight, maxIter=maxIter)
print("ERT chi: %.2f" % ert.inv.chi2())
print("ERT rms: %.2f" % ert.inv.inv.relrms())
np.savetxt("res_conventional_%d.dat" % case, resinv)

# Seismic inversion
ttData = pg.DataContainer("tttrue.dat")
print(ttData)
rst = TravelTimeManager(verbose=True)
rst.setMesh(meshRST, secNodes=3)

# Gradient start model spanning the true velocity range.
veltrue = np.loadtxt("veltrue.dat")
startmodel = createGradientModel2D(ttData, meshRST, np.min(veltrue),
                                   np.max(veltrue))
# Stored as velocity (1/slowness) for inspection.
np.savetxt("rst_startmodel_%d.dat" % case, 1 / startmodel)
vest = rst.invert(ttData,
                  zWeight=zWeight,
                  startModel=startmodel,
                  maxIter=maxIter,
                  lam=220)
print("RST chi: %.2f" % rst.inv.chi2())
print("RST rms: %.2f" % rst.inv.inv.relrms())
pg.boxprint("Calculating case %s" % case)

# Load meshes and data
ertScheme = pg.DataContainerERT("ert_filtered.data")

# Petrophysical bounds / porosity (semantics from surrounding project --
# presumably fractional limits; verify against settings module).
fr_min = 0.1
fr_max = 0.9
phi = np.ones(paraDomain.cellCount()) * poro

# Setup managers and equip with meshes
ert = ERTManager()
ert.setMesh(mesh)
ert.setData(ertScheme)
ert.fop.createRefinedForwardMesh()

ttData = pg.DataContainer("rst_filtered.data", "s g")
rst = Refraction()
rst.setMesh(paraDomain)
rst.setData(ttData)
rst.fop.createRefinedForwardMesh()

# Set errors (constant per method, from settings: rste / erte)
ttData.set("err", np.ones(ttData.size()) * rste)
ertScheme.set("err", np.ones(ertScheme.size()) * erte)

if constrained:
    # Find cells around boreholes to fix ice content to zero
    fixcells = []
    for cell in paraDomain.cells():
        x, y, _ = cell.center()
        if (x > 9) and (x < 11) and (y > -depth_5198):
Esempio n. 9
0
    def simulate(self, mesh, scheme, slowness=None, vel=None,
                 secNodes=2, noiseLevel=0.0, noiseAbs=0.0, seed=None, **kwargs):
        """Simulate Traveltime measurements.

        Perform the forward task for a given mesh, a slowness distribution (per
        cell) and return data (traveltime) for a measurement scheme.

        Parameters
        ----------
        mesh : :gimliapi:`GIMLI::Mesh`
            Mesh to calculate for or use the last known mesh.
        scheme: :gimliapi:`GIMLI::DataContainer`
            Data measurement scheme needs 's' for shot and 'g' for geophone
            data token.
        slowness : array(mesh.cellCount()) | array(N, mesh.cellCount())
            Slowness distribution for the given mesh cells can be:

            * a single array of len mesh.cellCount()
            * a matrix of N slowness distributions of len mesh.cellCount()
            * a res map as [[marker0, res0], [marker1, res1], ...]
        vel : array(mesh.cellCount()) | array(N, mesh.cellCount())
            Velocity distribution for the given mesh cells.
            Will overwrite given slowness.
        secNodes: int [2]
            Number of refinement nodes to increase accuracy of the forward
            calculation.
        noiseLevel: float [0.0]
            Add relative noise to the simulated data. noiseLevel*100 in %
        noiseAbs: float [0.0]
            Add absolute noise to the simulated data in ms.
        seed: int [None]
            Seed the random generator for the noise.

        Keyword Arguments
        -----------------
        returnArray: [False]
            Return only the calculated times.
        verbose: [self.verbose]
            Overwrite verbose level.
        **kwargs
            Additional kwargs ...

        Returns
        -------
        t : array(N, data.size()) | DataContainer
            The resulting simulated travel time values.
            Either one column array or matrix in case of slowness matrix.
        """
        verbose = kwargs.pop('verbose', self.verbose)

        fop = self.fop
        fop.data = scheme
        fop.verbose = verbose

        if mesh is not None:
            self.applyMesh(mesh, secNodes=secNodes, ignoreRegionManager=True)

        # Velocity takes precedence over a given slowness.
        if vel is not None:
            slowness = 1/vel

        if slowness is None:
            pg.critical("Need some slowness or velocity distribution for simulation.")

        # NOTE(review): pg.critical is expected to abort; otherwise `t`
        # below would be unbound -- confirm against pygimli semantics.
        if len(slowness) == self.fop.mesh().cellCount():
            t = fop.response(slowness)
        else:
            print(self.fop.mesh())
            print("slowness: ", slowness)
            pg.critical("Simulate called with wrong slowness array.")

        ret = pg.DataContainer(scheme)
        ret.set('t', t)

        if noiseLevel > 0 or noiseAbs > 0:
            # Build an absolute error estimate only if none was provided.
            if not ret.allNonZero('err'):
                ret.set('t', t)
                err = noiseAbs + t * noiseLevel
                ret.set('err', err)

            pg.verbose("Absolute data error estimates (min:max) {0}:{1}".format(
                        min(ret('err')), max(ret('err'))))

            # Gaussian noise scaled by the per-datum error.
            t += pg.randn(ret.size(), seed=seed) * ret('err')
            ret.set('t', t)

        if kwargs.pop('returnArray', False) is True:
            return t

        return ret
def showSynthData(synthPath):
    """Plot the synthetic hydrogeophysical model: conductivity, velocity
    field and saturation snapshots, saving each figure to disk.

    Parameters
    ----------
    synthPath : str
        Directory (with trailing separator) containing synthGeom.bms,
        synth.bms, synthK/Vel/Sat .npy arrays, synth.shm and rhoa arrays.
    """
    geom = pg.load(synthPath + 'synthGeom.bms')
    mesh = pg.load(synthPath + 'synth.bms')
    k = np.load(synthPath + 'synthK.npy')
    vel = np.load(synthPath + 'synthVel.npy')
    sat = np.load(synthPath + 'synthSat.npy')

    scheme = pg.DataContainer(synthPath + 'synth.shm', 'a b m n')
    rhoaR = np.load(synthPath + 'synthRhoaRatio.npy')
    rhoa = np.load(synthPath + 'synthRhoa.npy')

    row = 3
    col = 2

    ####### START model perm + input
    ax = savefig(mesh,
                 geom,
                 k,
                 'Hydraulic conductivity $K$ in m$/$s',
                 out='hydrConductModel',
                 cMin=1e-5,
                 cMax=1e-2,
                 nLevs=4,
                 cmap='viridis')

    ####### START velocity
    axVel, _ = pg.show(mesh,
                       np.sqrt(vel[0]**2 + vel[1]**2),
                       logScale=0,
                       colorBar=1,
                       pad=0.55,
                       label='Velocity $|v|$ in m$/$s',
                       hold=1)

    # Coarse overlay mesh for streamline drawing.
    meshC = pg.meshtools.createMesh(geom, quality=33, area=0.5, smooth=[1, 10])
    pg.show(mesh,
            data=vel,
            ax=axVel,
            coarseMesh=meshC,
            color='black',
            linewidth=0.5,
            dropTol=1e-6)
    pg.show(geom, ax=axVel, fillRegion=False)
    saveAxes(axVel, 'hydrVelocity', adjust=True)

    ##### START Saturation
    axs = plt.subplots(row,
                       col,
                       sharex=True,
                       sharey=True,
                       figsize=(10. * 0.65, 7.25 * 0.65))[1].flatten()

    satScale = 0.001
    for i, a in enumerate(axs):
        savefig(
            mesh,
            geom,
            # BUG FIX: "/" produced a float index (TypeError on Python 3);
            # use floor division to pick evenly spaced time steps.
            sat[i * len(sat) // len(axs) + 1] * satScale,
            label=None,
            out=None,
            cMin=0,
            cMax=2.5,
            ax=a,
            adjust=True)
        pg.mplviewer.drawSensors(a,
                                 scheme.sensorPositions(),
                                 diam=0.15,
                                 color='green')

        add_inner_title(a, "t = %d days" % days[i], 3, color="w")

        # Only the outer axes keep their labels.
        if i < (row - 1) * col:
            a.set_xlabel('')
        if i % col:
            a.set_ylabel('')
        a.set_ylim([-16, 0])

    pg.mplviewer.saveFigure(axs[0].figure, "hydrSaturation")
    pg.mplviewer.createColorBarOnly(cMin=0,
                                    cMax=2.5,
                                    logScale=False,
                                    cMap='Spectral_r',
                                    nLevs=5,
                                    label=r'Concentration $c$ in g$/$l',
                                    orientation='horizontal',
                                    savefig='hydrSaturationCbar')

    ###### END Saturation
    pg.wait()
import numpy as np

import pygimli as pg
pg.verbose = print  # temporary
import pygimli.meshtools as mt
from settings import depth_5000, depth_5198, erte, rste

ertData = pg.DataContainerERT("ert.data")

print("Number of electrodes:", ertData.sensorCount())
print(ertData)

rstData = pg.DataContainer("rst.data", "s g")
print("Number of shot/receivers:", rstData.sensorCount())
# Largest x coordinate of the seismic sensors (presumably the profile end;
# used further down in the original script).
maxrst = pg.max(pg.x(rstData.sensors()))

# Collect ERT electrodes beyond x = 50 m for removal.
idx = []
for i, sensor in enumerate(ertData.sensors()):
    if sensor[0] >= 50.0:
        idx.append(i)

ertData.removeSensorIdx(idx)
ertData.removeInvalid()
ertData.removeUnusedSensors()
# Constant absolute error from settings (erte), then save the filtered set.
ertData.set("err", pg.Vector(ertData.size(), erte))
ertData.save("ert_filtered.data")

rstData.set("err", pg.Vector(rstData.size(), rste))
#
# # Remove two data points with high v_a at zero-offset
# Calculate offset
Esempio n. 12
0
def importGTT(filename, return_header=False):
    """Import refraction data from Tomo+ GTT data file into DataContainer.

    Parameters
    ----------
    filename : str
        Binary GTT file (big-endian): a 100-byte file header followed by
        per-shot records (24 bytes) each trailed by per-receiver records
        (24 bytes: id, position, traveltime).
    return_header : bool [False]
        Also return the parsed header dictionary.

    Returns
    -------
    data : pg.DataContainer
        Container with sensors and 's', 'g', 't' entries; traveltimes <= 0
        are marked invalid.
    header : dict
        File header fields, only if return_header is True.
    """
    header = {}
    with open(filename, 'rb') as fid:
        block = fid.read(100)  # fixed-size file header
        nshots = struct.unpack(">I", block[:4])[0]
        ngeoph = struct.unpack(">I", block[4:8])[0]
        header['ntrace'] = struct.unpack(">Q", block[8:16])[0]
        header['nchan'] = struct.unpack(">I", block[16:20])[0]
        header['tminmax'] = struct.unpack(">2f", block[20:28])
        header['offsetminmax'] = struct.unpack(">2f", block[28:36])
        header['angle'] = struct.unpack(">f", block[36:40])[0]
        header['origin'] = struct.unpack(">2f", block[40:48])
        header['unit'] = struct.unpack(">I", block[48:52])[0]
        header['shotSpacing'] = struct.unpack(">f", block[52:56])[0]
        header['receiverSpacing'] = struct.unpack(">f", block[56:60])[0]
        SPOS = np.zeros((nshots, 3))
        # Receiver ids may exceed ngeoph; *5 is a generous upper bound
        # inherited from the original code -- TODO confirm.
        RPOS = np.zeros((ngeoph*5, 3))
        SHOT, REC, TT = [], [], []
        for i in range(nshots):
            block = fid.read(24)  # shot record: id, channel count, position
            shotid = struct.unpack(">I", block[:4])[0]
            nci = struct.unpack(">I", block[4:8])[0]  # channels for the shot
            spos = np.array(struct.unpack(">4f", block[8:24]))
            SPOS[i, :] = spos[:3]
            for j in range(nci):
                block = fid.read(24)  # receiver record: id, position, time
                recid = struct.unpack(">I", block[:4])[0]
                rpos = np.array(struct.unpack(">4f", block[4:20]))
                RPOS[recid, :] = rpos[:3]
                tt = struct.unpack(">f", block[20:24])[0]
                SHOT.append(shotid)
                REC.append(recid)
                TT.append(tt)

        # Ids become 0-based; receiver ids are shifted past the shot block.
        SHOT = np.array(SHOT, dtype=int) - 1
        REC = np.array(REC, dtype=int) - 1 + len(SPOS)
        pos = np.vstack((SPOS, RPOS[1:max(REC)+1]))
        # Merge sensors sharing the same x coordinate into unique positions.
        x, ifwd, irev = np.unique(pos[:, 0],
                                  return_index=True, return_inverse=True)
        data = pg.DataContainer()
        data.registerSensorIndex('s')
        data.registerSensorIndex('g')
        for i in ifwd:
            data.createSensor([pos[i, 0], pos[i, 2], 0])

        data.resize(len(TT))
        data.set('t', np.array(TT))
        data.set('s', irev[SHOT].astype(float))
        data.set('g', irev[REC].astype(float))
        data.markValid(data('t') > 0.)
        if return_header:
            return data, header
        else:
            print(header)
            return data
Esempio n. 13
0
def inv_st(inversion_conf, project_conf):
    """Run the full seismic traveltime inversion pipeline.

    Steps: prepare geometry and snap electrodes, build and modify the
    inversion mesh, load the snapped data, set up a Dijkstra traveltime
    forward operator plus a log-LU-transformed inversion, run it and export
    the velocity model (VTK and optionally p3d), in global or local
    coordinates.

    :param inversion_conf: holds .inversion_param and .mesh_cut_tool_param
    :param project_conf: project-level configuration passed to the helpers
    """
    inv_par = inversion_conf.inversion_param
    cut_par = inversion_conf.mesh_cut_tool_param

    remove_old_files()

    ret, bw_surface = prepare(cut_par, inv_par, project_conf)
    if not ret:
        return
    #return

    # snap electrodes
    print()
    print_headline("Snapping electrodes")
    if inv_par.meshFrom == MeshFrom.SURFACE_CLOUD:
        snap_surf.main(inv_par,
                       project_conf,
                       bw_surface,
                       max_dist=inv_par.snapDistance)
    else:
        snap_electrodes.main(inv_par,
                             project_conf,
                             max_dist=inv_par.snapDistance)

    #ball_mesh("inv_mesh.msh", "inv_mesh2.msh", [-622342, -1128822, 22], 5.0)
    #return

    print()
    print_headline("Creating inversion mesh")
    mesh_from_brep("inv_mesh_tmp.brep", "inv_mesh_tmp.msh2", project_conf,
                   inv_par)

    print()
    print_headline("Modify mesh")
    modify_mesh("inv_mesh_tmp.msh2", "inv_mesh.msh", cut_par)

    #if inv_par.meshFrom == MeshFrom.SURFACE_CLOUD:
    print()
    print_headline("Snapping electrodes final")
    snap_electrodes.main(inv_par,
                         project_conf,
                         max_dist=inv_par.snapDistance,
                         final=True)

    print()
    print_headline("Inversion")

    # load data file
    data = pg.DataContainer("input_snapped.dat",
                            sensorTokens='s g',
                            removeInvalid=False)

    # remove invalid data
    oldsize = data.size()
    data.removeInvalid()
    newsize = data.size()
    if newsize < oldsize:
        print('Removed ' + str(oldsize - newsize) + ' values.')

    # create FOP
    fop = pg.core.TravelTimeDijkstraModelling(verbose=inv_par.verbose)
    fop.setThreadCount(psutil.cpu_count(logical=False))
    fop.setData(data)

    # create Inv
    inv = pg.core.RInversion(verbose=inv_par.verbose, dosave=False)
    # variables tD, tM are needed to prevent destruct objects
    # model transform bounds slowness to [1/maxModel, 1/minModel]
    tM = pg.core.RTransLogLU(1.0 / inv_par.maxModel, 1.0 / inv_par.minModel)
    tD = pg.core.RTrans()
    inv.setTransData(tD)
    inv.setTransModel(tM)
    inv.setForwardOperator(fop)

    # mesh
    # NOTE(review): mesh_file is hard-coded, so the mesh-generation branch
    # below is currently dead code (kept as a toggle, presumably).
    mesh_file = "inv_mesh.msh"
    if mesh_file == "":
        depth = inv_par.depth
        if depth is None:
            depth = pg.core.DCParaDepth(data)

        poly = pg.meshtools.createParaMeshPLC(
            data.sensorPositions(),
            paraDepth=depth,
            paraDX=inv_par.paraDX,
            paraMaxCellSize=inv_par.maxCellArea,
            paraBoundary=2,
            boundary=2)

        if inv_par.verbose:
            print("creating mesh...")
        mesh = pg.meshtools.createMesh(poly,
                                       quality=inv_par.quality,
                                       smooth=(1, 10))
    else:
        mesh = pg.Mesh(pg.load(mesh_file))

    mesh.createNeighbourInfos()

    mesh.createSecondaryNodes()

    if inv_par.verbose:
        print(mesh)

    sys.stdout.flush()  # flush before multithreading
    fop.setMesh(mesh)
    fop.regionManager().setConstraintType(1)

    if not inv_par.omitBackground:
        if fop.regionManager().regionCount() > 1:
            fop.regionManager().region(1).setBackground(True)

    if mesh_file == "":
        fop.createRefinedForwardMesh(True, False)
    else:
        fop.createRefinedForwardMesh(inv_par.refineMesh, inv_par.refineP2)

    paraDomain = fop.regionManager().paraDomain()
    inv.setForwardOperator(fop)  # necessary?

    # inversion parameters
    inv.setData(data('t'))
    absoluteError = 0.001
    relativeError = 0.001
    inv.setAbsoluteError(absoluteError + data('t') * relativeError)
    #inv.setRelativeError(pg.RVector(data.size(), 0.03))
    fop.regionManager().setZWeight(inv_par.zWeight)
    inv.setLambda(inv_par.lam)
    inv.setOptimizeLambda(inv_par.optimizeLambda)
    inv.setMaxIter(inv_par.maxIter)
    inv.setRobustData(inv_par.robustData)
    inv.setBlockyModel(inv_par.blockyModel)
    inv.setRecalcJacobian(inv_par.recalcJacobian)

    startModel = fop.createDefaultStartModel()
    inv.setModel(startModel)

    # Run the inversion
    sys.stdout.flush()  # flush before multithreading
    model = inv.run()
    # model is slowness per region parameter; map to cells and invert.
    velocity = 1.0 / model[paraDomain.cellMarkers()]
    np.savetxt('velocity.vector', velocity)
    paraDomain.addData('Velocity', velocity)
    #paraDomain.exportVTK('velocity')

    # output in local coordinates
    if inv_par.local_coord:
        base_point, gen_vecs = cut_point_cloud.cut_tool_to_gen_vecs(cut_par)
        localparaDomain = pg.Mesh(paraDomain)
        localparaDomain.translate(pg.RVector3(-base_point))
        localparaDomain.rotate(
            pg.RVector3(0, 0, -math.atan2(gen_vecs[0][1], gen_vecs[0][0])))
        localparaDomain.exportVTK('velocity')
    else:
        paraDomain.exportVTK('velocity')

    if inv_par.p3d:
        print()
        print_headline("Saving p3d")
        t = time.time()
        save_p3d(paraDomain, 1.0 / model.array(), cut_par, inv_par.p3dStep,
                 "velocity", inv_par.local_coord)
        print("save_p3d elapsed time: {:0.3f} s".format(time.time() - t))

    print()
    print("All done.")
import numpy as np
from matplotlib import pyplot as plt
# Import of PyGimli
import pygimli as pg
from pygimli import meshtools as mt
from pygimli.physics import TravelTimeManager as TTMgr
# Import of TKinker
from tkinter import Tk
from tkinter.filedialog import askopenfilename as askfilename


if __name__ == '__main__':
    # Ask the user for a first-arrival (.sgt) file via a file dialog.
    root = Tk()
    filename = askfilename(filetypes=(("First-Arrival", "*.sgt"), ("All types", "*.*")))
    root.destroy()

    # Inversion parameters:
    smoothing = 10          # regularization strength (lambda)
    model_depth = 50        # forced maximum depth of the model
    cell_size_limit = 2.5   # forced maximum mesh cell size
    z_weight = 0.1          # < 1 favours horizontal, > 1 vertical features

    # Run the inversion:
    dataTT = pg.DataContainer(filename, 's g t')
    print(dataTT)
    manager = TTMgr(data=dataTT)
    mesh_tt = manager.createMesh(data=dataTT,
                                 paraMaxCellSize=cell_size_limit,
                                 paraDepth=model_depth)
    manager.invert(data=dataTT, mesh=mesh_tt, zWeight=z_weight,
                   lam=smoothing, verbose=True)
    ax, cbar = manager.showResult(logScale=True)
    manager.drawRayPaths(ax=ax, color='w', lw=0.3, alpha=0.5)
    plt.show()
    manager.showCoverage()
    plt.show()
Esempio n. 15
0
            if new_cell.id() == old_cell_id:
                # If we keep jumping back and forth between two cells.
                print("Jumping back and forth...")
                break

        return self._jac

if __name__ == '__main__':
    """
    Currently, this script assumes that the data was generated with Dijkstra
    modelling and computes the differences between the FMM modelling.
    """

    mesh = pg.Mesh('vagnh_fwd_mesh.bms')
    mesh.createNeighbourInfos()
    data = pg.DataContainer('vagnh_NONOISE.sgt', 's g')
    vel = [1400., 1700., 5000.]
    # Slowness per cell marker; entries 0 and 1 are placeholders so that
    # markers 2..4 pick the three layer slownesses -- TODO confirm markers.
    slo = np.array([0, 0, 1./1400., 1./1700., 1./5000.])
    cslo = slo.take(mesh.cellMarkers())
    print(mesh)
    print(data)

    # Forward-model the cell slowness with FMM and time it.
    fwd = TravelTimeFMM(mesh, data, True)
    pg.tic()
    t_fmm = fwd.response(cslo)
#    t_fmm = fwd.response(1.0/np.array(vel))
    pg.toc()
#    delta_t = np.array(data("t")) - t_fmm
#    f, ax = plt.subplots()
#    x = pg.x(data.sensorPositions())
#    ax.plot(abs(delta_t), 'r-.', label='abs. diff')