Example #1
    def hash(self, funct, *args, **kwargs):
        """"Create a hash value"""
        pg.tic()
        functInfo = self.functInfo(funct)
        funcHash = strHash(functInfo)
        versionHash = strHash(pg.versionStr())
        codeHash = strHash(inspect.getsource(funct))

        argHash = 0
        for a in args:
            if isinstance(a, str):
                argHash = argHash ^ strHash(a)
            elif isinstance(a, list):
                for item in a:
                    if isinstance(item, str):
                        argHash = argHash ^ strHash(item)
                    else:
                        argHash = argHash ^ hash(item)
            else:
                argHash = argHash ^ hash(a)

        for k, v in kwargs.items():
            if isinstance(v, str):
                argHash = argHash ^ strHash(v)
            else:
                argHash = argHash ^ hash(v)

        pg.debug("Hashing took:", pg.dur(), "s")
        return funcHash ^ versionHash ^ codeHash ^ argHash
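
The combined cache key XORs hashes of the function info, the pygimli version, the function source and all arguments. strHash itself is not shown above; a minimal sketch of such a helper, assuming it only has to map a string to a stable integer (hypothetical implementation, not the one from pygimli):

import hashlib

def strHash(s):
    # Hypothetical helper: map a string to a stable 64-bit integer.
    # Python's built-in hash() is salted per process, so a digest-based
    # hash keeps cache keys reproducible across runs.
    return int.from_bytes(hashlib.sha256(s.encode('utf-8')).digest()[:8], 'little')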
Example #2
def simulateSynth(model, tMax=5000, satSteps=150, ertSteps=10, area=0.1,
                  synthPath='synth/'):
    """Create synthetic example."""

    if not os.path.exists(synthPath):
        os.mkdir(synthPath)

    world = mt.createWorld(start=[-20, 0], end=[20, -16], layers=[-2, -8],
                           worldMarker=False)
    for i, b in enumerate(world.boundaries()):
        b.setMarker(i + 1)

    block = mt.createRectangle(start=[-6, -3.5], end=[6, -6.0], marker=4,
                               boundaryMarker=11, area=area)
    geom = mt.mergePLC([world, block])
    geom.save(synthPath + 'synthGeom')
    # pg.show(geom, boundaryMarker=1)

    paraMesh = pg.meshtools.createMesh(geom, quality=32, area=area,
                                       smooth=[1, 10])

    # translate markers 1 2 3 4 -> 0 1 2 3
    mapMarker = np.array([0, 0, 1, 2, 3], 'float')
    paraMesh.setCellMarkers(mapMarker[np.array(paraMesh.cellMarkers())])
    paraMesh.save(synthPath + 'synth.bms')

    fop = HydroGeophysicalModelling(mesh=paraMesh, tMax=tMax,
                                    satSteps=satSteps,
                                    ertSteps=ertSteps,
                                    verbose=1)

    # OpenBLAS has problems with too high a thread count;
    # we still need to dig into this
    print("ThreadCount:", pg.threadCount())
    pg.setThreadCount(4)

    print('##### Simulate synthetic data ' + '#'*50)
    pg.tic()
    rhoaR = fop.response(pg.RVector(model)[paraMesh.cellMarkers()])
    pg.toc()
    print('#'*100)

    # add some noise here
    rand = pg.RVector(len(rhoaR))
    pg.randn(rand)

    rhoaR *= (1.0 + rand * fop.ws.derr.flatten())
    fop.ws.rhoaR = rhoaR.reshape(fop.ws.derr.shape)

    # fop.ws.mesh.save(synthPath + 'synth.bms')
    np.save(synthPath + 'synthK', fop.ws.k)
    np.save(synthPath + 'synthVel', fop.ws.vel)
    np.save(synthPath + 'synthSat', fop.ws.sat)

    fop.ws.scheme.save(synthPath + 'synth.shm', 'a b m n')
    np.save(synthPath + 'synthRhoaRatio', fop.ws.rhoaR)
    np.save(synthPath + 'synthRhoa', fop.ws.rhoa)
    np.save(synthPath + 'synthErr', fop.ws.derr)
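
The noise block above applies a relative Gaussian error model to the synthetic response. A standalone numpy sketch of the same idea (the names below are illustrative, not from the source):

import numpy as np

rng = np.random.default_rng(42)
response = np.array([100.0, 120.0, 95.0])   # noise-free forward response
relErr = np.array([0.02, 0.03, 0.05])       # relative error per datum
noisy = response * (1.0 + rng.standard_normal(response.size) * relErr)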
Example #3
    def restore(self):
        """Read data from json infos"""
        if os.path.exists(self._name + '.json'):

            # mpl resets the locale to the system default; this goes horribly
            # wrong for the German 'decimal_point': ','
            pg.checkAndFixLocaleDecimal_point(verbose=False)

            try:
                with open(self._name + '.json') as file:
                    self.info = json.load(file)

                # if len(self.info['type']) != 1:
                #     pg.error('only single return caches supported for now.')

                #pg._y(pg.pf(self.info))
                
                if self.info['type'] == 'DataContainerERT':
                    self._value = pg.DataContainerERT(self.info['file'],
                                                      removeInvalid=False)
                    # print(self._value)
                elif self.info['type'] == 'RVector':
                    self._value = pg.Vector()
                    self._value.load(self.info['file'], format=pg.core.Binary)
                elif self.info['type'] == 'Mesh':
                    pg.tic()
                    self._value = pg.Mesh()
                    self._value.loadBinaryV2(self.info['file'] + '.bms')
                    pg.debug("Restoring cache took:", pg.dur(), "s")
                elif self.info['type'] == 'ndarray':
                    self._value = np.load(self.info['file'] + '.npy',
                                          allow_pickle=True)
                elif self.info['type'] == 'Cm05Matrix':
                    self._value = pg.matrix.Cm05Matrix(self.info['file'])
                elif self.info['type'] == 'GeostatisticConstraintsMatrix':
                    self._value = pg.matrix.GeostatisticConstraintsMatrix(
                                                            self.info['file'])
                else:
                    self._value = np.load(self.info['file'] + '.npy',
                                          allow_pickle=True)

                if self.value is not None:
                    self.info['restored'] = self.info['restored'] + 1
                    self.updateCacheInfo()
                    pg.info('Cache {3} restored ({1}s x {0}): {2}'.\
                        format(self.info['restored'],
                               round(self.info['dur'], 1),
                               self._name, self.info['codeinfo']))
                else:
                    # default try numpy
                    pg.warn('Could not restore cache of type {0}.'.format(self.info['type']))

                pg.debug("Restoring cache took:", pg.dur(), "s")
            except Exception as e:
                import traceback
                traceback.print_exc(file=sys.stdout)
                print(self.info)
                pg.error('Cache restoring failed.')
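
restore() expects a sidecar JSON file next to the cached value. A rough sketch of how such an info file could be written; the key names are taken from the reader above, everything else is an assumption:

import json

def writeCacheInfo(name, valueType, codeinfo, duration):
    # Hypothetical writer for the <name>.json file that restore() parses.
    info = {
        'type': valueType,     # e.g. 'Mesh', 'RVector', 'ndarray'
        'file': name,          # base path of the stored value
        'codeinfo': codeinfo,  # description of the cached call
        'dur': duration,       # runtime of the original call in seconds
        'restored': 0,         # counter incremented on every restore
    }
    with open(name + '.json', 'w') as fi:
        json.dump(info, fi)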
Example #4
def test(N):
    
    x = np.linspace(0, 1, N)

    pg.tic()
    mesh = pg.createGrid(x, x, x)
    print(mesh)
    pg.toc()
    
    A = pg.RSparseMatrix()
    A.fillStiffnessMatrix(mesh)
    pg.toc()
Example #5
def test(N):

    x = np.linspace(0, 1, N)

    pg.tic()
    mesh = pg.createGrid(x, x, x)
    print(mesh)
    pg.toc()

    A = pg.RSparseMatrix()
    A.fillStiffnessMatrix(mesh)
    pg.toc()
Example #6
    def calculate_current_flow(self, time=False, verbose=False):
        """
        Perform the simulation based on the mesh, data and scheme.

        Returns:
            RMatrix and RVector
        """
        if time:
            pg.tic()
        self.sim = ert.simulate(self.mesh, res=self.data, scheme=self.scheme, sr=False,
                                calcOnly=True, verbose=verbose, returnFields=True)
        if time:
            pg.toc("Current flow", box=True)
        self.pot = pg.utils.logDropTol(self.sim[0] - self.sim[1], 10)
        return self.sim, self.pot
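
pg.utils.logDropTol is used here (and in the sensitivity example below) to compress the large dynamic range of the field for plotting. A generic sketch of such a signed log transform with a drop tolerance; this is an illustration, not pygimli's implementation:

import numpy as np

def logDropTolSketch(vals, droptol=1e-3):
    # Signed log10 scaling that maps values with |v| << droptol towards zero.
    vals = np.asarray(vals, dtype=float)
    return np.sign(vals) * np.log10(1.0 + np.abs(vals) / droptol)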
Example #7
    def __init__(self, A, verbose=False):
        """Constructor saving matrix and vector.

        Parameters
        ----------
        A : ndarray
            numpy type (full) matrix
        """
        super().__init__(verbose)  # only in Python 3
        self._mul = None

        if isinstance(A, str):
            self.load(A)
        else:
            from scipy.linalg import eigh  # , get_blas_funcs

            if A.shape[0] != A.shape[1]:  # rows/cols for pgcore matrix
                raise Exception("Matrix must by square (and symmetric)!")

            if verbose:
                t = pg.tic(key='init cm05')
            self.ew, self.EV = eigh(A)

            if verbose:
                pg.info(
                    '(C) Time for eigenvalue decomposition: {:.1f}s'.format(
                        pg.dur(key='init cm05')))
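
The eigendecomposition stored in self.ew and self.EV lets arbitrary powers of a symmetric matrix be formed later on; a small standalone sketch of that standard identity, independent of the pygimli class:

import numpy as np
from scipy.linalg import eigh

A = np.array([[2.0, 1.0], [1.0, 3.0]])   # symmetric positive definite test matrix
ew, EV = eigh(A)                         # A = EV @ diag(ew) @ EV.T

A_half = EV @ np.diag(ew**0.5) @ EV.T    # matrix square root A^(1/2)
np.testing.assert_allclose(A_half @ A_half, A)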
Example #8
    def hash(self, funct, *args, **kwargs):
        """"Create a hash value"""
        pg.tic()
        functInfo = self.functInfo(funct)
        funcHash = strHash(functInfo)
        versionHash = strHash(pg.versionStr())
        codeHash = strHash(inspect.getsource(funct))

        argHash = 0
        for a in args:
            argHash = argHash ^ valHash(a)

        for k, v in kwargs.items():
            argHash = argHash ^ valHash(k) ^ valHash(v)
            
        pg.debug("Hashing took:", pg.dur(), "s")
        return funcHash ^ versionHash ^ codeHash ^ argHash
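
valHash is not shown in this example; judging from the type dispatch in Example #1 it presumably looks roughly like this hypothetical sketch:

def valHash(a):
    # Hash a single argument; strings and lists need special treatment because
    # their built-in hash is either salted (str) or not available (list).
    if isinstance(a, str):
        return strHash(a)
    if isinstance(a, list):
        h = 0
        for item in a:
            h = h ^ valHash(item)
        return h
    return hash(a)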
Example #9
def createMeshPatches(ax, mesh, verbose=True):
    """Utility function to create 2d mesh patches within a given ax."""
    if not mesh:
        pg.error("drawMeshBoundaries(ax, mesh): invalid mesh:", mesh)
        return

    if mesh.nodeCount() < 2:
        pg.error("drawMeshBoundaries(ax, mesh): to few nodes:", mesh)
        return

    pg.tic()
    polys = [_createCellPolygon(c) for c in mesh.cells()]
    patches = mpl.collections.PolyCollection(polys, picker=True)

    if verbose:
        pg.info("Creation of mesh patches took = ", pg.toc())

    return patches
Example #10
    def calculate_sensitivity(self, time=False):
        """
        Perform a sensitivity analysis.

        Returns:
            ERTModelling forward operator with the Jacobian computed
        """
        if time:
            pg.tic()
        self.fop = ert.ERTModelling()
        self.fop.setData(self.scheme)
        self.fop.setMesh(self.mesh)
        self.fop.createJacobian(self.data)
        if time:
            pg.toc("Sensitivity calculation", box=True)
        sens = self.fop.jacobian()[0]  # first row = first measurement
        self.normsens = pg.utils.logDropTol(sens / self.mesh.cellSizes(), 5e-5)
        self.normsens /= numpy.max(self.normsens)
        return self.fop
Example #11
def createMeshPatches(ax, mesh, verbose=True, rasterized=False):
    """Utility function to create 2d mesh patches within a given ax."""
    if not mesh:
        pg.error("drawMeshBoundaries(ax, mesh): invalid mesh:", mesh)
        return

    if mesh.nodeCount() < 2:
        pg.error("drawMeshBoundaries(ax, mesh): to few nodes:", mesh)
        return

    pg.tic()
    polys = [_createCellPolygon(c) for c in mesh.cells()]
    patches = mpl.collections.PolyCollection(polys, picker=True,
                                             rasterized=rasterized)

    if verbose:
        pg.info("Creation of mesh patches took = ", pg.toc())

    return patches
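
createMeshPatches only builds the PolyCollection; it still has to be attached to the axes by the caller. A typical (assumed) usage:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
patches = createMeshPatches(ax, mesh)   # mesh: an existing 2D pg.Mesh
ax.add_collection(patches)              # a PolyCollection is not drawn until added
ax.autoscale_view()
plt.show()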
Example #12
    def createJacobian(self, model):
        """Create Jacobian matrix."""
        if self.subPotentials is None:
            self.response(model)

        J = self.jacobian()
        J.resize(self.data.size(), self.regionManager().parameterCount())

        cells = self.mesh().findCellByMarker(0, -1)
        Si = pg.ElementMatrix()
        St = pg.ElementMatrix()

        u = self.subPotentials

        pg.tic()
        if self.verbose():
            print("Calculate sensitivity matrix for model: ",
                  min(model), max(model))

        Jt = pg.RMatrix(self.data.size(),
                        self.regionManager().parameterCount())

        for kIdx, w in enumerate(self.w):
            k = self.k[kIdx]
            w = self.w[kIdx]

            Jt *= 0.
            A = pg.ElementMatrixMap()

            for i, c in enumerate(cells):
                modelIdx = c.marker()

                # 2.5D
                Si.u2(c)
                Si *= k * k
                Si += St.ux2uy2uz2(c)

                # 3D
                # Si.ux2uy2uz2(c); w = w* 2

                A.add(modelIdx, Si)

            for dataIdx in range(self.data.size()):

                a = int(self.data('a')[dataIdx])
                b = int(self.data('b')[dataIdx])
                m = int(self.data('m')[dataIdx])
                n = int(self.data('n')[dataIdx])
                Jt[dataIdx] = A.mult(u[kIdx][a] - u[kIdx][b],
                                     u[kIdx][m] - u[kIdx][n])

            J += w * Jt

        m2 = model*model
        k = self.data('k')

        for i in range(J.rows()):
            J[i] /= (m2 / k[i])

        if self.verbose():
            sumsens = np.zeros(J.rows())
            for i in range(J.rows()):
                sumsens[i] = pg.sum(J[i])
            print("sens sum: median = ", pg.median(sumsens),
                  " min = ", pg.min(sumsens),
                  " max = ", pg.max(sumsens))
Example #13
def crankNicolson(times, theta, S, I, f, u0=None, progress=None, debug=None):
    """
        S = constant over time
        f = constant over time
    """

    if len(times) < 2:
        raise BaseException("We need at least 2 times for Crank "
                            "Nicolsen time discretization." + str(len(times)))
    sw = pg.Stopwatch(True)

    if u0 is None:
        u0 = np.zeros(len(f))

    u = np.zeros((len(times), len(f)))
    u[0, :] = u0
    dt = times[1] - times[0]

    rhs = np.zeros((len(times), len(f)))

    rhs[:] = f

    A = I + S * dt * theta

    solver = pg.LinSolver(A, verbose=False)

    timeAssemble = []
    timeSolve = []
    # print('0', min(u[0]), max(u[0]), min(f), max(f))

    timeMeasure = False
    if progress:
        timeMeasure = True

    for n in range(1, len(times)):

        if timeMeasure:
            pg.tic()


#        pg.tic()
#        bRef = (I + (dt * (theta - 1.)) * S) * u[n - 1] + \
#            dt * ((1.0 - theta) * rhs[n - 1] + theta * rhs[n])
#        pg.toc()
#
#        pg.tic()
#        b = I * u[n - 1] + ((dt * (theta - 1.)) * S) * u[n - 1] + \
#            dt * ((1.0 - theta) * rhs[n - 1] + theta * rhs[n])
#        pg.toc()
#
#        pg.tic()
        b = I * u[n - 1] + S.mult(dt * (theta - 1.) * u[n - 1]) + \
            dt * ((1.0 - theta) * rhs[n - 1] + theta * rhs[n])
#        pg.toc()
#
#        print(np.linalg.norm(b-b1))
#        np.testing.assert_allclose(bRef, b)

        if timeMeasure:
            timeAssemble.append(pg.dur())

        if timeMeasure:
            pg.tic()

        u[n, :] = solver.solve(b)

        if timeMeasure:
            timeSolve.append(pg.dur())

        # A = (I + dt * theta * S)
        # u[n, : ] = linsolve(A, b)

        if progress:
            progress.update(n,
                            ' t_prep: ' + str(round(timeAssemble[-1], 5)) + 's' + \
                            ' t_step: ' + str(round(timeSolve[-1], 5)) + 's')

        # if verbose and (n % verbose == 0):
        #     # print(min(u[n]), max(u[n]))
        #     print("timesteps:", n, "/", len(times),
        #           'runtime:', sw.duration(), "s",
        #           'assemble:', np.mean(timeAssemble),
        #           'solve:', np.mean(timeSolve))
    return u
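
The assembly above corresponds to the usual theta scheme: with A = I + dt*theta*S, each step solves A u^n = (I + dt*(theta-1)*S) u^{n-1} + dt*((1-theta) f^{n-1} + theta f^n). A minimal standalone sketch of one such step (toy matrices, not from the source):

import numpy as np

dt, theta = 0.01, 0.5                      # theta = 0.5 is Crank-Nicolson
S = np.array([[2.0, -1.0], [-1.0, 2.0]])   # toy stiffness matrix
I = np.eye(2)                              # toy mass matrix
f = np.array([1.0, 0.0])                   # right-hand side, constant in time
uOld = np.zeros(2)

A = I + dt * theta * S
b = I @ uOld + dt * (theta - 1.0) * (S @ uOld) + dt * f
uNew = np.linalg.solve(A, b)
print(uNew)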
Example #14
    """
    Currently, this script assumes that the data was generated with Dijkstra
    modelling and computes the differences between the FMM modelling.
    """

    mesh = pg.Mesh('vagnh_fwd_mesh.bms')
    mesh.createNeighbourInfos()
    data = pg.DataContainer('vagnh_NONOISE.sgt', 's g')
    vel = [1400., 1700., 5000.]
    slo = np.array([0, 0, 1. / 1400., 1. / 1700., 1. / 5000.])
    cslo = slo.take(mesh.cellMarkers())
    print(mesh)
    print(data)

    fwd = TravelTimeFMM(mesh, data, True)
    pg.tic()
    t_fmm = fwd.response(cslo)
    #    t_fmm = fwd.response(1.0/np.array(vel))
    pg.toc()
    #    delta_t = np.array(data("t")) - t_fmm
    #    f, ax = plt.subplots()
    #    x = pg.x(data.sensorPositions())
    #    ax.plot(abs(delta_t), 'r-.', label='abs. diff')
    #    ax.plot(delta_t, 'b-', label='diff')
    #    ax.legend(loc='best')
    #    f.show()
    #    raise SystemExit()

    l = fwd._trace_back(50, 0)

    fig, a = plt.subplots()
Example #15
    def createJacobian(self, model):
        """Create Jacobian matrix."""
        if self.subPotentials is None:
            self.response(model)

        J = self.jacobian()
        J.resize(self.data.size(), self.regionManager().parameterCount())

        cells = self.mesh().findCellByMarker(0, -1)
        Si = pg.ElementMatrix()
        St = pg.ElementMatrix()

        u = self.subPotentials

        pg.tic()
        if self.verbose():
            print("Calculate sensitivity matrix for model: ", min(model),
                  max(model))

        Jt = pg.RMatrix(self.data.size(),
                        self.regionManager().parameterCount())

        for kIdx, w in enumerate(self.w):
            k = self.k[kIdx]
            w = self.w[kIdx]

            Jt *= 0.
            A = pg.ElementMatrixMap()

            for i, c in enumerate(cells):
                modelIdx = c.marker()

                # 2.5D
                Si.u2(c)
                Si *= k * k
                Si += St.ux2uy2uz2(c)

                # 3D
                # Si.ux2uy2uz2(c); w = w* 2

                A.add(modelIdx, Si)

            for dataIdx in range(self.data.size()):

                a = int(self.data('a')[dataIdx])
                b = int(self.data('b')[dataIdx])
                m = int(self.data('m')[dataIdx])
                n = int(self.data('n')[dataIdx])
                Jt[dataIdx] = A.mult(u[kIdx][a] - u[kIdx][b],
                                     u[kIdx][m] - u[kIdx][n])

            J += w * Jt

        m2 = model * model
        k = self.data('k')

        for i in range(J.rows()):
            J[i] /= (m2 / k[i])

        if self.verbose():
            sumsens = np.zeros(J.rows())
            for i in range(J.rows()):
                sumsens[i] = pg.sum(J[i])
            print("sens sum: median = ", pg.median(sumsens), " min = ",
                  pg.min(sumsens), " max = ", pg.max(sumsens))
Example #16
    else:
        cell.setMarker(len(np.unique(mesh.cellMarkers())))  # triangle boundary

# create scheme files
sensors = np.load("sensors.npy", allow_pickle=True)
shmERT = pg.DataContainerERT("erttrue.dat")
shmSRT = createRAData(sensors)

Fsyn = np.loadtxt("syn_model.dat")

# %% compute forward response and jacobians
jacERT, jacSRT = jacobian4PM(meshERT, meshRST, shmERT, shmSRT, Fsyn)
jacJoint = np.vstack((jacSRT, jacERT))
print(jacERT.shape, jacSRT.shape, jacJoint.shape)
jacJoint.dump("jacJoint.npz")
pg.tic("Calculating JTJ")
JTJ = jacJoint.T.dot(jacJoint)
pg.toc()
MCM = np.linalg.inv(JTJ)
MCM.dump("MCM.npz")
# plt.matshow(MCM)
# %%
npar, nreg = Fsyn.shape
gMat = np.zeros((nreg, npar * nreg))
for i in range(nreg):
    for j in range(npar):
        gMat[i, j * nreg + i] = 1.0
# %%
pg.tic("Calculating JTJ")
jacJointConst = np.vstack((jacJoint, gMat * 10000))
JTJconst = jacJointConst.T.dot(jacJointConst)
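
With JTJ inverted above, a common (assumed) follow-up is to read per-parameter uncertainties off the diagonal of the model covariance matrix:

import numpy as np

MCM = np.load("MCM.npz", allow_pickle=True)   # written above via MCM.dump()
paramStd = np.sqrt(np.diag(MCM))              # standard deviation per model parameter
corr = MCM / np.outer(paramStd, paramStd)     # correlation matrix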
Example #17
def crankNicolson(times, theta, S, I, f, u0=None, verbose=0):
    """
        S = const over time
        f = const over time

    """

    if len(times) < 2:
        raise BaseException("We need at least 2 times for Crank "
                            "Nicolsen time discretization." + str(len(times)))
    sw = pg.Stopwatch(True)

    if u0 is None:
        u0 = np.zeros(len(f))

    u = np.zeros((len(times), len(f)))
    u[0, :] = u0
    dt = (times[1] - times[0])

    rhs = np.zeros((len(times), len(f)))

    rhs[:] = f

    A = (I + dt * theta * S)

    solver = pg.LinSolver(A, verbose=verbose)

    timeAssemble = []
    timeSolve = []
    # print('0', min(u[0]), max(u[0]), min(f), max(f))
    for n in range(1, len(times)):

        if verbose:
            pg.tic()

#        pg.tic()
#        bRef = (I + (dt * (theta - 1.)) * S) * u[n - 1] + \
#            dt * ((1.0 - theta) * rhs[n - 1] + theta * rhs[n])
#        pg.toc()
#
#        pg.tic()
#        b = u[n - 1] + ((dt * (theta - 1.)) * S) * u[n - 1] + \
#            dt * ((1.0 - theta) * rhs[n - 1] + theta * rhs[n])
#        pg.toc()
#
#        pg.tic()
        b = u[n - 1] + S.mult(dt * (theta - 1.) * u[n - 1]) + \
            dt * ((1.0 - theta) * rhs[n - 1] + theta * rhs[n])
        #        pg.toc()
        #
        #        print(np.linalg.norm(b-b1))
        #        np.testing.assert_allclose(bRef, b)

        if verbose:
            timeAssemble.append(pg.dur())

        if verbose:
            pg.tic()

        u[n, :] = solver.solve(b)

        if verbose:
            timeSolve.append(pg.dur())

        # A = (I + dt * theta * S)
        # u[n, : ] = linsolve(A, b)

        if verbose and (n % verbose == 0):
            # print(min(u[n]), max(u[n]))
            print("timesteps:", n, "/", len(times), 'runtime:', sw.duration(),
                  "s", 'assemble:', np.mean(timeAssemble), 'solve:',
                  np.mean(timeSolve))


#    import matplotlib.pyplot as plt
#    plt.figure()
#    plt.plot(timeAssemble)
#    plt.figure()
#    plt.plot(timeSolve)
#    plt.show()

    return u
Example #18
0
    """
    Currently, this script assumes that the data was generated with Dijkstra
    modelling and computes the differences between the FMM modelling.
    """

    mesh = pg.Mesh('vagnh_fwd_mesh.bms')
    mesh.createNeighbourInfos()
    data = pg.DataContainer('vagnh_NONOISE.sgt', 's g')
    vel = [1400., 1700., 5000.]
    slo = np.array([0, 0, 1./1400., 1./1700., 1./5000.])
    cslo = slo.take(mesh.cellMarkers())
    print(mesh)
    print(data)

    fwd = TravelTimeFMM(mesh, data, True)
    pg.tic()
    t_fmm = fwd.response(cslo)
#    t_fmm = fwd.response(1.0/np.array(vel))
    pg.toc()
#    delta_t = np.array(data("t")) - t_fmm
#    f, ax = plt.subplots()
#    x = pg.x(data.sensorPositions())
#    ax.plot(abs(delta_t), 'r-.', label='abs. diff')
#    ax.plot(delta_t, 'b-', label='diff')
#    ax.legend(loc='best')
#    f.show()
#    raise SystemExit()

    l = fwd._trace_back(50, 0)

    fig, a = plt.subplots()
Example #19
    def response(self, model):
        """Solve forward task.

        Create apparent resistivity values for a given resistivity distribution
        for self.mesh.
        """
        ### NOTE TODO can't be MT until mixed boundary condition depends on
        ### self.resistivity
        pg.tic()
        if not self.data.allNonZero('k'):
            pg.error('Need valid geometric factors: "k".')
            pg.warn('Fallback "k" values to -sign("rhoa")')
            self.data.set('k', -pg.math.sign(self.data('rhoa')))

        mesh = self.mesh()

        nDof = mesh.nodeCount()
        elecs = self.data.sensorPositions()

        nEle = len(elecs)
        nData = self.data.size()

        self.resistivity = res = self.createMappedModel(model, -1.0)

        if self.verbose:
            print("Calculate response for model:", min(res), max(res))

        rMin = elecs[0].dist(elecs[1]) / 2.0
        rMax = elecs[0].dist(elecs[-1]) * 2.0

        k, w = self.getIntegrationWeights(rMin, rMax)

        self.k = k
        self.w = w

        # pg.show(mesh, res, label='res')
        # pg.wait()

        rhs = self.createRHS(mesh, elecs)

        # store all potential fields
        u = np.zeros((nEle, nDof))
        self.subPotentials = [pg.Matrix(nEle, nDof) for i in range(len(k))]

        for i, ki in enumerate(k):
            ws = dict()
            uE = pg.solve(mesh,
                          a=1. / res,
                          b=-(ki * ki) / res,
                          f=rhs,
                          bc={'Robin': ['*', self.mixedBC]},
                          userData={
                              'sourcePos': elecs,
                              'k': ki
                          },
                          verbose=False,
                          stats=0,
                          debug=False)
            self.subPotentials[i] = uE
            u += w[i] * uE

        # collect potential matrix,
        # i.e., potential for all electrodes and all injections
        pM = np.zeros((nEle, nEle))

        for i in range(nEle):
            pM[i] = pg.interpolate(mesh, u[i, :], destPos=elecs)

        # collect resistivity values for all 4 pole measurements
        r = np.zeros(nData)

        for i in range(nData):
            iA = int(self.data('a')[i])
            iB = int(self.data('b')[i])
            iM = int(self.data('m')[i])
            iN = int(self.data('n')[i])

            uAB = pM[iA] - pM[iB]
            r[i] = uAB[iM] - uAB[iN]

        self.lastResponse = r * self.data('k')

        if self.verbose:
            print("Resp min/max: {0} {1} {2}s".format(min(self.lastResponse),
                                                      max(self.lastResponse),
                                                      pg.dur()))

        return self.lastResponse
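
The accumulation u += w[i] * uE inside the loop over wavenumbers is (presumably) the quadrature of the inverse cosine transform that maps the 2.5D wavenumber-domain potentials back to the spatial domain, roughly

    u(x, z) ≈ sum_i w_i * u~(x, z; k_i),

with the wavenumbers k_i and weights w_i provided by getIntegrationWeights(rMin, rMax).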
Example #20
def simulateSynth(model,
                  tMax=5000,
                  satSteps=150,
                  ertSteps=10,
                  area=0.1,
                  synthPath='synth/'):
    """Create synthetic example."""

    if not os.path.exists(synthPath):
        os.mkdir(synthPath)

    world = mt.createWorld(start=[-20, 0],
                           end=[20, -16],
                           layers=[-2, -8],
                           worldMarker=False)
    for i, b in enumerate(world.boundaries()):
        b.setMarker(i + 1)

    block = mt.createRectangle(start=[-6, -3.5],
                               end=[6, -6.0],
                               marker=4,
                               boundaryMarker=11,
                               area=area)
    geom = mt.mergePLC([world, block])
    geom.save(synthPath + 'synthGeom')
    # pg.show(geom, boundaryMarker=1)

    paraMesh = pg.meshtools.createMesh(geom,
                                       quality=32,
                                       area=area,
                                       smooth=[1, 10])

    # translate markers 1 2 3 4 -> 0 1 2 3
    mapMarker = np.array([0, 0, 1, 2, 3], 'float')
    paraMesh.setCellMarkers(mapMarker[np.array(paraMesh.cellMarkers())])
    paraMesh.save(synthPath + 'synth.bms')

    fop = HydroGeophysicalModelling(mesh=paraMesh,
                                    tMax=tMax,
                                    satSteps=satSteps,
                                    ertSteps=ertSteps,
                                    verbose=1)

    # OpenBLAS has problems with too high a thread count;
    # we still need to dig into this
    print("TC", pg.threadCount())
    pg.setThreadCount(4)

    print('##### Simulate synthetic data ' + '#' * 50)
    pg.tic()
    rhoaR = fop.response(pg.RVector(model)[paraMesh.cellMarkers()])
    pg.toc()
    print('#' * 100)

    # add some noise here
    rand = pg.RVector(len(rhoaR))
    pg.randn(rand)

    rhoaR *= (1.0 + rand * fop.ws.derr.flatten())
    fop.ws.rhoaR = rhoaR.reshape(fop.ws.derr.shape)

    # fop.ws.mesh.save(synthPath + 'synth.bms')
    np.save(synthPath + 'synthK', fop.ws.k)
    np.save(synthPath + 'synthVel', fop.ws.vel)
    np.save(synthPath + 'synthSat', fop.ws.sat)

    fop.ws.scheme.save(synthPath + 'synth.shm', 'a b m n')
    np.save(synthPath + 'synthRhoaRatio', fop.ws.rhoaR)
    np.save(synthPath + 'synthRhoa', fop.ws.rhoa)
    np.save(synthPath + 'synthErr', fop.ws.derr)