Example #1
def createERTData(elecs, schemeName='none', **kwargs):
    """ Simple data creator for compatibility (advanced version in BERT).

    Parameters
    ----------
    sounding : bool [False]
        Create a 1D VES Schlumberger configuration.
        elecs need to be an array with elecs[0] = mn/2 and elecs[1:] = ab/2.

    """
    if kwargs.pop('sounding', False):
        data = pg.DataContainerERT()
        data.setSensors(pg.cat(-elecs[::-1], elecs))

        nElecs = len(elecs)
        for i in range(nElecs - 1):
            data.createFourPointData(i, i, 2 * nElecs - i - 1, nElecs - 1,
                                     nElecs)

        return data

    if schemeName != "dd":
        import pybert as pb  # that's bad!!! TODO: remove pybert deps
        return pb.createData(elecs, schemeName, **kwargs)

    isClosed = kwargs.pop('closed', False)

    data = pg.DataContainerERT()
    data.setSensors(elecs)

    nElecs = len(elecs)
    a = []
    b = []
    m = []
    n = []
    eb = 0
    for i in range(nElecs):
        for j in range(eb + 2, nElecs):
            ea = i
            eb = ea + 1
            em = j
            en = em + 1

            if isClosed:
                en = en % nElecs

            if en < nElecs and en != ea:
                a.append(ea)
                b.append(eb)
                m.append(em)
                n.append(en)

    data.resize(len(a))
    data.add('a', a)
    data.add('b', b)
    data.add('m', m)
    data.add('n', n)
    data.set('valid', np.ones(len(a)))

    return data
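
A minimal usage sketch of the dipole-dipole branch above (hedged: it assumes the module-level imports the function itself relies on, pygimli as pg and numpy as np, and that setSensors accepts a plain list of electrode positions; variable names are illustrative only):

# Hedged usage sketch, not part of the original module.
elecs = [pg.Pos(float(x), 0.0, 0.0) for x in range(41)]  # 41 electrodes at 1 m spacing
dd_data = createERTData(elecs, schemeName='dd')
print(dd_data.sensorCount(), dd_data.size())  # electrodes vs. ABMN quadrupoles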
Example #2
def main(inv_par, project_conf, bw_surface, max_dist=1.0):
    a = bw_surface._bs_surface.eval(0, 0)
    b = bw_surface._bs_surface.eval(1, 0)
    c = bw_surface._bs_surface.eval(0, 1)

    a[2] = 0
    b[2] = 0
    c[2] = 0

    ab = b - a
    ac = c - a
    tr = np.array([ab, ac, [0, 0, 1]]).T
    inv_tr = np.linalg.inv(tr)

    if project_conf.method == GenieMethod.ERT:
        data = pg.DataContainerERT("input.dat", removeInvalid=False)
    else:
        data = pg.DataContainer("input.dat",
                                sensorTokens='s g',
                                removeInvalid=False)

    for i in range(len(data.sensorPositions())):
        pos = data.sensorPosition(i)
        pos = np.array([pos[0], pos[1], pos[2]])
        pos_l = inv_tr @ (pos - a)

        try:
            new_pos = bw_surface._bs_surface.eval(pos_l[0], pos_l[1])
        except IndexError:
            pass
        else:
            if np.linalg.norm(new_pos - pos) <= max_dist:
                data.setSensorPosition(i, new_pos)

    data.save("input_snapped.dat")
Example #3
def mesh_from_brep(brep_file, mesh_file, project_conf, inv_par):
    if project_conf.method == GenieMethod.ERT:
        data = pg.DataContainerERT("input_snapped.dat", removeInvalid=False)
    else:
        data = pg.DataContainer("input_snapped.dat",
                                sensorTokens='s g',
                                removeInvalid=False)

    el_pos = []
    for i in range(len(data.sensorPositions())):
        pos = data.sensorPosition(i)
        pos = np.array([pos[0], pos[1], pos[2]])
        el_pos.append(pos)

    model = gmsh.GeometryOCC("model_name")
    compound = model.import_shapes(brep_file, highestDimOnly=False)
    points = [model.point(pos).tags[0] for pos in el_pos]
    dist = field.distance_nodes(points)
    f_distance = field.threshold(dist,
                                 lower_bound=(inv_par.elementSize_d,
                                              inv_par.elementSize_d),
                                 upper_bound=(inv_par.elementSize_D,
                                              inv_par.elementSize_H))
    model.set_mesh_step_field(f_distance)
    model.mesh_options.CharacteristicLengthMin = 0.1
    model.mesh_options.CharacteristicLengthMax = 100
    model.make_mesh([compound])
    model.write_mesh(mesh_file, gmsh.MeshFormat.msh2)
Example #4
def createDataVES(ab2, mn2):
    """ Utility one-liner to create a BERT datafile for Schlumberger 1D VES

    Parameters
    ----------
    ab2: array
        Half distance between current electrodes

    mn2: float
        Half distance between measurement electrodes

    Returns
    -------
    data : DataContainerERT
    """

    data = pg.DataContainerERT()

    if type(mn2) is float or type(mn2) is int:
        mn2 = [mn2]

    count = 0
    for mn in mn2:
        emID = data.createSensor([-mn, 0.0, 0.0])
        enID = data.createSensor([mn, 0.0, 0.0])

        for x in ab2:
            eaID = data.createSensor([-x, 0.0, 0.0])
            ebID = data.createSensor([x, 0.0, 0.0])
            data.createFourPointData(count, eaID, ebID, emID, enID)
            count += 1

    data.fitFillSize()

    return data
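
A short, hedged usage sketch for the VES helper above; only the function defined here and numpy are assumed, and the spacings are arbitrary placeholder values:

# Hedged usage sketch, not part of the original module.
import numpy as np
ab2 = np.logspace(0, 2, 20)        # AB/2 from 1 m to 100 m
ves = createDataVES(ab2, mn2=0.5)  # fixed MN/2 of 0.5 m
print(ves.size())                  # one four-point reading per AB/2 value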
Example #5
    def restore(self):
        """Read data from json infos"""
        if os.path.exists(self._name + '.json'):

            # Fricking mpl kills locale setting to system default .. this went
            # horribly wrong for German 'decimal_point': ','
            pg.checkAndFixLocaleDecimal_point(verbose=False)

            try:
                with open(self._name + '.json') as file:
                    self.info = json.load(file)

                # if len(self.info['type']) != 1:
                #     pg.error('only single return caches supported for now.')

                #pg._y(pg.pf(self.info))
                
                if self.info['type'] == 'DataContainerERT':
                    self._value = pg.DataContainerERT(self.info['file'],
                                                      removeInvalid=False)
                    # print(self._value)
                elif self.info['type'] == 'RVector':
                    self._value = pg.Vector()
                    self._value.load(self.info['file'], format=pg.core.Binary)
                elif self.info['type'] == 'Mesh':
                    pg.tic()
                    self._value = pg.Mesh()
                    self._value.loadBinaryV2(self.info['file'] + '.bms')
                    pg.debug("Restoring cache took:", pg.dur(), "s")
                elif self.info['type'] == 'ndarray':
                    self._value = np.load(self.info['file'] + '.npy',
                                          allow_pickle=True)
                elif self.info['type'] == 'Cm05Matrix':
                    self._value = pg.matrix.Cm05Matrix(self.info['file'])
                elif self.info['type'] == 'GeostatisticConstraintsMatrix':
                    self._value = pg.matrix.GeostatisticConstraintsMatrix(
                                                            self.info['file'])
                else:
                    self._value = np.load(self.info['file'] + '.npy',
                                          allow_pickle=True)

                if self.value is not None:
                    self.info['restored'] = self.info['restored'] + 1
                    self.updateCacheInfo()
                    pg.info('Cache {3} restored ({1}s x {0}): {2}'.\
                        format(self.info['restored'],
                               round(self.info['dur'], 1),
                               self._name, self.info['codeinfo']))
                else:
                    # default try numpy
                    pg.warn('Could not restore cache of type {0}.'.format(self.info['type']))

                pg.debug("Restoring cache took:", pg.dur(), "s")
            except Exception as e:
                import traceback
                traceback.print_exc(file=sys.stdout)
                print(self.info)
                pg.error('Cache restoring failed.')
Example #6
def prepare_old(electrode_groups, measurements):
    """
    Prepares data for GIMLI inversion.
    :param electrode_groups:
    :param measurements:
    :return:
    """
    el_offset = 0
    electrodes = []
    a = pd.Series()
    b = pd.Series()
    m = pd.Series()
    n = pd.Series()
    i = pd.Series()
    u = pd.Series()
    err = pd.Series()
    rhoa = pd.Series()

    for ms in measurements:
        if ms.data is None:
            continue
        d = ms.data["data"]

        for e_id in range(ms.el_start, ms.el_stop+1):
            e = _find_el(electrode_groups, e_id)
            if e is None:
                print("error")
            electrodes.append(e)

        a = a.append(d["ca"] + el_offset, ignore_index=True)
        b = b.append(d["cb"] + el_offset, ignore_index=True)
        m = m.append(d["pa"] + el_offset, ignore_index=True)
        n = n.append(d["pb"] + el_offset, ignore_index=True)
        i = i.append(d["I"], ignore_index=True)
        u = u.append(d["V"], ignore_index=True)
        err = err.append(d["std"], ignore_index=True)
        rhoa = rhoa.append(d["AppRes"], ignore_index=True)

        el_offset += ms.el_stop - ms.el_start + 1

    data = pg.DataContainerERT()
    for e in electrodes:
        data.createSensor([e.x, e.y, e.z])
    data.resize(len(a))
    data.set('a', a)
    data.set('b', b)
    data.set('m', m)
    data.set('n', n)
    data.set('i', i)
    data.set('u', u)
    data.set('err', err)
    data.set('rhoa', rhoa)
    data.markValid(data('rhoa') > 0)
    return data
Example #7
    def checkData(self, data=None):
        """Return data from container.

        THINKABOUT: Data will be changed, or should the manager keep a copy?
        """
        data = data or pg.DataContainerERT(self.data)
        if isinstance(data, pg.DataContainer):
            if not data.allNonZero('k'):
                pg.warn("Data file contains no geometric factors (token='k').")
                data['k'] = createGeometricFactors(data, verbose=True)
            if self.fop.complex():
                if not data.haveData('rhoa'):
                    pg.critical('DataContainer has no "rhoa" values.')
                if not data.haveData('ip'):
                    pg.critical('DataContainer has no "ip" values.')

                # pg.warn('check sign of phases')
                rhoa = data['rhoa']
                phia = -data['ip'] / 1000  # 'ip' is defined for neg mrad.
                # we should think about some 'phia' in rad

                return pg.utils.squeezeComplex(pg.utils.toComplex(rhoa, phia))

            else:
                if not data.haveData('rhoa'):
                    if data.allNonZero('r'):
                        pg.info("Creating apparent resistivities from "
                                "impedances rhoa = r * k")
                        data['rhoa'] = data['r'] * data['k']
                    elif data.allNonZero('u') and data.allNonZero('i'):
                        pg.info("Creating apparent resistivities from "
                                "voltage and current rhoa = u/i * k")
                        data['rhoa'] = data['u'] / data['i'] * data['k']
                    else:
                        pg.critical("DataContainer has neither "
                                    "apparent resistivities 'rhoa', "
                                    "nor impedances 'r', "
                                    "nor voltage 'u' along with current 'i'.")

                if any(data['rhoa'] < 0) and \
                        isinstance(self.inv.dataTrans, pg.core.TransLog):
                    print(pg.find(data['rhoa'] < 0))
                    print(data['rhoa'][data['rhoa'] < 0])
                    pg.critical("Found negative apparent resistivities. "
                                "These can't be processed with logarithmic "
                                "data transformation. You should consider "
                                "filtering them out using "
                                "data.remove(data['rhoa'] < 0).")

                return data['rhoa']

        return data
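
The fallback branch above derives apparent resistivities as rhoa = u / i * k when only voltages and currents are stored. A hedged standalone sketch of the same relation, assuming a pg.DataContainerERT named data that already carries 'u', 'i' and 'k':

# Hedged sketch of the fallback used in checkData above.
if not data.allNonZero('rhoa') and data.allNonZero('u') and data.allNonZero('i'):
    data['rhoa'] = data['u'] / data['i'] * data['k']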
Example #8
def createERTData(elecs, schemeName='none', **kwargs):
    """ Simple data creator for compatibility (advanced version in BERT)."""
    if schemeName != "dd":
        import pybert as pb  # that's bad!!! TODO: remove pybert deps
        return pb.createData(elecs, schemeName, **kwargs)

    if isinstance(elecs, pg.RVector):
        sPos = []
        for e in elecs:
            sPos.append(pg.RVector3(e, 0., 0.))
        elecs = sPos

    isClosed = kwargs.pop('closed', False)

    data = pg.DataContainerERT()
    data.registerSensorIndex('a')
    data.registerSensorIndex('b')
    data.registerSensorIndex('m')
    data.registerSensorIndex('n')
    data.setSensorPositions(elecs)
    nElecs = len(elecs)
    a = []
    b = []
    m = []
    n = []
    eb = 0
    for i in range(nElecs):
        for j in range(eb + 2, nElecs):
            ea = i
            eb = ea + 1
            em = j
            en = em + 1

            if isClosed:
                en = en % nElecs

            if en < nElecs and en != ea:
                a.append(ea)
                b.append(eb)
                m.append(em)
                n.append(en)

    data.resize(len(a))
    data.add('a', a)
    data.add('b', b)
    data.add('m', m)
    data.add('n', n)
    data.set('valid', np.ones(len(a)))

    return data
Example #9
    def test_HashData(self):
        d1 = pg.DataContainerERT()
        d2 = pg.DataContainerERT()

        self.assertEqual(d1.hash(), d2.hash())
        d1.createSensor([1.0, 0.0])
        d2.createSensor([2.0, 0.0])
        self.assertFalse(d1.hash() == d2.hash())
        d2.setSensor(0, [1.0, 0.0])
        self.assertTrue(d1.hash() == d2.hash())

        d1.resize(10)
        d2.resize(12)
        d1.add('a', pg.Vector(d1.size(), 1.0))
        d2.add('a', pg.Vector(d2.size(), 1.0))
        self.assertFalse(d1.hash() == d2.hash())

        d2.resize(10)
        self.assertTrue(d1.hash() == d2.hash())
        d2('a')[3] = 2.0
        self.assertFalse(d1.hash() == d2.hash())
        d2('a')[3] = 1.0
        self.assertTrue(d1.hash() == d2.hash())
Example #10
def createMesh(geom, topo, sfile, Q):

    scheme = pg.DataContainerERT()
    scheme.setSensorPositions(topo)

    srv = pd.read_csv(sfile, sep='\t', header=None)
    for i, elec in enumerate("abmn"):
        scheme[elec] = srv.values[:, i + 1].astype(int) - 1

    for p in scheme.sensors():
        geom.createNode(p)
        geom.createNode(p - [0, 0.1])

    mesh = mt.createMesh(geom, quality=Q)

    return scheme, mesh
Example #11
    def export_to_pygimli_scheme(self, norrec='nor', timestep=None):
        """Export the data into a pygimli.DataContainerERT object.

        For now, do NOT set any sensor positions

        Parameters
        ----------


        Returns
        -------
        """
        logger.info('Exporting to pygimli DataContainer')
        logger.info('{} data will be exported'.format(norrec))
        if timestep is None:
            logger.info('No timestep selection is applied')
        else:
            logger.info('timestep(s) {} will be used'.format(timestep))

        import pygimli as pg
        data_container = pg.DataContainerERT()

        query = ' '.join((
            'norrec == "{}"'.format(norrec),
        ))

        if timestep is not None:
            query += ' and timestep=="{}"'.format(timestep)

        logger.debug('Query: {}'.format(query))

        subdata = self.data.query(query)
        assert subdata.shape[0] != 0

        data_container['a'] = subdata['a']
        data_container['b'] = subdata['b']
        data_container['m'] = subdata['m']
        data_container['n'] = subdata['n']
        data_container['r'] = subdata['r']

        if 'k' in subdata.columns:
            data_container['k'] = subdata['k']

        if 'rho_a' in subdata.columns:
            data_container['rhoa'] = subdata['rho_a']

        return data_container
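
A hedged usage sketch; the container instance name ert_container is an assumption, and the call only succeeds if the underlying pandas data hold rows for the requested norrec/timestep selection:

# Hedged usage sketch, the instance name is hypothetical.
data_nor = ert_container.export_to_pygimli_scheme(norrec='nor')
data_t1 = ert_container.export_to_pygimli_scheme(norrec='nor', timestep=1)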
Example #12
    def createElectrodes(self,
                         nElectrodes=24,
                         electrodeSpacing=1,
                         sensorList=None):
        self.data_ = pg.DataContainerERT()

        if sensorList is not None:
            for p in sensorList:
                if isinstance(p, float):
                    self.data_.createSensor((p, 0.))
                else:
                    self.data_.createSensor(p)
        else:
            for i in range(nElectrodes):
                self.data_.createSensor(
                    pg.Pos(float(i) * electrodeSpacing, 0.0))

        self.nElectrodes_ = self.data_.sensorCount()
Example #13
    def create_data_containerERT(self, measurements=numpy.array([[0, 1, 2, 3], ]),  # Dipole-Dipole
                                 scheme_type="abmn", verbose=False):
        """
        Creates the scheme from the previously detected 4 ArUco markers.
        Args:
            measurements: measurement configuration (default: Dipole-Dipole)
            scheme_type: electrode tokens ('abmn') assigned to the ArUco markers
            verbose:

        Returns:

        """
        scheme = pg.DataContainerERT()
        scheme.setSensorPositions(self.electrode)
        for i, elec in enumerate(scheme_type):
            scheme[elec] = measurements[:, i]
        scheme["k"] = ert.createGeometricFactors(scheme, verbose=verbose)
        self.scheme = scheme
        return self.scheme
Example #14
    def create(self,
               nElectrodes=24,
               electrodeSpacing=1,
               sensorList=None,
               **kwargs):
        """
        """
        self.createElectrodes(nElectrodes, electrodeSpacing, sensorList)
        self.createData(**kwargs)

        if self.addInverse_:
            out = pg.DataContainerERT(self.data_)
            self.setInverse(not self.inverse_)
            self.createData(**kwargs)
            self.data_.add(out)
            self.data_.removeInvalid()
            self.data_.sortSensorsIndex()

        if kwargs.values():
            print("Warning! DataSchemeBase::create has unhandled arguments")
            print(kwargs)

        return self.data_
Example #15
    def test_Int64Problem(self):
        data = pg.DataContainerERT()
        pos = np.arange(4, dtype=np.int)
        data.createFourPointData(0, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4, dtype=np.int32)
        data.createFourPointData(1, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4, dtype=np.int64)
        data.createFourPointData(2, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4, dtype=np.float)
        data.createFourPointData(3, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4, dtype=np.float32)
        data.createFourPointData(4, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4, dtype=np.float64)
        data.createFourPointData(5, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4)
        data.createFourPointData(6, pos[0], pos[1], pos[2], pos[3])
        pos = range(4)
        data.addFourPointData(pos[0], pos[1], pos[2], pos[3])
        #print(data('a'), data('b'), data('m'), data('n'))
        self.assertEqual(sum(data('a')), 8 * 0)
        self.assertEqual(sum(data('b')), 8 * 1)
        self.assertEqual(sum(data('m')), 8 * 2)
        self.assertEqual(sum(data('n')), 8 * 3)
Example #16
def createData(elecs, schemeName='none', **kwargs):
    """ Utility one-liner to create a BERT datafile

    Parameters
    ----------
    elecs : int | list[pos] | array(x)
        Number of electrodes or electrode positions or x-positions

    schemeName : str ['none']
        Name of the configuration. If you provide an unknown scheme name, all
        known schemes ['wa', 'wb', 'pp', 'pd', 'dd', 'slm', 'hw', 'gr'] are listed.

    **kwargs :

        Arguments that will be forwarded to the scheme generator.

        * inverse : bool
            interchange AB MN with MN AB
        * reciprocity : bool
            interchange AB MN with BA NM
        * addInverse : bool
            add additional inverse measurements
        * spacing : float [1]
            electrode spacing in meters
        * closed : bool
            Close the chain. Measure from the end of the array to the first
            electrode.

    Returns
    -------
    data : DataContainerERT

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from pygimli.physics import ert
    >>>
    >>> schemes = ['wa', 'wb', 'pp', 'pd', 'dd', 'slm', 'hw', 'gr']
    >>> fig, ax = plt.subplots(3,3)
    >>>
    >>> for i, schemeName in enumerate(schemes):
    ...     s = ert.createData(elecs=41, schemeName=schemeName)
    ...     k = ert.geometricFactors(s)
    ...     _ = ert.show(s, vals=k, ax=ax.flat[i], label='k - ' + schemeName)
    >>>
    >>> plt.show()
    """
    if kwargs.pop('sounding', False):
        data = pg.DataContainerERT()
        data.setSensors(pg.cat(-elecs[::-1], elecs))

        nElecs = len(elecs)
        for i in range(nElecs - 1):
            data.createFourPointData(i, i, 2 * nElecs - i - 1, nElecs - 1,
                                     nElecs)

        return data

    mg = DataSchemeManager()

    if schemeName == "none":
        pg.error('argument "schemeName" not set. Valid schemeNames are:')
        for i in mg.schemes():
            print(i, "scheme: " + mg.scheme(i).prefix)

    scheme = mg.scheme(schemeName)

    scheme.setInverse(kwargs.pop('inverse', False))
    scheme.addInverse(kwargs.pop('addInverse', False))
    scheme._closed = kwargs.pop('closed', False)

    if isinstance(elecs, int):
        data = scheme.create(nElectrodes=elecs,
                             electrodeSpacing=kwargs.pop('spacing', 1),
                             **kwargs)
    elif hasattr(elecs, '__iter__'):
        if isinstance(elecs[0], float) or isinstance(elecs[0], int):
            data = scheme.create(nElectrodes=len(elecs), **kwargs)
            data.setSensors(elecs)
        else:
            data = scheme.create(sensorList=elecs, **kwargs)
    else:
        print(elecs)
        pg.critical("Can't interpret elecs")

    return data
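
The closed keyword documented above chains the layout back from the last electrode to the first. A hedged sketch that uses only arguments from the docstring and the function defined here (it assumes the 'dd' scheme is known to the DataSchemeManager, as the function itself does):

# Hedged usage sketch, not part of the original module.
ring = createData(elecs=24, schemeName='dd', spacing=2.0, closed=True)
print(ring.sensorCount(), ring.size())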
Example #17
def importRes2dInv(filename, verbose=False, return_header=False):
    """Read res2dinv format

    Parameters
    ----------
    filename : str
    verbose : bool [False]
    return_header : bool [False]

    Returns
    -------
    pg.DataContainerERT and (in case of return_header=True)
    header dictionary

    Format
    ------
        str - title
        float - unit spacing [m]
        int - Array Number (1-Wenner, 3-Dipole-dipole atm only)
        int - Number of Datapoints
        float - x-location given in terms of first electrode
                use 1 if mid-point location is given
        int - 0 for no IP, use 1 if IP present
        str - Phase Angle  if IP present
        str - mrad if IP present
        0,90.0 - if IP present
        dataBody
    """

    def getNonEmptyRow(i, comment='#'):
        s = next(i)
        while s[0] == comment:
            s = next(i)
        return s.split('\r\n')[0]
    # def getNonEmptyRow(...)

    with open(filename, 'r') as fi:
        content = fi.readlines()

    it = iter(content)
    header = {}
    header['name'] = getNonEmptyRow(it, comment=';')
    header['spacing'] = float(getNonEmptyRow(it, comment=';'))
    typrow = getNonEmptyRow(it, comment=';')
    typ = int(typrow.rstrip('\n').rstrip('R').rstrip('L'))

    if typ == 11:
        # independent electrode positions
        header['subtype'] = int(getNonEmptyRow(it, comment=';'))
        header['dummy'] = getNonEmptyRow(it, comment=';')
        isR = int(getNonEmptyRow(it, comment=';'))

    nData = int(getNonEmptyRow(it, comment=';'))
    xLoc = float(getNonEmptyRow(it, comment=';'))
    hasIP = int(getNonEmptyRow(it, comment=';'))

    if hasIP:
        header['ipQuantity'] = getNonEmptyRow(it, comment=';')
        header['ipUnit'] = getNonEmptyRow(it, comment=';')
        header['ipData'] = getNonEmptyRow(it, comment=';')
        ipline = header['ipData'].rstrip('\n').rstrip('\r').split(' ')
        if len(ipline) > 2:  # obviously spectral data?
            header['ipNumGates'] = int(ipline[0])
            header['ipDelay'] = float(ipline[1])
            header['onTime'] = float(ipline[-2])
            header['offTime'] = float(ipline[-1])
            header['ipDT'] = np.array(ipline[2:-2], dtype=float)
            header['ipGateT'] = np.cumsum(np.hstack((header['ipDelay'],
                                                     header['ipDT'])))

    data = pg.DataContainerERT()
    data.resize(nData)

    if typ == 9 or typ == 10:
        raise Exception("Don't know how to read:" + str(typ))

    if typ == 11 or typ == 12 or typ == 13:  # mixed array

        res = pg.Vector(nData, 0.0)
        ip = pg.Vector(nData, 0.0)
        specIP = []

        for i in range(nData):
            vals = getNonEmptyRow(it, comment=';').replace(',', ' ').split()

            # row starts with 4
            if int(vals[0]) == 4:
                eaID = data.createSensor(pg.Pos(float(vals[1]),
                                                float(vals[2])))
                ebID = data.createSensor(pg.Pos(float(vals[3]),
                                                float(vals[4])))
                emID = data.createSensor(pg.Pos(float(vals[5]),
                                                float(vals[6])))
                enID = data.createSensor(pg.Pos(float(vals[7]),
                                                float(vals[8])))
            elif int(vals[0]) == 3:
                eaID = data.createSensor(pg.Pos(float(vals[1]),
                                                float(vals[2])))
                ebID = -1
                emID = data.createSensor(pg.Pos(float(vals[3]),
                                                float(vals[4])))
                enID = data.createSensor(pg.Pos(float(vals[5]),
                                                float(vals[6])))
            elif int(vals[0]) == 2:
                eaID = data.createSensor(pg.Pos(float(vals[1]),
                                                float(vals[2])))
                ebID = -1
                emID = data.createSensor(pg.Pos(float(vals[3]),
                                                float(vals[4])))
                enID = -1
            else:
                raise Exception("Don't know how to handle row", vals[0])
            res[i] = float(vals[int(vals[0])*2+1])
            if hasIP:
                # ip[i] = float(vals[int(vals[0])*2+2])
                ipCol = int(vals[0])*2+2
                ip[i] = float(vals[ipCol])
                if 'ipNumGates' in header:
                    specIP.append(vals[ipCol:])

            data.createFourPointData(i, eaID, ebID, emID, enID)

        if isR:
            data.set('r', res)
        else:
            data.set('rhoa', res)

        if hasIP:
            data.set('ip', ip)
            if 'ipNumGates' in header:
                A = np.array(specIP, dtype=float)
                A[A > 1000] = -999
                A[A < -1000] = -999
                for i in range(header['ipNumGates']):
                    data.set('ip'+str(i+1), A[:, i])

        data.sortSensorsX()
        data.sortSensorsIndex()
        if return_header:
            return data, header
        else:
            return data

    # number of values per column per type
    nntyp = [0, 3, 3, 4, 3, 3, 4, 4, 3, 0, 0, 8, 10]

    nn = nntyp[typ] + hasIP

    # dataBody = pg.Matrix(nn, nData)
    dataBody = np.zeros((nn, nData))

    for i in range(nData):
        vals = getNonEmptyRow(it, comment=';').replace(',', ' ').split()
        dataBody[:, i] = np.array(vals, dtype=float)
#        for j in range(nn):
#            dataBody[j][i] = float(vals[j])

    XX = dataBody[0]
    EL = dataBody[1]
    SP = pg.Vector(nData, 1.0)

    if nn - hasIP == 4:
        SP = dataBody[2]

    AA = None
    BB = None
    NN = None
    MM = None

    if typ == 1:  # Wenner
        AA = XX - xLoc * EL * 1.5
        MM = AA + EL
        NN = MM + EL
        BB = NN + EL
    elif typ == 2:  # Pole-Pole
        AA = XX - xLoc * EL * 0.5
        MM = AA + EL
    elif typ == 3:  # Dipole-Dipole
        AA = XX - xLoc * EL * (SP / 2. + 1.)
        BB = AA + EL
        MM = BB + SP * EL
        NN = MM + EL
        pass
    elif typ == 4:  # WENNER-BETA
        AA = XX - xLoc * EL * 1.5
        BB = AA + EL
        MM = BB + EL
        NN = MM + EL
    elif typ == 5:  # WENNER-GAMMA
        AA = XX - xLoc * EL * 1.5
        MM = AA + EL
        BB = MM + EL
        NN = BB + EL
    elif typ == 6:  # POLE-DIPOLE
        AA = XX - xLoc * SP * EL - (SP - 1.) * (SP < 0.) * EL
        MM = AA + SP * EL
        NN = MM + pg.sign(SP) * EL
    elif typ == 7:  # SCHLUMBERGER
        AA = XX - xLoc * EL * (SP + 0.5)
        MM = AA + SP * EL
        NN = MM + EL
        BB = NN + SP * EL
    else:
        raise Exception('Datatype ' + str(typ) + ' not yet supported')

    for i in range(len(AA)):

        if AA is not None:
            eaID = data.createSensor(pg.Pos(AA[i], 0.0))
        else:
            eaID = -1

        if BB is not None:
            ebID = data.createSensor(pg.Pos(BB[i], 0.0))
        else:
            ebID = -1

        if MM is not None:
            emID = data.createSensor(pg.Pos(MM[i], 0.0))
        else:
            emID = -1

        if NN is not None:
            enID = data.createSensor(pg.Pos(NN[i], 0.0))
        else:
            enID = -1

        data.createFourPointData(i, eaID, ebID, emID, enID)

    data.set('rhoa', dataBody[nn - hasIP - 1])
    if hasIP:
        data.set('ip', dataBody[nn - 1])

    data.sortSensorsX()
    if return_header:
        return data, header
    else:
        return data
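
A hedged usage sketch for the importer above; the filename is a placeholder:

# Hedged usage sketch, "profile.dat" is a placeholder filename.
data, header = importRes2dInv("profile.dat", verbose=True, return_header=True)
print(header['spacing'], data.size())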
Example #18
def prepare(electrode_groups, measurements, mesh_cut_tool_param=None, masked_meas_lines=None):
    """
    Prepares data for GIMLI inversion.
    :param electrode_groups:
    :param measurements:
    :return:
    """
    #el_offset = 0
    electrodes = []
    sensor_ids = []
    a = pd.Series()
    b = pd.Series()
    m = pd.Series()
    n = pd.Series()
    i = pd.Series()
    u = pd.Series()
    err = pd.Series()
    rhoa = pd.Series()

    meas_info = MeasurementsInfo()

    data = pg.DataContainerERT()

    if mesh_cut_tool_param is not None:
        base_point, gen_vecs = cut_point_cloud.cut_tool_to_gen_vecs(mesh_cut_tool_param, only_inv=True)
        inv_tr_mat = cut_point_cloud.inv_tr(gen_vecs)

    for ms in measurements:
        if ms.data is None:
            continue
        d = ms.data["data"]

        ind_to_rem = set()

        # remove masked lines from measurements
        if masked_meas_lines is not None:
            if ms.number in masked_meas_lines:
                ind_to_rem.update({i for i, b in enumerate(masked_meas_lines[ms.number][:d.shape[0]]) if b})

        # remove measurements outside inversion region
        if mesh_cut_tool_param is not None:
            for j in range(d.shape[0]):
                for col in ["ca", "cb", "pa", "pb"]:
                    e = _find_el(electrode_groups, ms.meas_map[d[col][j]])
                    nl = cut_point_cloud.tr_to_local(base_point, inv_tr_mat, np.array([e.x, e.y, e.z]))
                    if not (0 <= nl[0] <= 1 and 0 <= nl[1] <= 1 and 0 <= nl[2] <= 1):
                        ind_to_rem.add(j)
                        break

        d = d.drop(d.index[list(ind_to_rem)])

        meas_map_sensor = {}
        for meas_id, e_id in ms.meas_map.items():
            # todo: rewrite this in a smarter way
            if (meas_id not in [v for v in d["ca"]]) \
                    and (meas_id not in [v for v in d["cb"]]) \
                    and (meas_id not in [v for v in d["pa"]]) \
                    and (meas_id not in [v for v in d["pb"]]):
                continue

            ind = -1
            for j, e in enumerate(electrodes):
                if e.id == e_id:
                    ind = j
                    break
            if ind < 0:
                e = _find_el(electrode_groups, e_id)
                if e is None:
                    print("error")
                ind = len(electrodes)
                electrodes.append(e)
                s_id = data.createSensor([e.x, e.y, e.z])
                sensor_ids.append(s_id)
            meas_map_sensor[meas_id] = sensor_ids[ind]

        a = a.append(pd.Series([meas_map_sensor[v] for v in d["ca"]]), ignore_index=True)
        b = b.append(pd.Series([meas_map_sensor[v] for v in d["cb"]]), ignore_index=True)
        m = m.append(pd.Series([meas_map_sensor[v] for v in d["pa"]]), ignore_index=True)
        n = n.append(pd.Series([meas_map_sensor[v] for v in d["pb"]]), ignore_index=True)
        i = i.append(d["I"], ignore_index=True)
        u = u.append(d["V"], ignore_index=True)
        err = err.append(d["std"], ignore_index=True)
        rhoa = rhoa.append(d["AppRes"], ignore_index=True)

        #el_offset += len(ms.meas_map)

        for j in d.index:
            meas_info.items.append(MeasurementInfoItem(measurement_number=ms.number,
                                                       ca=d["ca"][j], cb=d["cb"][j], pa=d["pa"][j], pb=d["pb"][j],
                                                       I=d["I"][j], V=d["V"][j], AppRes=d["AppRes"][j], std=d["std"][j],
                                                       inv_ca=meas_map_sensor[d["ca"][j]], inv_cb=meas_map_sensor[d["cb"][j]], inv_pa=meas_map_sensor[d["pa"][j]], inv_pb=meas_map_sensor[d["pb"][j]]))

    data.resize(len(a))
    data.set('a', a)
    data.set('b', b)
    data.set('m', m)
    data.set('n', n)
    data.set('i', i)
    data.set('u', u)
    data.set('err', err)
    data.set('rhoa', rhoa)
    #data.markValid(data('rhoa') > 0)
    # commented out because of analyse_measurement_dialog.py

    return data, meas_info
Example #19
def prepare_old2(electrode_groups, measurements):
    """
    Prepares data for GIMLI inversion.
    :param electrode_groups:
    :param measurements:
    :return:
    """
    #el_offset = 0
    electrodes = []
    a = pd.Series()
    b = pd.Series()
    m = pd.Series()
    n = pd.Series()
    i = pd.Series()
    u = pd.Series()
    err = pd.Series()
    rhoa = pd.Series()

    for ms in measurements:
        if ms.data is None:
            continue
        d = ms.data["data"]

        meas_map_sensor = {}
        for meas_id, e_id in ms.meas_map.items():
            ind = -1
            for j, e in enumerate(electrodes):
                if e.id == e_id:
                    ind = j
                    break
            if ind < 0:
                e = _find_el(electrode_groups, e_id)
                if e is None:
                    print("error")
                ind = len(electrodes)
                electrodes.append(e)
            meas_map_sensor[meas_id] = ind

        a = a.append(pd.Series([meas_map_sensor[v] for v in d["ca"]]), ignore_index=True)
        b = b.append(pd.Series([meas_map_sensor[v] for v in d["cb"]]), ignore_index=True)
        m = m.append(pd.Series([meas_map_sensor[v] for v in d["pa"]]), ignore_index=True)
        n = n.append(pd.Series([meas_map_sensor[v] for v in d["pb"]]), ignore_index=True)
        i = i.append(d["I"], ignore_index=True)
        u = u.append(d["V"], ignore_index=True)
        err = err.append(d["std"], ignore_index=True)
        rhoa = rhoa.append(d["AppRes"], ignore_index=True)

        #el_offset += len(ms.meas_map)

    data = pg.DataContainerERT()
    for e in electrodes:
        data.createSensor([e.x, e.y, e.z])
    data.resize(len(a))
    data.set('a', a)
    data.set('b', b)
    data.set('m', m)
    data.set('n', n)
    data.set('i', i)
    data.set('u', u)
    data.set('err', err)
    data.set('rhoa', rhoa)
    data.markValid(data('rhoa') > 0)
    return data
Example #20
def inv_ert(inversion_conf, project_conf):
    inv_par = inversion_conf.inversion_param
    cut_par = inversion_conf.mesh_cut_tool_param

    remove_old_files()

    ret, bw_surface = prepare(cut_par, inv_par, project_conf)
    if not ret:
        return
    #return

    # snap electrodes
    print()
    print_headline("Snapping electrodes")
    if inv_par.meshFrom == MeshFrom.SURFACE_CLOUD:
        snap_surf.main(inv_par,
                       project_conf,
                       bw_surface,
                       max_dist=inv_par.snapDistance)
    else:
        snap_electrodes.main(inv_par,
                             project_conf,
                             max_dist=inv_par.snapDistance)

    #ball_mesh("inv_mesh.msh", "inv_mesh2.msh", [-622342, -1128822, 22], 5.0)
    #return

    print()
    print_headline("Creating inversion mesh")
    mesh_from_brep("inv_mesh_tmp.brep", "inv_mesh_tmp.msh2", project_conf,
                   inv_par)

    print()
    print_headline("Modify mesh")
    modify_mesh("inv_mesh_tmp.msh2", "inv_mesh.msh", cut_par)

    #if inv_par.meshFrom == MeshFrom.SURFACE_CLOUD:
    print()
    print_headline("Snapping electrodes final")
    snap_electrodes.main(inv_par,
                         project_conf,
                         max_dist=inv_par.snapDistance,
                         final=True)

    print()
    print_headline("Inversion")

    # res = pb.Resistivity("input.dat")
    # res.invert()
    # np.savetxt('resistivity.vector', res.resistivity)
    # return

    # load data file
    data = pg.DataContainerERT("input_snapped.dat", removeInvalid=False)
    #data = pg.DataContainerERT("ldp2.dat")
    #print(data.size())
    #print(data("a"))
    #print(data.sensorIdx())
    #return

    # mark all data valid
    #data.markValid(data('rhoa') > 0)
    #data.markValid(data('rhoa') <= 0)
    #data.markValid(data('u') > 0)

    # k, rhoa
    #inv_par.k_ones = True
    if inv_par.k_ones:
        data.set("k", np.ones(data.size()))
    else:
        data.set("k", misc.geometricFactors(data))
    #data.set("err", pb.Resistivity.estimateError(data, absoluteUError=0.0001, relativeError=0.03))
    #data.set("k", np.ones(data.size()))
    #data.set("k", misc.geometricFactors(data))
    data.set("rhoa", data("u") / data("i") * data("k"))
    tolerance = 1e-12
    #data.markValid(np.abs(data('rhoa')) > tolerance)
    data.markValid(data('rhoa') > tolerance)
    data.markInvalid(data('rhoa') <= tolerance)  # TODO: do this properly

    # remove invalid data
    oldsize = data.size()
    data.removeInvalid()
    newsize = data.size()
    if newsize < oldsize:
        print('Removed ' + str(oldsize - newsize) + ' values.')
    if not data.allNonZero('rhoa'):
        print("No or partial rhoa values.")
        return

    # check, compute error
    # if data.allNonZero('err'):
    #     error = data('err')
    # else:
    #     print("estimate data error")
    #     error = inv_par.relativeError + inv_par.absoluteError / data('rhoa')
    error = data('err')
    min_err = 0.0005
    for i in range(data.size()):
        if error[i] < min_err:
            error[i] = min_err

    # create FOP
    fop = pg.core.DCSRMultiElectrodeModelling(verbose=inv_par.verbose)
    fop.setThreadCount(psutil.cpu_count(logical=False))
    fop.setData(data)

    # create Inv
    inv = pg.core.RInversion(verbose=inv_par.verbose, dosave=False)
    # variables tD, tM are needed to prevent the transform objects from being destroyed
    tM = pg.core.RTransLogLU(inv_par.minModel, inv_par.maxModel)
    if inv_par.data_log:
        tD = pg.core.RTransLog()
        inv.setTransData(tD)
    inv.setTransModel(tM)
    inv.setForwardOperator(fop)

    # mesh
    mesh_file = "inv_mesh.msh"
    #mesh_file = inv_par.meshFile
    if mesh_file == "":
        depth = inv_par.depth
        if depth is None:
            depth = pg.core.DCParaDepth(data)

        poly = pg.meshtools.createParaMeshPLC(
            data.sensorPositions(),
            paraDepth=depth,
            paraDX=inv_par.paraDX,
            paraMaxCellSize=inv_par.maxCellArea,
            paraBoundary=2,
            boundary=2)

        if inv_par.verbose:
            print("creating mesh...")
        mesh = pg.meshtools.createMesh(poly,
                                       quality=inv_par.quality,
                                       smooth=(1, 10))
    else:
        mesh = pg.Mesh(pg.load(mesh_file))

    mesh.createNeighbourInfos()

    if inv_par.verbose:
        print(mesh)

    sys.stdout.flush()  # flush before multithreading
    fop.setMesh(mesh)
    fop.regionManager().setConstraintType(1)

    # print(fop.regionManager().regionCount())
    # print(fop.regionManager().paraDomain().cellMarkers())
    # pg.show(mesh, data=mesh.cellMarkers())
    # print(fop.regionManager().region(1).cellMarkers())
    # return

    if not inv_par.omitBackground:
        if fop.regionManager().regionCount() > 1:
            fop.regionManager().region(1).setBackground(True)

    if mesh_file == "":
        fop.createRefinedForwardMesh(True, False)
    else:
        fop.createRefinedForwardMesh(inv_par.refineMesh, inv_par.refineP2)

    paraDomain = fop.regionManager().paraDomain()
    #paraDomain = fop.regionManager().mesh()
    inv.setForwardOperator(fop)  # necessary?

    # in_ball = find_markers_in_ball(paraDomain, [-622342, -1128822, 22], 5.0)
    # print(pg.median(data('rhoa')))
    # pc = fop.regionManager().parameterCount()
    # x = pg.RVector(pc, 10000.0)
    # for i, m in enumerate(paraDomain.cellMarkers()):
    #     if m in in_ball:
    #         x[i] = 1000.0
    # resp = fop.response(x)
    # print(resp)
    #return

    # inversion parameters
    inv.setData(data('rhoa'))
    #inv.setData(resp)
    inv.setRelativeError(error)
    #inv.setRelativeError(pg.RVector(data.size(), 0.03))
    fop.regionManager().setZWeight(inv_par.zWeight)
    inv.setLambda(inv_par.lam)
    inv.setOptimizeLambda(inv_par.optimizeLambda)
    inv.setMaxIter(inv_par.maxIter)
    inv.setRobustData(inv_par.robustData)
    inv.setBlockyModel(inv_par.blockyModel)
    inv.setRecalcJacobian(inv_par.recalcJacobian)

    pc = fop.regionManager().parameterCount()
    if inv_par.k_ones:
        # hack of gimli hack
        v = pg.Vector(
            pg.Vector(
                pc,
                pg.core.median(data('rhoa') * misc.geometricFactors(data))))
        v[0] += tolerance * 2
        startModel = v
    else:
        startModel = pg.Vector(pc, pg.core.median(data('rhoa')))
    #startModel = pg.RVector(pc, 2000.0)

    inv.setModel(startModel)

    # Run the inversion
    sys.stdout.flush()  # flush before multithreading
    model = inv.run()
    resistivity = model[paraDomain.cellMarkers()]
    np.savetxt('resistivity.vector', resistivity)
    paraDomain.addData('Resistivity', resistivity)
    #paraDomain.addExportData('Resistivity (log10)', np.log10(resistivity))
    #paraDomain.addExportData('Coverage', coverageDC(fop, inv, paraDomain))
    #paraDomain.exportVTK('resistivity')

    # output in local coordinates
    if inv_par.local_coord:
        base_point, gen_vecs = cut_point_cloud.cut_tool_to_gen_vecs(cut_par)
        localparaDomain = pg.Mesh(paraDomain)
        localparaDomain.translate(pg.RVector3(-base_point))
        localparaDomain.rotate(
            pg.RVector3(0, 0, -math.atan2(gen_vecs[0][1], gen_vecs[0][0])))
        localparaDomain.exportVTK('resistivity')
    else:
        paraDomain.exportVTK('resistivity')

    # measurements on model
    print()
    print_headline("Measurements on model")
    with open("measurements_info.json") as fd:
        meas_info = MeasurementsInfo.deserialize(json.load(fd))

    resp = fop.response(resistivity)
    # hack of gimli hack
    v = pg.Vector(startModel)
    v[0] += tolerance * 2
    resp_start = fop.response(v)

    map = {}
    map_start = {}
    map_appres_gimli = {}
    for i in range(data.size()):
        map[(data("a")[i], data("b")[i], data("m")[i], data("n")[i])] = resp[i]
        map_start[(data("a")[i], data("b")[i], data("m")[i],
                   data("n")[i])] = resp_start[i]
        map_appres_gimli[(data("a")[i], data("b")[i], data("m")[i],
                          data("n")[i])] = data('rhoa')[i]

    meas_model_info = MeasurementsModelInfo()

    with open("measurements_model.txt", "w") as fd:
        fd.write(
            "meas_number ca  cb  pa  pb  I[A]      V[V]     AppRes[Ohmm] std    AppResGimli[Ohmm] AppResModel[Ohmm]   ratio AppResStartModel[Ohmm] start_ratio\n"
        )
        fd.write(
            "-------------------------------------------------------------------------------------------------------------------------------------------------\n"
        )
        for item in meas_info.items:
            k = (item.inv_ca, item.inv_cb, item.inv_pa, item.inv_pb)
            if k in map:
                m_on_m = "{:17.2f} {:17.2f} {:7.2f} {:22.2f}     {:7.2f}".format(
                    map_appres_gimli[k], map[k], map[k] / map_appres_gimli[k],
                    map_start[k], map_start[k] / map_appres_gimli[k])
                meas_model_info.items.append(
                    MeasurementModelInfoItem(
                        measurement_number=item.measurement_number,
                        ca=item.ca,
                        cb=item.cb,
                        pa=item.pa,
                        pb=item.pb,
                        app_res_model=map[k],
                        app_res_start_model=map_start[k]))
            else:
                m_on_m = "         not used"

            fd.write(
                "{:11} {:3} {:3} {:3} {:3} {:8.6f} {:9.6f} {:12.2f} {:6.4f} {}\n"
                .format(item.measurement_number, item.ca, item.cb, item.pa,
                        item.pb, item.I, item.V, item.AppRes, item.std,
                        m_on_m))

    with open("measurements_model_info.json", "w") as fd:
        json.dump(meas_model_info.serialize(), fd, indent=4, sort_keys=True)

    if inv_par.p3d:
        print()
        print_headline("Saving p3d")
        t = time.time()
        save_p3d(paraDomain, model.array(), cut_par, inv_par.p3dStep,
                 "resistivity", inv_par.local_coord)
        print("save_p3d elapsed time: {:0.3f} s".format(time.time() - t))

    print()
    print("All done.")
Example #21
# spatial dimensions and need different boundary conditions (BC).
#
# As there is no current flow through the tank's boundary at all, homogeneous
# (Neumann) BC are defined for the whole boundary.
# Neumann BC are natural (intrinsic) for the finite element simulations.
# \link{tutorial:fem:bc}, so we just need to define a cube geometry including
# region markers.

plc = mt.createCube(size=[0.99, 0.5, 1.0], pos=[0.495, 0.25], boundaryMarker=1)

###############################################################################
# We first read the measuring scheme file and add the electrodes as nodes with
# the marker -99 to the geometry.

filename = pg.getExampleFile("ert/modeltank.shm")
shm = pg.DataContainerERT(filename)

for s in shm.sensors():
    plc.createNode(s, marker=-99)

###############################################################################
# There are two small problems to overcome for simulating bodies with pure Neumann boundaries.
#
# First, we always need dipole current injection since there can be no current
# flow out of the closed boundaries of our experimental tank.
# (Note that by default single poles are simulated and superpositioned.)
# Therefore we define a reference electrode position inside the PLC, with a
# marker -999, somewhere away from the electrodes.

plc.createNode([0.5, 0.5, -0.5], marker=-999)
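
The snippet ends before the meshing step; a hedged sketch of how the marked PLC would typically be turned into a simulation mesh (mt is the pygimli.meshtools import already used above, and no tutorial-specific quality settings are reproduced):

# Hedged sketch only: build the tetrahedral mesh from the marked PLC.
mesh = mt.createMesh(plc)
print(mesh)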
Example #22
    def simulate(self, mesh, scheme, res, **kwargs):
        """Simulate an ERT measurement.

        Perform the forward task for a given mesh, a resistivity distribution
        (per cell), a measurement
        scheme and will return data (apparent resistivity) or potential fields.

        This function can also operate on complex resistivity models, thereby
        computing complex apparent resistivities.

        The forward operator itself only calculates potential values
        for the given scheme file.
        To calculate apparent resistivities, geometric factors (k) are needed.
        If there are no k values in the DataContainerERT scheme, we will
        try to calculate them, either analytically or by using a p2-refined
        version of the given mesh.

        TODO
        ----
        * 2D + Complex + SR

        Args
        ----
        mesh : :gimliapi:`GIMLI::Mesh`
            2D or 3D Mesh to calculate for.

        res : float, array(mesh.cellCount()) | array(N, mesh.cellCount()) | list
            Resistivity distribution for the given mesh cells can be:
            . float for homogeneous resistivity
            . single array of length mesh.cellCount()
            . matrix of N resistivity distributions of length mesh.cellCount()
            . resistivity map as [[regionMarker0, res0],
                                  [regionMarker0, res1], ...]

        scheme : :gimliapi:`GIMLI::DataContainerERT`
            Data measurement scheme.

        Keyword Args
        ------------
        verbose: bool[False]
            Be verbose. Will override class settings.
        calcOnly: bool [False]
            Use fop.calculate instead of fop.response. Useful if you want
            to force the calculation of impedances for homogeneous models.
            No noise handling. Solution is put as token 'u' in the returned
            DataContainerERT.
        noiseLevel: float [0.0]
            add normally distributed noise based on
            scheme('err') or on noiseLevel if scheme did not contain 'err'
        noiseAbs: float [0.0]
            Absolute voltage error in V
        returnArray: bool [False]
            Returns an array of apparent resistivities instead of
            a DataContainerERT
        returnFields: bool [False]
            Returns a matrix of all potential values (per mesh nodes)
            for each injection electrodes.

        Returns
        -------
        DataContainerERT | array(N, data.size()) | array(N, data.size()) |
        array(N, data.size()):
            Data container with resulting apparent resistivity data and
            errors (if noiseLevel or noiseAbs is set).
            Optional returns a Matrix of rhoa values
            (for returnArray==True forces noiseLevel=0).
            In case of a complex valued resistivity model, phase values will be
            returned in the DataContainerERT (see example below), or as an
            additional returned array.

        Examples
        --------
        # TODO: Remove pybert dependencies
        # >>> import pybert as pb
        # >>> import pygimli as pg
        # >>> import pygimli.meshtools as mt
        # >>> world = mt.createWorld(start=[-50, 0], end=[50, -50],
        # ...                        layers=[-1, -5], worldMarker=True)
        # >>> scheme = pb.createData(
        # ...                     elecs=pg.utils.grange(start=-10, end=10, n=21),
        # ...                     schemeName='dd')
        # >>> for pos in scheme.sensorPositions():
        # ...     _= world.createNode(pos)
        # ...     _= world.createNode(pos + [0.0, -0.1])
        # >>> mesh = mt.createMesh(world, quality=34)
        # >>> rhomap = [
        # ...    [1, 100. + 0j],
        # ...    [2, 50. + 0j],
        # ...    [3, 10.+ 0j],
        # ... ]
        # >>> ert = pb.ERTManager()
        # >>> data = ert.simulate(mesh, res=rhomap, scheme=scheme, verbose=True)
        # >>> rhoa = data.get('rhoa').array()
        # >>> phia = data.get('phia').array()
        """
        verbose = kwargs.pop('verbose', self.verbose)
        calcOnly = kwargs.pop('calcOnly', False)
        returnFields = kwargs.pop("returnFields", False)
        returnArray = kwargs.pop('returnArray', False)
        noiseLevel = kwargs.pop('noiseLevel', 0.0)
        noiseAbs = kwargs.pop('noiseAbs', 1e-4)
        seed = kwargs.pop('seed', None)

        #segfaults with self.fop (test & fix)
        fop = self.createForwardOperator(useBert=self.useBert, sr=self.sr)
        fop.data = scheme
        fop.setMesh(mesh, ignoreRegionManager=True)
        fop.verbose = verbose

        rhoa = None
        phia = None

        isArrayData = False
        # parse the given res into mesh-cell-sized array
        if isinstance(res, int) or isinstance(res, float):
            res = np.ones(mesh.cellCount()) * float(res)
        elif isinstance(res, complex):
            res = np.ones(mesh.cellCount()) * res
        elif hasattr(res[0], '__iter__'):  # ndim == 2
            if len(res[0]) == 2:  # res seems to be a res map
                # check if there are markers in the mesh that are not defined in
                # the rhomap. better signal here before it results in some error
                meshMarkers = list(set(mesh.cellMarkers()))
                mapMarkers = [m[0] for m in res]
                if any([mark not in mapMarkers for mark in meshMarkers]):
                    left = [m for m in meshMarkers if m not in mapMarkers]
                    pg.critical(
                        "Mesh contains markers without assigned resistivities {}. Please fix given rhomap."
                        .format(left))
                res = pg.solver.parseArgToArray(res, mesh.cellCount(), mesh)
            else:  # probably nData x nCells array
                # better check for array data here
                isArrayData = True

        if isinstance(res[0], complex) or isinstance(res, pg.CVector):
            pg.info("Complex resistivity values found.")
            fop.setComplex(True)
        else:
            fop.setComplex(False)

        if not scheme.allNonZero('k') and not calcOnly:
            if verbose:
                pg.info('Calculate geometric factors.')
            scheme.set('k', fop.calcGeometricFactor(scheme))

        ret = pg.DataContainerERT(scheme)
        ## just be sure that we don't work with artifacts
        ret['u'] *= 0.0
        ret['i'] *= 0.0
        ret['r'] *= 0.0

        if isArrayData:
            rhoa = np.zeros((len(res), scheme.size()))
            for i, r in enumerate(res):
                rhoa[i] = fop.response(r)
                if verbose:
                    print(i, "/", len(res), " : ", pg.dur(), "s", "min r:",
                          min(r), "max r:", max(r), "min r_a:", min(rhoa[i]),
                          "max r_a:", max(rhoa[i]))
        else:  # res is single resistivity array
            if len(res) == mesh.cellCount():

                if calcOnly:
                    fop.mapERTModel(res, 0)

                    dMap = pg.core.DataMap()
                    fop.calculate(dMap)
                    if fop.complex():
                        pg.critical('Implement me')
                    else:
                        ret["u"] = dMap.data(scheme)
                        ret["i"] = np.ones(ret.size())

                    if returnFields:
                        return pg.Matrix(fop.solution())
                    return ret
                else:
                    if fop.complex():
                        res = pg.utils.squeezeComplex(res)

                    resp = fop.response(res)

                    if fop.complex():
                        rhoa, phia = pg.utils.toPolar(resp)
                    else:
                        rhoa = resp
            else:
                print(mesh)
                print("res: ", res)
                raise BaseException(
                    "Simulate called with wrong resistivity array.")

        if not isArrayData:
            ret['rhoa'] = rhoa

            if phia is not None:
                ret.set('phia', phia)
        else:
            ret.set('rhoa', rhoa[0])
            if phia is not None:
                ret.set('phia', phia[0])

        if returnFields:
            return pg.Matrix(fop.solution())

        if noiseLevel > 0:  # if errors in data noiseLevel=1 just triggers
            if not ret.allNonZero('err'):
                # 1A  and #100µV
                ret.set(
                    'err',
                    self.estimateError(ret,
                                       relativeError=noiseLevel,
                                       absoluteUError=noiseAbs,
                                       absoluteCurrent=1))
                print("Data error estimate (min:max) ", min(ret('err')), ":",
                      max(ret('err')))

            rhoa *= 1. + pg.randn(ret.size(), seed=seed) * ret('err')
            ret.set('rhoa', rhoa)

            ipError = None
            if phia is not None:
                if scheme.allNonZero('iperr'):
                    ipError = scheme('iperr')
                else:
                    # np.abs(self.data("phia") +TOLERANCE) * 1e-4absoluteError
                    if noiseLevel > 0.5:
                        noiseLevel /= 100.

                    if 'phiErr' in kwargs:
                        ipError = np.ones(
                            ret.size()) * kwargs.pop('phiErr') / 1000
                    else:
                        ipError = abs(ret["phia"]) * noiseLevel

                    if verbose:
                        print("Data IP abs error estimate (min:max) ",
                              min(ipError), ":", max(ipError))

                phia += pg.randn(ret.size(), seed=seed) * ipError
                ret['iperr'] = ipError
                ret['phia'] = phia

        # check what needs to be set up and returned

        if returnArray:
            if phia is not None:
                return rhoa, phia
            else:
                return rhoa

        return ret
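
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original source): how a simulate()
# routine like the one above is typically driven through pygimli's ert module.
# The electrode layout, world geometry, resistivity and noise settings are
# illustrative assumptions.
import numpy as np
import pygimli as pg
import pygimli.meshtools as mt
from pygimli.physics import ert

scheme = ert.createERTData(elecs=np.linspace(0, 50, 21), schemeName='dd')
world = mt.createWorld(start=[-20, 0], end=[70, -20], worldMarker=True)
for p in scheme.sensorPositions():
    world.createNode(p)            # refine the mesh around each electrode
    world.createNode(p - [0, 0.1])
mesh = mt.createMesh(world, quality=34)

res = np.ones(mesh.cellCount()) * 100.0  # homogeneous 100 Ohm*m model
data = ert.simulate(mesh, scheme=scheme, res=res,
                    noiseLevel=0.01, noiseAbs=1e-6, seed=1337)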
------------------------

In this example, we illustrate how to visualize the sensitivities of four-point
arrays. You can easily loop over the plotting command to create something like:
https://www.youtube.com/watch?v=lt1qV-2d5Ps
"""

import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
import pygimli.meshtools as mt
import pygimli.physics.ert as ert

###############################################################################
# We start by creating an ERT data container with three four-point arrays.
scheme = pg.DataContainerERT()

nelecs = 10
pos = np.zeros((nelecs, 2))
pos[:, 0] = np.linspace(5, 25, nelecs)
scheme.setSensorPositions(pos)

measurements = np.array((
    [0, 3, 6, 9],  # Dipole-Dipole
    [0, 9, 3, 6],  # Wenner
    [0, 9, 4, 5]   # Schlumberger
))

for i, elec in enumerate("abmn"):
    scheme[elec] = measurements[:, i]
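
###############################################################################
# Hedged continuation sketch (not part of the original excerpt): compute the
# sensitivity of the first array on a homogeneous model and display it. The
# world geometry, drop tolerance and plotting options are assumptions.
scheme["k"] = ert.createGeometricFactors(scheme)

world = mt.createWorld(start=[0, 0], end=[30, -10], worldMarker=True)
mesh = mt.createMesh(world, quality=33, area=0.2)

fop = ert.ERTModelling()
fop.setData(scheme)
fop.setMesh(mesh)

model = np.ones(mesh.cellCount())  # homogeneous resistivity model
fop.createJacobian(model)

sens = fop.jacobian()[0]  # sensitivity row of the first array
normsens = pg.utils.logDropTol(sens / mesh.cellSizes(), 8e-4)
normsens /= np.max(normsens)
pg.show(mesh, normsens, cMap="RdGy_r", cMin=-1, cMax=1,
        label="Normalized sensitivity")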
Exemple #24
0
import json
import sys

import numpy as np
import psutil

import pygimli as pg


def main():
    # read config file
    conf_file = "inv.conf"
    with open(conf_file, "r") as fd:
        conf = json.load(fd)

    # res = pb.Resistivity("input.dat")
    # res.invert()
    # np.savetxt('resistivity.vector', res.resistivity)
    # return

    # load data file
    data = pg.DataContainerERT("input.dat")

    # remove invalid data
    oldsize = data.size()
    data.removeInvalid()
    newsize = data.size()
    if newsize < oldsize:
        print('Removed ' + str(oldsize - newsize) + ' values.')
    if not data.allNonZero('rhoa'):
        print("No or partial rhoa values.")
        return

    # check, compute error
    if data.allNonZero('err'):
        error = data('err')
    else:
        print("estimate data error")
        error = conf["relativeError"] + conf["absoluteError"] / data('rhoa')

    # create FOP
    fop = pg.DCSRMultiElectrodeModelling(verbose=conf["verbose"])
    fop.setThreadCount(psutil.cpu_count(logical=False))
    fop.setData(data)

    # create Inv
    inv = pg.RInversion(verbose=conf["verbose"], dosave=False)
    # variables tD, tM are needed to prevent destruct objects
    tD = pg.RTransLog()
    tM = pg.RTransLogLU()
    inv.setTransData(tD)
    inv.setTransModel(tM)
    inv.setForwardOperator(fop)

    # mesh
    if conf["meshFile"] == "":
        depth = conf["depth"]
        if depth is None:
            depth = pg.DCParaDepth(data)

        poly = pg.meshtools.createParaMeshPLC(
            data.sensorPositions(),
            paraDepth=depth,
            paraDX=conf["paraDX"],
            paraMaxCellSize=conf["maxCellArea"],
            paraBoundary=2,
            boundary=2)

        if conf["verbose"]:
            print("creating mesh...")
        mesh = pg.meshtools.createMesh(poly,
                                       quality=conf["quality"],
                                       smooth=(1, 10))
    else:
        mesh = pg.Mesh(pg.load(conf["meshFile"]))

    mesh.createNeighbourInfos()

    if conf["verbose"]:
        print(mesh)

    sys.stdout.flush()  # flush before multithreading
    fop.setMesh(mesh)
    fop.regionManager().setConstraintType(1)

    if not conf["omitBackground"]:
        if fop.regionManager().regionCount() > 1:
            fop.regionManager().region(1).setBackground(True)

    if conf["meshFile"] == "":
        fop.createRefinedForwardMesh(True, False)
    else:
        fop.createRefinedForwardMesh(conf["refineMesh"], conf["refineP2"])

    paraDomain = fop.regionManager().paraDomain()
    inv.setForwardOperator(fop)  # necessary?

    # inversion parameters
    inv.setData(data('rhoa'))
    inv.setRelativeError(error)
    fop.regionManager().setZWeight(conf['zWeight'])
    inv.setLambda(conf['lam'])
    inv.setMaxIter(conf['maxIter'])
    inv.setRobustData(conf['robustData'])
    inv.setBlockyModel(conf['blockyModel'])
    inv.setRecalcJacobian(conf['recalcJacobian'])

    pc = fop.regionManager().parameterCount()
    startModel = pg.RVector(pc, pg.median(data('rhoa')))

    inv.setModel(startModel)

    # Run the inversion
    sys.stdout.flush()  # flush before multithreading
    model = inv.run()
    resistivity = model(paraDomain.cellMarkers())
    np.savetxt('resistivity.vector', resistivity)
    print("Done.")
maxIter = 15
############

# Poro to rock content (inversion parameter)
fr_min = 1 - poro_max
fr_max = 1 - poro_min

# Load meshes and data
mesh = pg.load("mesh.bms")
true = np.load("true_model.npz")
sensors = np.load("sensors.npy", allow_pickle=True)

veltrue, rhotrue, fatrue, fitrue, fwtrue = (
    true["vel"], true["rho"], true["fa"], true["fi"], true["fw"])

ertScheme = pg.DataContainerERT("erttrue.dat")

meshRST = pg.load("paraDomain_%d.bms" % case)
meshERT = pg.load("meshERT_%d.bms" % case)

if fix_poro:
    frtrue = np.load("true_model.npz")["fr"]
    phi = []
    for cell in meshRST.cells():
        idx = mesh.findCell(cell.center()).id()
        phi.append(1 - frtrue[idx])
    phi = np.array(phi)
    fr_min = 0
    fr_max = 1
else:
    phi = poro
if case == 2:
    case = 2
    constrained = True
    mesh = pg.load("mesh_2.bms")
    paraDomain = pg.load("paraDomain_2.bms")
else:
    case = 1
    constrained = False
    mesh = pg.load("mesh_1.bms")
    paraDomain = pg.load("paraDomain_1.bms")

pg.boxprint("Calculating case %s" % case)

# Load meshes and data
ertScheme = pg.DataContainerERT("ert_filtered.data")

fr_min = 0.1
fr_max = 0.9
phi = np.ones(paraDomain.cellCount()) * poro

# Setup managers and equip with meshes
ert = ERTManager()
ert.setMesh(mesh)
ert.setData(ertScheme)
ert.fop.createRefinedForwardMesh()

ttData = pg.DataContainer("rst_filtered.data", "s g")
rst = Refraction()
rst.setMesh(paraDomain)
rst.setData(ttData)
Exemple #27
0
def main(inv_par, project_conf, max_dist=1.0, final=False):
    if inv_par.meshFrom == MeshFrom.GALLERY_CLOUD:
        mesh_file = "gallery_mesh.msh"
        offset = np.array([
            project_conf.point_cloud_origin_x,
            project_conf.point_cloud_origin_y,
            project_conf.point_cloud_origin_z
        ])
    elif inv_par.meshFrom == MeshFrom.SURFACE_CLOUD:
        mesh_file = "inv_mesh_tmp.msh2"
        offset = np.array([0.0, 0.0, 0.0])
    else:
        mesh_file = "../../gallery_mesh.msh"
        offset = np.array([
            project_conf.gallery_mesh_origin_x,
            project_conf.gallery_mesh_origin_y,
            project_conf.gallery_mesh_origin_z
        ])

    if final:
        mesh_file = "inv_mesh_tmp.msh2"
        offset = np.array([0.0, 0.0, 0.0])

    mesh = GmshIO(mesh_file)

    #mesh.elements = {id: data for id, data in mesh.elements.items() if id not in [714, 2095]}

    tree = bih.BIH()
    boxes = []
    mesh.elements2 = {}
    i = 0
    for data in mesh.elements.values():
        type_, tags, nodeIDs = data
        if type_ != 2:
            continue
        # a = np.array(mesh.nodes[nodeIDs[0]]) + np.array([-622000.0, -1128000.0, 0.0])
        # b = np.array(mesh.nodes[nodeIDs[1]]) + np.array([-622000.0, -1128000.0, 0.0])
        # c = np.array(mesh.nodes[nodeIDs[2]]) + np.array([-622000.0, -1128000.0, 0.0])
        # a = np.array(mesh.nodes[nodeIDs[0]])
        # b = np.array(mesh.nodes[nodeIDs[1]])
        # c = np.array(mesh.nodes[nodeIDs[2]])
        a = np.array(mesh.nodes[nodeIDs[0]]) + offset
        b = np.array(mesh.nodes[nodeIDs[1]]) + offset
        c = np.array(mesh.nodes[nodeIDs[2]]) + offset
        boxes.append(bih.AABB([a, b, c]))
        mesh.elements2[i] = data
        i += 1

    tree.add_boxes(boxes)
    tree.construct()

    # electrodes = []
    # with open("electrodes_small.xyz") as fd:
    #     for i, line in enumerate(fd):
    #         s = line.split()
    #         if len(s) >= 3:
    #             el = np.array([float(s[0]), float(s[1]), float(s[2])])
    #             electrodes.append(el)
    #
    # t = time.time()
    # snapped_electrodes = [snap_electrode(el, mesh, tree) for el in electrodes]
    # print("elapsed time = {:.3f} s".format(time.time() - t))
    #
    # with open("electrodes_small_snapped2.xyz", "w") as fd:
    #     for el in snapped_electrodes:
    #         fd.write("{} {} {}\n".format(el[0], el[1], el[2]))

    if project_conf.method == GenieMethod.ERT:
        data = pg.DataContainerERT("input.dat", removeInvalid=False)
    else:
        data = pg.DataContainer("input.dat",
                                sensorTokens='s g',
                                removeInvalid=False)

    for i in range(len(data.sensorPositions())):
        pos = data.sensorPosition(i)
        pos = np.array([pos[0], pos[1], pos[2]])
        new_pos = snap_electrode(pos, mesh, tree, max_dist, offset)

        data.setSensorPosition(i, new_pos)
    data.save("input_snapped.dat")
Exemple #28
0
import numpy as np

import pygimli as pg
import pygimli.meshtools as mt

from fpinv import FourPhaseModel, NN_interpolate
from pygimli.physics import Refraction, ERTManager
from settings import *

# Needs ertData, rstData, a mesh and phi to be given.
ertData = pg.DataContainerERT("ert_filtered.data")
print(ertData)
mesh = pg.load("mesh_1.bms")
paraDomain = pg.load("paraDomain_1.bms")
depth = mesh.ymax() - mesh.ymin()

ert = ERTManager()
resinv = ert.invert(ertData,
                    mesh=mesh,
                    lam=60,
                    zWeight=zWeight,
                    maxIter=maxIter)
print("ERT chi:", ert.inv.chi2())
print("ERT rms:", ert.inv.relrms())

np.savetxt("res_conventional.dat", resinv)
#############
ttData = pg.DataContainer("rst_filtered.data", "s g")
rst = Refraction(ttData)

# INVERSION
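
# Hedged continuation sketch (not part of the original excerpt): the
# travel-time inversion that typically follows. The regularization strength
# and the useGradient start model are assumptions; zWeight and maxIter come
# from settings.
velinv = rst.invert(ttData, mesh=paraDomain, lam=220, zWeight=zWeight,
                    maxIter=maxIter, useGradient=True)
print("RST chi:", rst.inv.chi2())
print("RST rms:", rst.inv.relrms())
np.savetxt("vel_conventional.dat", velinv)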
Exemple #29
0
def showERTData(data, vals=None, **kwargs):
    """Plot ERT data as pseudosection matrix (position over separation).

    Creates figure, axis and draw a pseudosection.

    Parameters
    ----------

    data : :gimliapi:`BERT::DataContainerERT`

    vals : Array[nData], optional
        Values to be plotted. Default is data('rhoa').

    **kwargs :

        * ax : matplotlib.axes
            Axes to plot into. Default is None and a new figure and
            axes are created.
    """
    var = kwargs.pop('var', 0)
    if var > 0:
        import pybert as pb
        pg._g(kwargs)
        return pb.showData(data, vals, var=var, **kwargs)

    # remove ax keyword global
    ax = kwargs.pop('ax', None)

    if ax is None:
        fig = pg.plt.figure()
        ax = None
        axTopo = None
        if 'showTopo' in kwargs:
            ax = fig.add_subplot(1, 1, 1)
#            axs = fig.subplots(2, 1, sharex=True)
#            # Remove horizontal space between axes
#            fig.subplots_adjust(hspace=0)
#            ax = axs[1]
#            axTopo = axs[0]
        else:
            ax = fig.add_subplot(1, 1, 1)

    pg.checkAndFixLocaleDecimal_point(verbose=False)

    if vals is None:
        vals = 'rhoa'

    if isinstance(vals, str):
        if data.haveData(vals):
            vals = data(vals)
        else:
            pg.critical('field not in data container: ', vals)

    kwargs['cMap'] = kwargs.pop('cMap', pg.utils.cMap('rhoa'))
    kwargs['label'] = kwargs.pop('label', pg.utils.unit('rhoa'))
    kwargs['logScale'] = kwargs.pop('logScale', min(vals) > 0.0)

    try:
        ax, cbar = drawERTData(ax, data, vals=vals, **kwargs)
    except Exception:
        pg.warning('Something went wrong while drawing the data. '
                   'Trying a fallback with equidistant electrodes.')
        d = pg.DataContainerERT(data)
        sc = data.sensorCount()
        d.setSensors(list(zip(range(sc), np.zeros(sc))))
        ax, cbar = drawERTData(ax, d, vals=vals, **kwargs)

    # TODO here cbar handling like pg.show

    if 'xlabel' in kwargs:
        ax.set_xlabel(kwargs['xlabel'])
    if 'ylabel' in kwargs:
        ax.set_ylabel(kwargs['ylabel'])

    if 'showTopo' in kwargs:
        # if axTopo is not None:
        print(ax.get_position())
        axTopo = pg.plt.axes([ax.get_position().x0,
                              ax.get_position().y0,
                              ax.get_position().x0+0.2,
                              ax.get_position().y0+0.2])

        x = pg.x(data)
        x *= (ax.get_xlim()[1] - ax.get_xlim()[0]) / (max(x)-min(x))
        x += ax.get_xlim()[0]
        axTopo.plot(x, pg.z(data), '-o', markersize=4)
        axTopo.set_ylim(min(pg.z(data)), max(pg.z(data)))
        axTopo.set_aspect(1)

    # ax.set_aspect('equal')
    # plt.pause(0.1)
    pg.viewer.mpl.updateAxes(ax)
    return ax, cbar
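
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original source); the file name and
# plotting options are illustrative assumptions.
if __name__ == '__main__':
    data = pg.DataContainerERT('erttrue.dat')
    ax, cbar = showERTData(data, vals=data('rhoa'), cMap='Spectral_r',
                           label=r'Apparent resistivity ($\Omega$m)')
    pg.plt.show()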
meshERT = pg.load("meshERT_2.bms")

for cell in meshRST.cells():
    NN = mesh.findCell(cell.center())
    cell.setMarker(mesh.cellMarkers()[NN.id()])

for cell in meshERT.cells():
    NN = mesh.findCell(cell.center())
    if NN:
        cell.setMarker(mesh.cellMarkers()[NN.id()])
    else:
        cell.setMarker(len(np.unique(mesh.cellMarkers())))  # triangle boundary

# create scheme files
sensors = np.load("sensors.npy", allow_pickle=True)
shmERT = pg.DataContainerERT("erttrue.dat")
shmSRT = createRAData(sensors)

Fsyn = np.loadtxt("syn_model.dat")

# %% compute forward response and jacobians
jacERT, jacSRT = jacobian4PM(meshERT, meshRST, shmERT, shmSRT, Fsyn)
jacJoint = np.vstack((jacSRT, jacERT))
print(jacERT.shape, jacSRT.shape, jacJoint.shape)
jacJoint.dump("jacJoint.npz")
pg.tic("Calculating JTJ")
JTJ = jacJoint.T.dot(jacJoint)
pg.toc()
MCM = np.linalg.inv(JTJ)
MCM.dump("MCM.npz")
# plt.matshow(MCM)
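
# Hedged follow-up sketch (not in the original excerpt): turning the model
# covariance matrix into standard deviations and a correlation matrix, a
# standard post-processing step; the output file name is an assumption.
mcmStd = np.sqrt(np.diag(MCM))            # model parameter standard deviations
mcmCorr = MCM / np.outer(mcmStd, mcmStd)  # correlation matrix
np.save("MCM_correlation.npy", mcmCorr)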