Example #1
def __w_file(network, station, starttime):
    # `rating` is a module-level dict holding the key the user pressed
    try:
        int(rating["k"])
    except ValueError:
        return False
    # The concatenation yields a shelf file named
    # "ratings<network>.<station>rating" inside the data directory
    with shelve.open(
            os.path.join(finddir(), 'ratings') + network + "." + station +
            "rating") as f:
        if int(rating["k"]) != 5:
            f[starttime] = rating["k"]
            print("You've rated the stream", rating["k"])
Example #2
def sort_rated(network, station, phase, preproloc):
    """
    Function that sorts waveforms into 4 folders,
    depending on their manual rating.

    Parameters
    ----------
    network : STRING
        Network code (2 letters).
    station : STRING
        Station code (3 letters).
    phase : STRING
        "P" or "S".
    preproloc : STRING
        Directory that contains the preprocessed files (not quality
        controlled).

    Returns
    -------
    None.

    """

    inloc = os.path.join(preproloc, phase, "by_station", network, station)
    for n in range(1, 5):
        os.makedirs(inloc + str(n), exist_ok=True)
    dic = shelve.open(
        os.path.join(finddir(), 'ratings') + network + "." + station +
        "rating")
    for file in os.listdir(inloc):
        if file[:4] == "info":  # Skip the info files
            continue
        try:
            st = read(os.path.join(inloc, file))
        except IsADirectoryError as e:
            print(e)
            continue
        starttime = str(st[0].stats.starttime)
        if starttime in dic:
            shutil.copy(os.path.join(inloc, file),
                        os.path.join(inloc + dic[starttime], file))
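
A usage sketch for the sorter above, under the same directory-layout assumptions (all argument values illustrative):

# Hypothetical call: copies every manually rated IU.HRV P waveform into the
# sibling rating folders <station>1 .. <station>4 created above.
sort_rated("IU", "HRV", "P", "/data/preprocessed")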
Example #3
def load_model(fname='iasp91.dat'):
    """
    Load 1D velocity model from file.
    The model file should have 4 columns with depth, vp, vs, n.
    The model file for iasp91 starts like this::

        #IASP91 velocity model
        #depth  vp    vs   n
          0.00  5.800 3.360 0
          0.00  5.800 3.360 0
         10.00  5.800 3.360 4

    Parameters
    ----------
    fname : string, optional
        Filename of a model file in the data directory.
        The default is 'iasp91.dat'.

    Returns
    -------
    SimpleModel
        Returns SimpleModel instance.

    """

    try:
        return _MODEL_CACHE[fname]
    except KeyError:
        pass
    filepath = os.path.join(finddir(), 'velocity_models', fname)
    values = np.loadtxt(filepath, unpack=True)
    try:
        z, vp, vs, n = values
        n = n.astype(int)
    except ValueError:
        n = None
        z, vp, vs = values
    _MODEL_CACHE[fname] = model = SimpleModel(z, vp, vs, n)
    return model
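
A short sketch of the caching behaviour, assuming the packaged iasp91.dat is present:

# The first call parses velocity_models/iasp91.dat; the second is served
# from _MODEL_CACHE, so both names refer to the same SimpleModel instance.
model = load_model('iasp91.dat')
assert model is load_model('iasp91.dat')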
Example #4
    def write(self, filename='gypsum'):
        """
        Save the model.

        Parameters
        ----------
        filename : str, optional
            Filename. The default is 'gypsum'.

        Returns
        -------
        None.

        """
        folder = os.path.join(finddir(), 'velocity_models')
        # Remove filetype identifier if provided
        x = filename.split('.')
        if len(x) > 1:
            if x[-1] == 'pkl':
                filename = '.'.join(x[:-1])
        oloc = os.path.join(folder, filename)
        with open(oloc + ".pkl", 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
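
A round-trip sketch: because the '.pkl' suffix is stripped before writing, both calls below address the same file (the model instance is illustrative):

# Hypothetical calls on an existing model instance.
model.write('gypsum.pkl')  # suffix stripped, writes .../velocity_models/gypsum.pkl
model.write('gypsum')      # same output file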
Example #5
    def write(self, filename='avvmodel'):
        """
        Save the model.

        Parameters
        ----------
        filename : str, optional
            Filename. The default is 'avvmodel'.

        Returns
        -------
        None.

        """
        folder = os.path.join(finddir(), 'velocity_models')
        # Remove filetype identifier if provided
        x = filename.split('.')
        if len(x) > 1:
            filename = '.'.join(x[:-1])
        oloc = os.path.join(folder, filename)
        os.makedirs(folder, exist_ok=True)

        # np.savez appends the '.npz' suffix itself
        np.savez(oloc, **self.__dict__)
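
Since np.savez stores self.__dict__ key by key, a matching reload might look like this hedged sketch (path and attribute name are illustrative):

import os
import numpy as np

# Hypothetical reload: np.savez wrote one array per instance attribute, so
# np.load returns a dict-like archive keyed by those attribute names.
with np.load(os.path.join('velocity_models', 'avvmodel.npz')) as data:
    lat = data['lat']  # illustrative attribute name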
Example #6
def load_avvmodel():
    """
    Creates a model of the average P and S-wave velocities in the upper
    crust. These are used by the P-SV-SH rotation algorithm. The model data
    are extracted from the Litho1.0 model (location provided above).
    The package is distributed with a readily compiled model.

    Litho1.0 must be installed and its location set correctly for a full
    compilation! However, the function will first look in the RAM cache and
    then in the data directory for a pickled model.

    Compiling takes up to one hour!

    Returns
    -------
    AverageVelModel
        Model containing average velocities for the upper crust.

    """
    try:
        return _MODEL_CACHE['avv']
    except KeyError:
        pass

    try:
        filepath = os.path.join(finddir(), 'velocity_models', 'avvmodel.pkl')
        with open(filepath, 'rb') as infile:
            _MODEL_CACHE['avv'] = model = pickle.load(infile)
            return model
    except FileNotFoundError:
        pass

    # latitude and longitude vector
    latv = np.arange(-90, 91)
    lonv = np.arange(-180, 181)
    # self.depth = np.arange(-10, 801)

    # Grids (1 deg spacing) of average P and S-wave velocities, used for
    # P-SV-SH rotation. Float arrays: integer grids (e.g. from np.mgrid)
    # would silently truncate the averaged velocities on assignment.
    avpS = np.zeros((latv.size, lonv.size))
    avsS = np.zeros_like(avpS)
    avpP = np.zeros_like(avpS)
    avsP = np.zeros_like(avpS)

    # populate velocity grid
    for m, lat in enumerate(latv):
        for n, lon in enumerate(lonv):
            # Call Litho1.0
            try:
                x = subprocess.Popen(
                    [lith1, "-p", str(lat), str(lon)], stdout=subprocess.PIPE)
                ls = str(x.stdout.read()).split("\\n")  # save the output

                # Close file or it will remain open forever!
                x.stdout.close()

                for ii, item in enumerate(ls):
                    ls[ii] = item.split()

                # clean list
                del ls[-1]
                del ls[0][0]

            except IndexError:
                # There are some points, on which the model is not defined
                lat = lat + .1
                x = subprocess.Popen(
                    [lith1, "-p", str(lat), str(lon)], stdout=subprocess.PIPE)
                ls = str(x.stdout.read()).split("\\n")  # save the output

                # Close file or it will remain open forever!
                x.stdout.close()

                for ii, item in enumerate(ls):
                    ls[ii] = item.split()

                # clean list
                del ls[-1]
                del ls[0][0]

            # reorder items
            depth = []
            vp = []
            vs = []
            name = []

            for item in ls:
                depth.append(float(item[0]))  # in m
                vp.append(float(item[2]))  # m/s
                vs.append(float(item[3]))  # m/s
                name.append(item[-1])  # name of the boundary

            # Interpolate and populate
            vp = np.interp(np.arange(min(depth), 15.5e3 + min(depth), .5e3),
                           np.flip(depth), np.flip(vp))
            vs = np.interp(np.arange(min(depth), 15.5e3 + min(depth), .5e3),
                           np.flip(depth), np.flip(vs))

            # average over the upper ~15 km (S-phases)
            # and the upper 6 km (P-phases)
            avpS[m, n] = np.average(vp)
            avsS[m, n] = np.average(vs)

            # For P-wave as primary phase (higher frequencies and shorter
            # wavelength)
            avpP[m, n] = np.average(vp[:-18])
            avsP[m, n] = np.average(vs[:-18])

    _MODEL_CACHE['avv'] = model = AverageVelModel(latv, lonv, avpP, avsP, avpS,
                                                  avsS)

    # Dump pickle file
    model.write()

    return model
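
load_avvmodel (and load_gyps below) follow the three-tier lookup the docstring describes: RAM cache, then pickled file, then full compilation. A generic sketch of that pattern, with all names illustrative:

import pickle

_MODEL_CACHE = {}

def cached_load(key, path, compile_func):
    # 1. RAM cache
    try:
        return _MODEL_CACHE[key]
    except KeyError:
        pass
    # 2. pickled file on disk
    try:
        with open(path, 'rb') as f:
            _MODEL_CACHE[key] = model = pickle.load(f)
        return model
    except FileNotFoundError:
        pass
    # 3. full compilation (slow)
    _MODEL_CACHE[key] = model = compile_func()
    return model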
Example #7
def load_gyps(save=False, latb=None, lonb=None):
    """
    Compiles the GyPSuM 3D-velocity object from included GyPSuM text files

    Parameters
    ----------
    save : bool, optional
        Pickle the 3D velocity model after compiling it for the first time.
        This allows faster access to the model. Saving the model takes
        about 800 MB of disk space.
        The default is False, as saving can lead to instabilities with
        joblib.
    latb : Tuple, optional
        Creates a submodel from the full model. In form (minlat, maxlat).
    lonb : Tuple, optional
        (minlon, maxlon)

    Returns
    -------
    ComplexModel object
        Object that can be queried for velocities.

    """
    if (latb and not lonb) or (lonb and not latb):
        raise ValueError(
            """Provide either no geographic boundaries or both latitude
            and longitude boundaries.""")

    if latb:
        # Changes the boundaries to ints (mainly for filenames)
        latb = (int(np.floor(latb[0])), int(np.ceil(latb[1])))
        lonb = (int(np.floor(lonb[0])), int(np.ceil(lonb[1])))

        try:
            return _MODEL_CACHE['gyps' + str(latb) + str(lonb)]
        except KeyError:
            pass
        try:
            with open(os.path.join('tmp',
                                   str(latb) + str(lonb) + '.pkl'),
                      'rb') as infile:
                model = pickle.load(infile)

            _MODEL_CACHE['gyps' + str(latb) + str(lonb)] = model
            return model
        except FileNotFoundError:
            pass

    try:
        model = _MODEL_CACHE['gyps']
        if latb:
            _MODEL_CACHE['gyps' + str(latb) + str(lonb)] = model = \
                    model.submodel(latb, lonb)
            if save:
                model.write(filename=str(latb) + str(lonb), folder='tmp')
        return model
    except KeyError:
        pass

    try:
        filepath = os.path.join(finddir(), 'velocity_models', 'gypsum.pkl')
        with open(filepath, 'rb') as infile:
            model = pickle.load(infile)
        if not latb:
            _MODEL_CACHE['gyps'] = model
        else:
            _MODEL_CACHE['gyps' + str(latb) + str(lonb)] = model = \
                model.submodel(latb, lonb)
            if save:
                model.write(filename=str(latb) + str(lonb), folder='tmp')
        return model

    except FileNotFoundError:
        pass

    # Create initial, full model
    # Create the velocity deviation grids
    vpd, vsd, _ = np.mgrid[-90:91, -180:181, 0:18]
    vpd = vpd.astype(float)
    vsd = vsd.astype(float)

    # Load background model
    rp, vpb = zip(*np.loadtxt(os.path.join(gyps, 'StartingVpModel.txt')))
    rs, vsb = zip(*np.loadtxt(os.path.join(gyps, 'StartingVsModel.txt')))

    zbp = R_EARTH - np.array(rp, dtype=float)  # background model depth vector
    zbs = R_EARTH - np.array(rs, dtype=float)  # background model depth vector
    vpb = np.array(vpb)
    vsb = np.array(vsb)

    del rp, rs

    # Load deviations
    dirlist = os.listdir(gyps)

    # vp deviations; each grid is written twice because the depth vector zd
    # below repeats the layer boundaries
    for i, p in enumerate(fnmatch.filter(dirlist, 'P.*')):
        vpd[:, :, 2 * i] = np.reshape(np.loadtxt(os.path.join(gyps, p)),
                                      vpd[:, :, 0].shape) / 100
        vpd[:, :, 2 * i + 1] = np.reshape(np.loadtxt(os.path.join(gyps, p)),
                                          vpd[:, :, 0].shape) / 100

    # vs deviations
    for i, p in enumerate(fnmatch.filter(dirlist, 'S.*')):
        vsd[:, :, 2 * i] = np.reshape(np.loadtxt(os.path.join(gyps, p)),
                                      vsd[:, :, 0].shape) / 100
        vsd[:, :, 2 * i + 1] = np.reshape(np.loadtxt(os.path.join(gyps, p)),
                                          vsd[:, :, 0].shape) / 100

    # boundaries for the velocity deviations vectors
    zd = np.hstack((0,
                    np.repeat(
                        np.hstack((np.arange(100, 475,
                                             75), np.array([525, 650, 750]))),
                        2), 850))

    # Interpolation depth
    # zq = np.unique(np.sort(np.hstack(zb, zd)))
    # imax = np.where(zq > 850)
    # zq = zq[:imax]
    zq = np.arange(0, maxz + res, res)

    # Interpolate background velocity model
    vp_bg = np.interp(zq, zbp, vpb)
    vs_bg = np.interp(zq, zbs, vsb)

    del vpb, vsb, zbp, zbs

    # Interpolate velocity disturbances
    intf = interp1d(zd, vpd, axis=2)
    dvp = intf(zq)
    intf = interp1d(zd, vsd, axis=2)
    dvs = intf(zq)

    vp = np.multiply(dvp, vp_bg) + vp_bg
    vs = np.multiply(dvs, vs_bg) + vs_bg

    del vpd, vsd, intf, dvp, dvs

    lat = np.arange(-90, 91, 1)
    lon = np.arange(-180, 181, 1)

    # Create a velocity model with 1km spacing
    model = ComplexModel(zq, vp, vs, lat, lon)

    # Pickle model
    if save:
        model.write()

    if not latb:
        _MODEL_CACHE['gyps'] = model
    else:
        _MODEL_CACHE['gyps' + str(latb) + str(lonb)] = model = \
            model.submodel(latb, lonb)
        if save:
            model.write(filename=str(latb) + str(lonb), folder='tmp')

    return model
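
A minimal, self-contained sketch of the depth interpolation used above: interp1d with axis=2 resamples every (lat, lon) column of the layered deviation cube onto the fine depth grid in one call (shapes illustrative):

import numpy as np
from scipy.interpolate import interp1d

zd = np.array([0., 100., 200.])         # coarse layer depths
dv = np.random.rand(3, 4, zd.size)      # (lat, lon, depth) deviation cube
zq = np.arange(0., 201., 50.)           # fine depth grid
dv_fine = interp1d(zd, dv, axis=2)(zq)  # -> shape (3, 4, 5)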
Example #8
from scipy.interpolate import interp1d
from scipy.spatial import KDTree
# from obspy.geodetics import gps2dist_azimuth
# from pathlib import Path
import plotly.graph_objs as go
# from plotly.offline import plot

from pyglimer.data import finddir
from pyglimer.constants import R_EARTH, maxz, res, DEG2KM
from pyglimer.utils.geo_utils import geo2cart

# location of lith1 file
lith1 = os.path.join('/home', 'pm', 'LITHO1.0', 'bin', 'access_litho')

#  Location of the GyPSuM textfiles
gyps = os.path.join(finddir(), 'velocity_models', 'GyPSuM')

_MODEL_CACHE = {}


def load_gyps(save=False, latb=None, lonb=None):
    """
    Compiles the GyPSuM 3D-velocity object from included GyPSuM text files

    Parameters
    ----------
    save : bool, optional
        Pickle the 3D velocity model after compiling it for the first time.
        This allows faster access to the model. Saving the model takes
        about 800 MB of disk space.
        The default is False, as saving can lead to instabilities with
        joblib.
Example #9
    def __init__(self,
                 preproloc,
                 phase=None,
                 use_old=False,
                 logdir: str = None):
        """
        Creates a pandas database of all available receiver functions.
        This database is entirely based on the info files in the "preprocessed"
        folder. Make sure that the output folder is not corrupted before
        running this. Creating this database does not take much time, so there
        will be no option to save it, as it should always be up to date.
        However, there is an export function.

        :param preproloc: Parental folder, in which the preprocessed mseeds are
            saved (i.e. the folder above the phase division).
        :type preproloc: str
        :param phase: If just one of the primary phases should be checked -
            useful for computational efficiency, when creating ccp.
            Default is None.
        :type phase: str, optional
        :param use_old: When enabled, read the previously saved csv file.
            That is a lot faster, but the database will obviously not be
            updated. Defaults to False.
        :param logdir: Directory for log file
        :type logdir: str, optional
        """

        self.preproloc = preproloc

        if phase:
            self.phase = phase.upper()
        else:
            self.phase = phase

        # 1. Initiate logger
        self.logger = logging.Logger(
            "pyglimer.database.stations.StationDBaseLogger")
        self.logger.setLevel(logging.WARNING)

        # FileHandler
        if not logdir:
            try:
                fh = logging.FileHandler(
                    os.path.join(preproloc, os.pardir, os.pardir, 'logs',
                                 'StationDBase.log'))
            except FileNotFoundError:
                os.makedirs(os.path.join(preproloc, os.pardir, os.pardir,
                                         'logs'),
                            exist_ok=True)
                fh = logging.FileHandler(
                    os.path.join(preproloc, os.pardir, os.pardir, 'logs',
                                 'StationDBase.log'))
            # fh = logging.FileHandler(os.path.join('logs', 'StationDBase.log'))
        else:
            fh = logging.FileHandler(os.path.join(logdir, 'StationDBase.log'))
        fh.setLevel(logging.WARNING)
        self.logger.addHandler(fh)

        # Formatter
        fmt = logging.Formatter(
            fmt='%(asctime)s - %(levelname)s - %(message)s')
        fh.setFormatter(fmt)

        # Check if there is already a saved database
        oloc = os.path.join(finddir(), 'database.csv')

        if use_old and Path(oloc).is_file():
            self.db = pd.read_csv(oloc)
        else:
            self.db = self.__create__()

        # Save the database; skip saving if only one phase was newly requested
        if not phase:
            self.db.to_csv(oloc)
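
A hypothetical construction call; with phase set, only P records are checked and the csv export at the end is skipped (see the `if not phase` guard):

# Path and usage are illustrative.
db = StationDB('/data/preprocessed', phase='P')
print(db.db.head())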
Example #10
import h5py

from obspy import UTCDateTime, read, Trace, Stream
from obspy.core import Stats
from obspy.signal.filter import lowpass
from geographiclib.geodesic import Geodesic

from pyglimer.data import finddir
from pyglimer.ccp import CCPStack
from pyglimer.rf import RFTrace, RFStream
from pyglimer.rf.create import createRF, read_by_station
from pyglimer.rf.deconvolve import it, multitaper, spectraldivision  # , gen_it
from pyglimer.rf.moveout import moveout, DEG2KM
from pyglimer.waveform.qc import qcs, qcp

tr_folder = os.path.join(finddir(), 'raysum_traces')


def read_raysum(phase, NEZ_file=None, RTZ_file=None, PSS_file=None):
    """
    Reads the output of the raysum program (by Andrew Frederiksen).

    Parameters
    ----------
    phase : str
        Primary phase, either "P" or "S".
    NEZ_file : str, optional
        Filename of file in NEZ coordinate system. The default is None.
    RTZ_file : str, optional
        Filename of file in RTZ coordinate system. The default is None.
    PSS_file : str, optional
        Filename of file in P-Sv-Sh coordinate system. The default is None.
Example #11
def create_geom(N: int,
                bazv: np.ndarray,
                raypv: np.ndarray,
                shift_max: int,
                filename: str,
                shape='cross'):
    """
    Creates geometry files for Raysum.

    Parameters
    ----------
    N : int
        Number of stations. Has to be odd if shape='cross';
        otherwise N = M**2, where M is a natural number.
    bazv : np.ndarray(1D)
        1D array containing the back azimuths per station (deg).
    raypv : np.ndarray(1D)
        1D array containing the slownesses in s/m per back azimuth.
    shift_max : int
        Maximum horizontal shift in m.
    filename : str
        Name of the output file.
    shape : str
        Shape of the array.

    Raises
    ------
    ValueError
        For even N when shape='cross'.

    Returns
    -------
    None.

    """
    if shape == 'cross':
        if not N % 2:
            raise ValueError('Number of stations has to be odd.')

        # create shift vectors
        xshift = np.hstack(
            (np.linspace(-shift_max, shift_max, round(
                (N + 1) / 2)), np.zeros(round((N + 1) / 2))))
        yshift = np.hstack((np.zeros(round((N + 1) / 2)),
                            np.linspace(-shift_max, shift_max,
                                        round((N + 1) / 2))))

        coords = np.unique(np.column_stack((yshift, xshift)), axis=0)
    else:
        M = int(np.sqrt(N))  # Number of stations per line
        # complex step -> np.mgrid returns M points (inclusive) per axis
        xshift, yshift = np.mgrid[-shift_max:shift_max:M * 1j,
                                  -shift_max:shift_max:M * 1j]
        coords = np.column_stack((xshift.ravel(), yshift.ravel()))

    lines = []  # list with text

    # header
    lines.append('# Automatically created geometry file.\n')
    lines.append('# Note that one file cannot contain more than ' +
                 '200 traces (max for raysum).\n')

    ntr = 0  # Number of traces counter
    fpi = []  # List with indices to split file

    for i in range(N):
        lines.append('# Station ' + str(i) + '\n')

        for j, baz in enumerate(bazv):
            for k, rayp in enumerate(raypv):
                if rayp == 0:
                    rayp = '0.'
                else:
                    rayp = str(round(rayp, 6))

                line = ' '.join([
                    str(int(bazv[j])) + '.', rayp,
                    str(int(coords[i, 0])) + '.',
                    str(int(coords[i, 1])) + '.\n'
                ])
                lines.append(line)
                ntr = ntr + 1

                if ntr == 200:
                    # index of the line just appended; lines.index(line)
                    # could match an earlier duplicate line
                    fpi.append(len(lines) - 1)
                    ntr = 0

    fpi.append(len(lines) - 1)

    # Write text to file
    # open outfile
    of = os.path.join(finddir(), 'raysum_traces', filename + '.geom')
    with open(of, 'w') as text:
        text.writelines(lines)

    # Write the split files
    for i, j in enumerate(fpi):
        of = os.path.join(finddir(), 'raysum_traces',
                          filename + str(i) + '.geom')
        with open(of, 'w') as text:
            if i:
                text.writelines(lines[fpi[i - 1] + 1:j + 1])
            else:
                text.writelines(lines[:j + 1])
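
A usage sketch with illustrative values: a 5-station cross, two back azimuths and two slownesses per back azimuth, stations shifted up to 10 km:

import numpy as np

# Writes test.geom plus the <= 200-trace split files test0.geom, ...
create_geom(5, np.array([0., 90.]), np.array([4.7e-5, 5.5e-5]),
            10000, 'test')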
Example #12
def automatic_rate(network, station, phase, preproloc):
    """
    Checks the automatic QC criteria for SRF waveforms.

    Parameters
    ----------
    network : STRING
        Network code (2 letters).
    station : STRING
        Station code (3 letters).
    phase : STRING
        "P" or "S".
    preproloc : STRING
        Directory that contains the preprocessed files (not quality
        controlled).

    Returns
    -------
    diff : INTEGER
        Number of waveforms that were not rated 3 or 4.
    ret : INTEGER
        Number of automatically retained waveforms.
    sts : LIST
        List containing all retained + filtered streams.
    crits : LIST
        List containing bools (retained or not) corresponding to streams in
        sts.

    """

    # Note: '/by_station/' with a leading slash would make os.path.join
    # discard preproloc and phase on POSIX systems
    inloc = os.path.join(preproloc, phase, 'by_station', network, station)
    diff = 0
    ret = 0
    sts = []
    crits = []
    for file in os.listdir(inloc):
        if file[:4] == "info":  # Skip the info files
            continue
        try:
            st = read(os.path.join(inloc, file))
        except IsADirectoryError as e:
            print(e)
            continue
        starttime = str(st[0].stats.starttime)
        if phase == "S":
            st, crit, hf, noisemat = qcs(st, st[0].stats.delta,
                                         st[0].stats.sampling_rate)
        elif phase == "P":
            st, crit, lf, noisemat = qcp(st, st[0].stats.delta,
                                         st[0].stats.sampling_rate)

        with shelve.open(
                os.path.join(finddir(), 'ratings') + network + "." + station +
                "rating") as f:
            f[starttime + "_auto"] = crit
            if starttime in f and int(f[starttime]) < 3 and crit:
                diff = diff + 1
            if crit:
                ret = ret + 1
        sts.append(st)
        crits.append(crit)
    return diff, ret, sts, crits
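
A hypothetical driver for the QC routine above (path and station illustrative):

diff, ret, sts, crits = automatic_rate('IU', 'HRV', 'P', '/data/preprocessed')
print(ret, 'of', len(sts), 'streams retained automatically')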