Example #1
def preprocess(db, stations, comps, goal_day, params):
    datafiles = {}
    output = Stream()
    for station in stations:
        datafiles[station] = {}
        net, sta = station.split('.')
        gd = datetime.datetime.strptime(goal_day, '%Y-%m-%d')
        files = get_data_availability(db,
                                      net=net,
                                      sta=sta,
                                      starttime=gd,
                                      endtime=gd)
        for comp in comps:
            datafiles[station][comp] = []
        for file in files:
            if file.comp[-1] not in comps:
                continue
            fullpath = os.path.join(file.path, file.file)
            datafiles[station][file.comp[-1]].append(fullpath)
    j = 0
    for istation, station in enumerate(stations):
        for comp in comps:
            files = datafiles[station][comp]
            if len(files) != 0:
                logging.debug("%s.%s Reading %i Files" %
                              (station, comp, len(files)))
                stream = Stream()
                for file in sorted(files):
                    st = read(file,
                              starttime=UTCDateTime(gd),
                              endtime=UTCDateTime(gd) + 86400)
                    for tr in st:
                        tr.data = tr.data.astype(np.float32)
                    stream += st
                    del st
                stream.sort()
                stream.merge(method=1,
                             interpolation_samples=3,
                             fill_value=None)
                stream = stream.split()

                logging.debug("Checking sample alignment")
                for i, trace in enumerate(stream):
                    stream[i] = check_and_phase_shift(trace)

                logging.debug("Checking Gaps")
                if len(getGaps(stream)) > 0:
                    max_gap = 10
                    only_too_long = False
                    while getGaps(stream) and not only_too_long:
                        too_long = 0
                        gaps = getGaps(stream)
                        for gap in gaps:
                            if int(gap[-1]) <= max_gap:
                                stream[gap[0]] = stream[gap[0]].__add__(
                                    stream[gap[1]],
                                    method=1,
                                    fill_value="interpolate")
                                stream.remove(stream[gap[1]])
                                break
                            else:
                                too_long += 1
                        if too_long == len(gaps):
                            only_too_long = True
                stream = stream.split()
                taper_length = 20.0  # seconds
                for trace in stream:
                    if trace.stats.npts < 4 * taper_length * trace.stats.sampling_rate:
                        stream.remove(trace)
                    else:
                        trace.detrend(type="demean")
                        trace.detrend(type="linear")
                        trace.taper(max_percentage=None, max_length=1.0)
                # try:
                #     stream.merge(method=0, fill_value=0.0)
                # except:
                #     continue
                #
                # logging.debug("%s.%s Slicing Stream to %s:%s" % (station, comp, utcdatetime.UTCDateTime(
                #     goal_day.replace('-', '')), utcdatetime.UTCDateTime(
                #     goal_day.replace('-', '')) + params.goal_duration - stream[0].stats.delta))
                # stream.trim(utcdatetime.UTCDateTime(goal_day.replace('-', '')), utcdatetime.UTCDateTime(
                #     goal_day.replace('-', '')) + params.goal_duration - stream[0].stats.delta, pad=True, fill_value=None,
                #                nearest_sample=False)

                if get_config(db, 'remove_response', isbool=True):
                    logging.debug('Removing instrument response')
                    response_format = get_config(db, 'response_format')
                    response_prefilt = eval(get_config(db, 'response_prefilt'))
                    files = glob.glob(
                        os.path.join(get_config(db, 'response_path'), "*"))
                    if response_format == "inventory":
                        firstinv = True
                        inventory = None
                        for file in files:
                            try:
                                inv = read_inventory(file)
                                if firstinv:
                                    inventory = inv
                                    firstinv = False
                                else:
                                    inventory += inv
                            except:
                                traceback.print_exc()
                                pass
                        if inventory:
                            stream.attach_response(inventory)
                            stream.remove_response(output='VEL',
                                                   pre_filt=response_prefilt)
                    elif response_format == "dataless":
                        for file in files:
                            p = Parser(file)
                            try:
                                p.getPAZ(stream[0].id,
                                         datetime=UTCDateTime(gd))
                                break
                            except:
                                traceback.print_exc()
                                del p
                                continue
                        stream.simulate(
                            seedresp={
                                'filename': p,
                                "units": "VEL"
                            },
                            pre_filt=response_prefilt,
                            paz_remove=None,
                            paz_simulate=None,
                        )
                    elif response_format == "paz":
                        msg = "Unexpected type for `response_format`: %s" % \
                              response_format
                        raise TypeError(msg)
                    elif response_format == "resp":
                        msg = "Unexpected type for `response_format`: %s" % \
                              response_format
                        raise TypeError(msg)
                    else:
                        msg = "Unexpected type for `response_format`: %s" % \
                              response_format
                        raise TypeError(msg)

                for trace in stream:
                    logging.debug("%s.%s Highpass at %.2f Hz" %
                                  (station, comp, params.preprocess_highpass))
                    trace.filter("highpass",
                                 freq=params.preprocess_highpass,
                                 zerophase=True)

                    if trace.stats.sampling_rate != params.goal_sampling_rate:
                        logging.debug(
                            "%s.%s Lowpass at %.2f Hz" %
                            (station, comp, params.preprocess_lowpass))
                        trace.filter("lowpass",
                                     freq=params.preprocess_lowpass,
                                     zerophase=True,
                                     corners=8)

                        if params.resampling_method == "Resample":
                            logging.debug(
                                "%s.%s Downsample to %.1f Hz" %
                                (station, comp, params.goal_sampling_rate))
                            trace.data = resample(
                                trace.data, params.goal_sampling_rate /
                                trace.stats.sampling_rate, 'sinc_fastest')

                        elif params.resampling_method == "Decimate":
                            decimation_factor = trace.stats.sampling_rate / params.goal_sampling_rate
                            if not int(decimation_factor) == decimation_factor:
                                logging.warning(
                                    "%s.%s CANNOT be decimated by an integer factor, consider using Resample or Lanczos methods"
                                    " Trace sampling rate = %i ; Desired CC sampling rate = %i"
                                    %
                                    (station, comp, trace.stats.sampling_rate,
                                     params.goal_sampling_rate))
                                sys.stdout.flush()
                                sys.exit()
                            logging.debug("%s.%s Decimate by a factor of %i" %
                                          (station, comp, decimation_factor))
                            trace.data = trace.data[::int(decimation_factor)]

                        elif params.resampling_method == "Lanczos":
                            logging.debug(
                                "%s.%s Downsample to %.1f Hz" %
                                (station, comp, params.goal_sampling_rate))
                            trace.data = np.array(trace.data)
                            trace.interpolate(
                                method="lanczos",
                                sampling_rate=params.goal_sampling_rate,
                                a=1.0)

                        trace.stats.sampling_rate = params.goal_sampling_rate

                year, month, day, hourf, minf, secf, wday, yday, isdst = \
                    trace.stats.starttime.utctimetuple()

                if j == 0:
                    t = time.strptime(
                        "%04i:%02i:%02i:%02i:%02i:%02i" %
                        (year, month, day, hourf, minf, secf),
                        "%Y:%m:%d:%H:%M:%S")
                    basetime = calendar.timegm(t)
                for tr in stream:
                    tr.data = tr.data.astype(np.float32)
                output += stream
                del stream
            del files
    clean_scipy_cache()
    return basetime, output
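
A minimal driver sketch for the function above, assuming an MSNoise-style setup: connect() and get_params() are API helpers assumed here (only get_data_availability and get_config appear in the snippet itself), and the station codes and date are illustrative.

# Hypothetical usage; `connect`/`get_params` and the station list are
# assumptions, not part of the snippet above.
from msnoise.api import connect, get_params

db = connect()
params = get_params(db)
stations = ["BW.KW1", "BW.RJOB"]          # illustrative NET.STA codes
basetime, stream = preprocess(db, stations, comps=["Z"],
                              goal_day="2011-02-07", params=params)
print(basetime, stream)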
Example #2
    def test_ppsd_w_iris_against_obspy_results(self):
        """
        Test against results obtained after merging of #1108.
        """
        # Read in ANMO data for one day
        st = read(os.path.join(self.path, 'IUANMO.seed'))

        # Read in metadata in various different formats
        paz = {'gain': 86298.5,
               'zeros': [0, 0],
               'poles': [-59.4313, -22.7121 + 27.1065j, -22.7121 - 27.1065j,
                         -0.0048004, -0.073199],
               'sensitivity': 3.3554 * 10**9}
        resp = os.path.join(self.path, 'IUANMO.resp')
        parser = Parser(os.path.join(self.path, 'IUANMO.dataless'))
        inv = read_inventory(os.path.join(self.path, 'IUANMO.xml'))

        # load expected results, for both only PAZ and full response
        filename_paz = os.path.join(self.path, 'IUANMO_ppsd_paz.npz')
        results_paz = PPSD.load_npz(filename_paz,
                                    metadata=None,
                                    allow_pickle=True)
        filename_full = os.path.join(self.path, 'IUANMO_ppsd_fullresponse.npz')
        results_full = PPSD.load_npz(filename_full,
                                     metadata=None,
                                     allow_pickle=True)

        # Calculate the PPSDs and test against expected results
        # first: only PAZ
        ppsd = PPSD(st[0].stats, paz)
        ppsd.add(st)
        # commented code to generate the test data:
        # ## np.savez(filename_paz,
        # ##          **dict([(k, getattr(ppsd, k))
        # ##                  for k in PPSD.NPZ_STORE_KEYS]))
        for key in PPSD.NPZ_STORE_KEYS_ARRAY_TYPES:
            np.testing.assert_allclose(getattr(ppsd, key),
                                       getattr(results_paz, key),
                                       rtol=1e-5)
        for key in PPSD.NPZ_STORE_KEYS_LIST_TYPES:
            for got, expected in zip(getattr(ppsd, key),
                                     getattr(results_paz, key)):
                np.testing.assert_allclose(got, expected, rtol=1e-5)
        for key in PPSD.NPZ_STORE_KEYS_SIMPLE_TYPES:
            if key in ["obspy_version", "numpy_version", "matplotlib_version"]:
                continue
            self.assertEqual(getattr(ppsd, key), getattr(results_paz, key))
        # second: various methods for full response
        for metadata in [parser, inv, resp]:
            ppsd = PPSD(st[0].stats, metadata)
            ppsd.add(st)
            # commented code to generate the test data:
            # ## np.savez(filename_full,
            # ##          **dict([(k, getattr(ppsd, k))
            # ##                  for k in PPSD.NPZ_STORE_KEYS]))
            for key in PPSD.NPZ_STORE_KEYS_ARRAY_TYPES:
                np.testing.assert_allclose(getattr(ppsd, key),
                                           getattr(results_full, key),
                                           rtol=1e-5)
            for key in PPSD.NPZ_STORE_KEYS_LIST_TYPES:
                for got, expected in zip(getattr(ppsd, key),
                                         getattr(results_full, key)):
                    np.testing.assert_allclose(got, expected, rtol=1e-5)
            for key in PPSD.NPZ_STORE_KEYS_SIMPLE_TYPES:
                if key in [
                        "obspy_version", "numpy_version", "matplotlib_version"
                ]:
                    continue
                self.assertEqual(getattr(ppsd, key),
                                 getattr(results_full, key))
Example #3
def obspy_fullresp_RESP(input_dics,
                        trace,
                        resp_file,
                        Address,
                        unit='DIS',
                        BP_filter=(0.008, 0.012, 3.0, 4.0),
                        inform='N/N'):
    """
    Instrument correction using dataless seed --->
    equivalent to full response file steps: detrend, demean, taper, filter,
    deconvolution
    :param input_dics:
    :param trace:
    :param resp_file:
    :param Address:
    :param unit:
    :param BP_filter:
    :param inform:
    :return:
    """
    dataless_parser = Parser(resp_file)
    seedresp = {'filename': dataless_parser, 'units': unit}

    try:
        if input_dics['resample_corr']:
            trace = resample_trace(
                trace,
                des_sr=input_dics['resample_corr'],
                resample_method=input_dics['resample_method'])
        trace.detrend('linear')
        trace.simulate(seedresp=seedresp,
                       paz_remove=None,
                       paz_simulate=None,
                       remove_sensitivity=True,
                       simulate_sensitivity=False,
                       water_level=input_dics['water_level'],
                       zero_mean=True,
                       taper=True,
                       taper_fraction=0.05,
                       pre_filt=eval(BP_filter),
                       pitsasim=False,
                       sacsim=True)
        # Remove the following line since we want to keep
        # the units as it is in the stationXML
        # trace.data *= 1.e9
        trace_identity = '%s.%s.%s.%s' % (
            trace.stats['network'], trace.stats['station'],
            trace.stats['location'], trace.stats['channel'])
        if input_dics['mseed'] == 'N':
            trace.write(os.path.join(Address,
                                     '%s.%s' % (unit.lower(), trace_identity)),
                        format='SAC')
        else:
            trace.write(os.path.join(Address,
                                     '%s.%s' % (unit.lower(), trace_identity)),
                        format='MSEED')

        if unit.lower() == 'dis':
            unit_print = 'displacement'
        elif unit.lower() == 'vel':
            unit_print = 'velocity'
        elif unit.lower() == 'acc':
            unit_print = 'acceleration'
        else:
            unit_print = 'UNKNOWN'
        print('%s -- instrument correction to %s for: %s'
              % (inform, unit_print, trace_identity))

    except Exception as e:
        print('%s -- %s' % (inform, e))
Example #4
def obspy_fullresp_resp(trace,
                        resp_file,
                        save_path,
                        unit,
                        bp_filter,
                        water_level,
                        zero_mean,
                        taper,
                        taper_fraction,
                        remove_trend,
                        debug=False):
    """
    apply instrument correction by using response file
    :param trace:
    :param resp_file:
    :param save_path:
    :param unit:
    :param bp_filter:
    :param water_level:
    :param zero_mean:
    :param taper:
    :param taper_fraction:
    :param remove_trend:
    :param debug:
    :return:
    """
    if 'dis' in unit.lower():
        unit = 'DIS'
    elif 'vel' in unit.lower():
        unit = 'VEL'
    elif 'acc' in unit.lower():
        unit = 'ACC'
    else:
        unit = unit.upper()

    dataless_parser = Parser(resp_file)
    seedresp = {'filename': dataless_parser, 'units': unit}

    if debug:
        print(20 * '=')
        print('stationXML file: %s' % resp_file)
        print('trace: %s' % trace.id)
        print('save path: %s' % save_path)

    # remove the trend
    if remove_trend:
        trace.detrend('linear')
    try:
        trace.simulate(seedresp=seedresp,
                       paz_remove=None,
                       paz_simulate=None,
                       remove_sensitivity=True,
                       simulate_sensitivity=False,
                       water_level=water_level,
                       zero_mean=zero_mean,
                       taper=taper,
                       taper_fraction=taper_fraction,
                       pre_filt=eval(bp_filter),
                       pitsasim=False,
                       sacsim=True)

        # Remove the following line since we want to keep
        # the units as it is in the stationXML
        # trace.data *= 1.e9

        if unit.lower() == 'dis':
            unit_print = 'displacement'
        elif unit.lower() == 'vel':
            unit_print = 'velocity'
        elif unit.lower() == 'acc':
            unit_print = 'acceleration'
        else:
            unit_print = 'UNKNOWN'
        print('instrument correction to %s for: %s' % (unit_print, trace.id))

        return trace

    except Exception as error:
        print('[EXCEPTION] %s -- %s' % (trace.id, error))
        return False
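
A hedged usage sketch for the function above; the waveform and dataless file names are hypothetical, and bp_filter is passed as a string because the function applies eval() to it.

from obspy import read

tr = read("example.mseed")[0]                     # hypothetical waveform file
corrected = obspy_fullresp_resp(tr,
                                resp_file="BW_KW1.dataless",  # hypothetical dataless SEED
                                save_path="./corrected",
                                unit="VEL",
                                bp_filter="(0.008, 0.012, 3.0, 4.0)",
                                water_level=600.0,
                                zero_mean=True,
                                taper=True,
                                taper_fraction=0.05,
                                remove_trend=True,
                                debug=True)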
Example #5
from obspy import read
from obspy.signal import PPSD
from obspy.io.xseed import Parser

st = read("https://examples.obspy.org/BW.KW1..EHZ.D.2011.037")
parser = Parser("https://examples.obspy.org/dataless.seed.BW_KW1")
ppsd = PPSD(st[0].stats, metadata=parser)
ppsd.add(st)

st = read("https://examples.obspy.org/BW.KW1..EHZ.D.2011.038")
ppsd.add(st)

ppsd.plot(cumulative=True)
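
The computed PPSD can also be written to disk and reloaded later; a short follow-up sketch (the file name is arbitrary).

ppsd.save_npz("BW.KW1..EHZ.npz")                          # arbitrary output name
ppsd_reloaded = PPSD.load_npz("BW.KW1..EHZ.npz", metadata=parser)
ppsd_reloaded.plot()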
Example #6
stime = UTCDateTime('2017-001T00:00:00.0')
etime = UTCDateTime('2018-001T00:00:00.0')

net = 'IC'
debug = True
window = 60. * 8.
f0s = [1. / 150., 1. / 100., 1. / 75., 1. / 50., 1. / 25]
#f0s = [1./25.]
plots = False
stalist = False

client = Client("IRIS")

# Grab the list of stations
sp = Parser('/APPS/metadata/SEED/' + net + '.dataless')


def makecolocplot(eve, sta, debug=False):

    return


def proceve(eve, sta, debug=False):

    try:
        coords = sp.get_coordinates(net + '.' + sta + '.00.LHZ',
                                    eve.origins[0].time)
    except:
        return
    (dis, azi, bazi) = gps2dist_azimuth(coords['latitude'],
Example #7
File: client.py Project: mbyt/obspy
    def get_paz(self, seed_id, datetime):
        """
        Get PAZ for a station at given time span. Gain is the A0 normalization
        constant for the poles and zeros.

        :type seed_id: str
        :param seed_id: SEED or channel id, e.g. ``"BW.RJOB..EHZ"`` or
            ``"EHE"``.
        :type datetime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param datetime: Time for which the PAZ is requested,
            e.g. ``'2010-01-01 12:00:00'``.
        :rtype: dict
        :return: Dictionary containing zeros, poles, gain and sensitivity.

        .. rubric:: Example

        >>> c = Client(timeout=20)
        >>> paz = c.station.get_paz('BW.MANZ..EHZ', '20090707')
        >>> paz['zeros']
        [0j, 0j]
        >>> len(paz['poles'])
        5
        >>> print(paz['poles'][0])
        (-0.037004+0.037016j)
        >>> paz['gain']
        60077000.0
        >>> paz['sensitivity']
        2516800000.0
        """
        # try to read PAZ from previously obtained XSEED data
        for res in self.client.xml_seeds.get(seed_id, []):
            parser = Parser(res)
            try:
                paz = parser.get_paz(seed_id=seed_id,
                                     datetime=UTCDateTime(datetime))
                return paz
            except:
                continue
        network, station, location, channel = seed_id.split(".")
        # request station information
        station_list = self.get_list(network=network,
                                     station=station,
                                     datetime=datetime)
        if not station_list:
            return {}
        # don't allow wild cards
        for wildcard in ['*', '?']:
            if wildcard in seed_id:
                msg = "Wildcards in seed_id are not allowed."
                raise ValueError(msg)

        if len(station_list) > 1:
            warnings.warn("Received more than one XSEED file. Using first.")

        xml_doc = station_list[0]
        res = self.client.station.get_resource(xml_doc['resource_name'])
        reslist = self.client.xml_seeds.setdefault(seed_id, [])
        if res not in reslist:
            reslist.append(res)
        parser = Parser(res)
        paz = parser.get_paz(seed_id=seed_id, datetime=UTCDateTime(datetime))
        return paz
Example #8
    inv = read_inventory(path)

    # scrub all the excess information
    inv_chan = inv.select(station=sta,
                          location=loc,
                          channel=chan,
                          starttime=starttime)

    # ALQ1 for plotting

    if sta == 'ALQ1':

        # Prepare ALQ1 trace for plotting
        tr_plot = tr.copy()
        tr_plot.detrend('constant')
        parser = Parser(path)

        #remove response from trace
        tr_plot.simulate(seedresp={'filename': parser, 'units': "VEL"})
        tr_plot.filter('bandpass', freqmin=3.14, freqmax=3.18, corners=4)

    else:

        # Get the desired parameters and throw into dictionaries
        tr.stats.coordinates = AttribDict({
            'latitude':
            inv_chan[0][0][0].latitude,
            'elevation':
            inv_chan[0][0][0].elevation / 1000.,
            'longitude':
            inv_chan[0][0][0].longitude
Example #9
def get_data_and_attributes(cfg,
                            inv,
                            catalog_df,
                            staname,
                            indexes,
                            obs='OVPF'):
    """
    Given a catalog data-frame, runs the requests for the data and
    calculates the attributes.
    """
    # Set the number of events in an attribute file (for convenience)
    n_events = len(indexes)

    # For each event in the catalog file
    for i in range(n_events):
        # get the number of the line to read
        index = indexes[i]

        try:
            # parse the catalog entry
            starttime, window_length, event_type, analyst =\
                io.get_catalog_entry(catalog_df, index)
            print(event_type, staname, i, index, starttime.isoformat())

            # get the data and attributes

            if cfg.do_use_saved_data:
                st_fname = os.path.join(cfg.data_dir,
                                        "%d_PF.%s.*MSEED" % (index, staname))
                try:
                    st = read(st_fname)
                except:
                    st = None

            else:
                if staname == 'BOR':
                    parser = Parser(cfg.BOR_response_fname)
                    st = io.get_waveform_data(starttime,
                                              window_length,
                                              'PF',
                                              staname,
                                              '??Z',
                                              parser,
                                              obs=obs,
                                              simulate=True)
                else:
                    st = io.get_waveform_data(starttime,
                                              window_length,
                                              'PF',
                                              staname,
                                              '??Z',
                                              inv,
                                              obs=obs)
                if cfg.do_save_data and st is not None:
                    for tr in st:
                        tr_fname = os.path.join(
                            cfg.data_dir, "%d_%s.MSEED" % (index, tr.get_id()))
                        tr.write(tr_fname, format='MSEED')

            # actually get the attributes
            try:
                st.detrend()
            except AttributeError:
                raise ValueError
            attributes, att_names =\
                att.get_all_single_station_attributes(st)

            # create the data-frame with the attributes (using the same indexes
            # as those in the catalog)
            if i == 0:
                df_att = pd.DataFrame(attributes,
                                      columns=att_names,
                                      index=[index])
            else:
                df_att_tmp = pd.DataFrame(attributes,
                                          columns=att_names,
                                          index=[index])
                df_att = df_att.append(df_att_tmp, ignore_index=False)

        except ValueError:
            # if there are problems, then set attributes to NaN
            print('Problem at %d (%d)  - Setting all attributes to NaN' %
                  (i, index))
            try:
                att_names = df_att.columns.values
            except UnboundLocalError:
                att_names = att.att_names_single_station_1D
            nan = np.ones((1, len(att_names))) * np.nan
            df_att_tmp = pd.DataFrame(nan, columns=att_names, index=[index])
            try:
                df_att = df_att.append(df_att_tmp, ignore_index=False)
            except UnboundLocalError:
                df_att = df_att_tmp
            continue

    # join the attributes onto the portion of the catalog data-frame
    # we are working with and return it
    df_X = catalog_df.loc[indexes].join(df_att)
    return df_X
Example #10
def init_ppsd_dataless(tr, dataless_file):
    parser = Parser(dataless_file)
    paz = parser.get_paz(tr.id)
    ppsd = PPSD(tr.stats, metadata=paz)
    return ppsd
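
A minimal usage sketch for the helper above; the waveform and dataless file names are hypothetical.

from obspy import read

st = read("BW.KW1..EHZ.D.2011.037.mseed")                 # hypothetical waveform file
ppsd = init_ppsd_dataless(st[0], "dataless.seed.BW_KW1")  # hypothetical dataless file
ppsd.add(st)
ppsd.plot()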
Example #11
    def __init__(self, file_src=None, folder_dst=None):
        if file_src is not None or folder_dst is not None:
            self.parser_object = Parser(file_src)
            self.folder_destination = folder_dst
        else:
            raise NotImplementedError("path file or folder destination must be valid")
Example #12
def preprocessing_function(tr, processing_info):  # NOQA
    """
    Function to perform the actual preprocessing for one individual seismogram.
    This is part of the project so it can change depending on the project.

    Please keep in mind that you will have to manually update this file to a
    new version if LASIF is ever updated.
    """
    def zerophase_chebychev_lowpass_filter(trace, freqmax):
        """
        Custom Chebychev type two zerophase lowpass filter useful for
        decimation filtering.

        This filter is stable up to a reduction in frequency with a factor of
        10. If more reduction is desired, simply decimate in steps.

        Partly based on a filter in ObsPy.

        :param trace: The trace to be filtered.
        :param freqmax: The desired lowpass frequency.

        Will be replaced once ObsPy has a proper decimation filter.
        """
        # rp - maximum ripple of passband, rs - attenuation of stopband
        rp, rs, order = 1, 96, 1e99
        ws = freqmax / (trace.stats.sampling_rate * 0.5)  # stop band frequency
        wp = ws  # pass band frequency

        while True:
            if order <= 12:
                break
            wp *= 0.99
            order, wn = signal.cheb2ord(wp, ws, rp, rs, analog=0)

        b, a = signal.cheby2(order, rs, wn, btype="low", analog=0, output="ba")

        # Apply twice to get rid of the phase distortion.
        trace.data = signal.filtfilt(b, a, trace.data)

    # =========================================================================
    # Read seismograms and gather basic information.
    # =========================================================================
    specfem_delta_delay = -1.0687500
    starttime = processing_info["event_information"][
        "origin_time"] + specfem_delta_delay
    endtime = starttime + processing_info["process_params"]["dt"] * \
                          (processing_info["process_params"]["npts"] - 1)
    duration = endtime - starttime

    # Make sure the seismograms are long enough. If not, skip them.
    if starttime < tr.stats.starttime or endtime > tr.stats.endtime:
        msg = ("The seismogram does not cover the required time span.\n"
               "Seismogram time span: %s - %s\n"
               "Requested time span: %s - %s" %
               (tr.stats.starttime, tr.stats.endtime, starttime, endtime))
        raise LASIFError(msg)

    # Trim to reduce processing cost.
    # starttime is the origin time of the event
    # endtime is the origin time plus the length of the synthetics
    tr.trim(starttime - 0.2 * duration, endtime + 0.2 * duration)

    # =========================================================================
    # Some basic checks on the data.
    # =========================================================================
    # Non-zero length
    if not len(tr):
        msg = "No data found in time window around the event. File skipped."
        raise LASIFError(msg)

    # No nans or infinity values allowed.
    if not np.isfinite(tr.data).all():
        msg = "Data contains NaNs or Infs. File skipped"
        raise LASIFError(msg)

    # =========================================================================
    # Step 1: Decimation
    # Decimate with the factor closest to the sampling rate of the synthetics.
    # The data is still oversampled by a large amount so there should be no
    # problems. This has to be done here so that the instrument correction is
    # reasonably fast even for input data with a large sampling rate.
    # =========================================================================
    while True:
        decimation_factor = int(processing_info["process_params"]["dt"] /
                                tr.stats.delta)
        # Decimate in steps for large sample rate reductions.
        if decimation_factor > 8:
            decimation_factor = 8
        if decimation_factor > 1:
            new_nyquist = tr.stats.sampling_rate / 2.0 / float(
                decimation_factor)
            zerophase_chebychev_lowpass_filter(tr, new_nyquist)
            tr.decimate(factor=decimation_factor, no_filter=True)
        else:
            break

    # =========================================================================
    # Step 2: Detrend and taper.
    # =========================================================================
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(max_percentage=0.05, type="hann")

    # =========================================================================
    # Step 3: Instrument correction
    # Correct seismograms to velocity in m/s.
    # =========================================================================
    output_units = "VEL"
    station_name = "station.{}_{}.response.xml".format(tr.stats.network,
                                                       tr.stats.station)
    station_file = os.path.join("StationXML", station_name)

    # check if the station file actually exists ==============================
    if not os.path.exists(station_file):
        msg = "No station file found for the relevant time span. File skipped"
        raise LASIFError(msg)

    # This is really necessary as other filters are just not sharp enough
    # and lots of energy from other frequency bands leaks into the frequency
    # band of interest
    freqmin = processing_info["process_params"]["highpass"]
    freqmax = processing_info["process_params"]["lowpass"]

    f2 = 0.9 * freqmin
    f3 = 1.1 * freqmax
    # Recommendations from the SAC manual.
    f1 = 0.5 * f2
    f4 = 2.0 * f3
    pre_filt = (f1, f2, f3, f4)

    # processing for seed files ==============================================
    if "/SEED/" in station_file:
        # XXX: Check if this is m/s. In all cases encountered so far it
        # always is, but SEED is in theory also able to specify corrections
        # to other units...
        parser = Parser(station_file)
        try:
            # The simulate might fail but might still modify the data. The
            # backup is needed for the backup plan to only correct using
            # poles and zeros.
            backup_tr = tr.copy()
            try:
                tr.simulate(seedresp={
                    "filename": parser,
                    "units": output_units,
                    "date": tr.stats.starttime
                },
                            pre_filt=pre_filt,
                            zero_mean=False,
                            taper=False)
            except ValueError:
                warnings.warn("Evalresp failed, will only use the Poles and "
                              "Zeros stage")
                tr = backup_tr
                paz = parser.getPAZ(tr.id, tr.stats.starttime)
                if paz["sensitivity"] == 0:
                    warnings.warn("Sensitivity is 0 in SEED file and will "
                                  "not be taken into account!")
                    tr.simulate(paz_remove=paz,
                                remove_sensitivity=False,
                                pre_filt=pre_filt,
                                zero_mean=False,
                                taper=False)
                else:
                    tr.simulate(paz_remove=paz,
                                pre_filt=pre_filt,
                                zero_mean=False,
                                taper=False)
        except Exception:
            msg = ("File  could not be corrected with the help of the "
                   "SEED file '%s'. Will be skipped.") \
                  % processing_info["station_filename"]
            raise LASIFError(msg)
    # processing with RESP files =============================================
    elif "/RESP/" in station_file:
        try:
            tr.simulate(seedresp={
                "filename": station_file,
                "units": output_units,
                "date": tr.stats.starttime
            },
                        pre_filt=pre_filt,
                        zero_mean=False,
                        taper=False)
        except ValueError as e:
            msg = ("File  could not be corrected with the help of the "
                   "RESP file '%s'. Will be skipped. Due to: %s") \
                  % (processing_info["station_filename"], str(e))
            raise LASIFError(msg)
    elif "StationXML" in station_file:
        try:
            inv = obspy.read_inventory(station_file, format="stationxml")
        except Exception as e:
            msg = ("Could not open StationXML file '%s'. Due to: %s. Will be "
                   "skipped." % (station_file, str(e)))
            raise LASIFError(msg)
        tr.attach_response(inv)
        try:
            tr.remove_response(output=output_units,
                               pre_filt=pre_filt,
                               zero_mean=False,
                               taper=False)
        except Exception as e:
            msg = ("File  could not be corrected with the help of the "
                   "StationXML file '%s'. Due to: '%s'  Will be skipped.") \
                  % (station_file, e.__repr__())
            raise LASIFError(msg)
    else:
        raise NotImplementedError

    # =========================================================================
    # Step 4: Bandpass filtering
    # This has to be exactly the same filter as in the source time function
    # in the case of SES3D.
    # =========================================================================
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(0.05, type="cosine")
    tr.filter("bandpass",
              freqmin=freqmin,
              freqmax=freqmax,
              corners=3,
              zerophase=True)
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(0.05, type="cosine")
    tr.filter("bandpass",
              freqmin=freqmin,
              freqmax=freqmax,
              corners=3,
              zerophase=True)

    # =========================================================================
    # Step 5: Sinc interpolation
    # =========================================================================
    # Make sure that the data array is at least as long as the
    # synthetics array.
    tr.data = np.require(tr.data, requirements="C")
    tr.interpolate(sampling_rate=1.0 / processing_info["process_params"]["dt"],
                   method="lanczos",
                   starttime=starttime,
                   window="blackman",
                   a=12,
                   npts=processing_info["process_params"]["npts"])

    # =========================================================================
    # Save processed data and clean up.
    # =========================================================================
    # Convert to single precision to save some space.
    tr.data = np.require(tr.data, dtype="float32", requirements="C")
    if hasattr(tr.stats, "mseed"):
        tr.stats.mseed.encoding = "FLOAT32"

    return tr
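
A hedged driver sketch for the function above; the file names, event time and process parameters are illustrative only, and a StationXML file is assumed to exist under the layout the function expects (StationXML/station.<NET>_<STA>.response.xml).

import obspy

tr = obspy.read("7D.FN01A..HHZ.mseed")[0]       # hypothetical raw waveform
processing_info = {                             # illustrative values only
    "event_information": {"origin_time": obspy.UTCDateTime(2011, 9, 9, 19, 41, 34)},
    "process_params": {"dt": 0.75,
                       "highpass": 1.0 / 140.0,
                       "lowpass": 1.0 / 80.0,
                       "npts": 2000},
    "station_filename": "StationXML/station.7D_FN01A.response.xml",
}
tr = preprocessing_function(tr, processing_info)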
Example #13
def preprocessing_function(processing_info, iteration):  # NOQA
    """
    Function to perform the actual preprocessing for one individual seismogram.
    This is part of the project so it can change depending on the project.

    Please keep in mind that you will have to manually update this file to a
    new version if LASIF is ever updated.

    You can do whatever you want in this function as long as the function
    signature is honored. The file is read from ``"input_filename"`` and
    written to ``"output_filename"``.

    One goal of this function is to make sure that the data is available at the
    same time steps as the synthetics. The first time sample of the synthetics
    will always be the origin time of the event.

    Furthermore the data has to be converted to m/s.

    :param processing_info: A dictionary containing information about the
        file to be processed. It will have the following structure.
    :type processing_info: dict

    .. code-block:: python

        {'event_information': {
            'depth_in_km': 22.0,
            'event_name': 'GCMT_event_VANCOUVER_ISLAND...',
            'filename': '/.../GCMT_event_VANCOUVER_ISLAND....xml',
            'latitude': 49.53,
            'longitude': -126.89,
            'm_pp': 2.22e+18,
            'm_rp': -2.78e+18,
            'm_rr': -6.15e+17,
            'm_rt': 1.98e+17,
            'm_tp': 5.14e+18,
            'm_tt': -1.61e+18,
            'magnitude': 6.5,
            'magnitude_type': 'Mwc',
            'origin_time': UTCDateTime(2011, 9, 9, 19, 41, 34, 200000),
            'region': u'VANCOUVER ISLAND, CANADA REGION'},
         'input_filename': u'/.../raw/7D.FN01A..HHZ.mseed',
         'output_filename': u'/.../processed_.../7D.FN01A..HHZ.mseed',
         'process_params': {
            'dt': 0.75,
            'highpass': 0.007142857142857143,
            'lowpass': 0.0125,
            'npts': 2000},
         'station_coordinates': {
            'elevation_in_m': -54.0,
            'latitude': 46.882,
            'local_depth_in_m': None,
            'longitude': -124.3337},
         'station_filename': u'/.../STATIONS/RESP/RESP.7D.FN01A..HH*'}

    Please note that you also got the iteration object here, so if you
    want some parameters to change depending on the iteration, just use
    if/else on the iteration objects.

    >>> iteration.name  # doctest: +SKIP
    '11'
    >>> iteration.get_process_params()  # doctest: +SKIP
    {'dt': 0.75,
     'highpass': 0.01,
     'lowpass': 0.02,
     'npts': 500}

    Use ``$ lasif shell`` to play around and figure out what the iteration
    objects can do.

    """
    def zerophase_chebychev_lowpass_filter(trace, freqmax):
        """
        Custom Chebychev type two zerophase lowpass filter useful for
        decimation filtering.

        This filter is stable up to a reduction in frequency with a factor of
        10. If more reduction is desired, simply decimate in steps.

        Partly based on a filter in ObsPy.

        :param trace: The trace to be filtered.
        :param freqmax: The desired lowpass frequency.

        Will be replaced once ObsPy has a proper decimation filter.
        """
        # rp - maximum ripple of passband, rs - attenuation of stopband
        rp, rs, order = 1, 96, 1e99
        ws = freqmax / (trace.stats.sampling_rate * 0.5)  # stop band frequency
        wp = ws  # pass band frequency

        while True:
            if order <= 12:
                break
            wp *= 0.99
            order, wn = signal.cheb2ord(wp, ws, rp, rs, analog=0)

        b, a = signal.cheby2(order, rs, wn, btype="low", analog=0, output="ba")

        # Apply twice to get rid of the phase distortion.
        trace.data = signal.filtfilt(b, a, trace.data)

    # =========================================================================
    # Read seismograms and gather basic information.
    # =========================================================================
    starttime = processing_info["event_information"]["origin_time"]
    endtime = starttime + processing_info["process_params"]["dt"] * \
        (processing_info["process_params"]["npts"] - 1)
    duration = endtime - starttime

    st = obspy.read(processing_info["input_filename"])

    if len(st) != 1:
        warnings.warn("The file '%s' has %i traces and not 1. "
                      "Skip all but the first" %
                      (processing_info["input_filename"], len(st)))
    tr = st[0]

    # Make sure the seismograms are long enough. If not, skip them.
    if starttime < tr.stats.starttime or endtime > tr.stats.endtime:

        msg = ("The seismogram does not cover the required time span.\n"
               "Seismogram time span: %s - %s\n"
               "Requested time span: %s - %s" %
               (tr.stats.starttime, tr.stats.endtime, starttime, endtime))
        raise LASIFError(msg)

    # Trim to reduce processing cost.
    # starttime is the origin time of the event
    # endtime is the origin time plus the length of the synthetics
    tr.trim(starttime - 0.2 * duration, endtime + 0.2 * duration)

    # =========================================================================
    # Some basic checks on the data.
    # =========================================================================
    # Non-zero length
    if not len(tr):
        msg = "No data found in time window around the event. File skipped."
        raise LASIFError(msg)

    # No nans or infinity values allowed.
    if not np.isfinite(tr.data).all():
        msg = "Data contains NaNs or Infs. File skipped"
        raise LASIFError(msg)

    # =========================================================================
    # Step 1: Decimation
    # Decimate with the factor closest to the sampling rate of the synthetics.
    # The data is still oversampled by a large amount so there should be no
    # problems. This has to be done here so that the instrument correction is
    # reasonably fast even for input data with a large sampling rate.
    # =========================================================================
    while True:
        decimation_factor = int(processing_info["process_params"]["dt"] /
                                tr.stats.delta)
        # Decimate in steps for large sample rate reductions.
        if decimation_factor > 8:
            decimation_factor = 8
        if decimation_factor > 1:
            new_nyquist = tr.stats.sampling_rate / 2.0 / float(
                decimation_factor)
            zerophase_chebychev_lowpass_filter(tr, new_nyquist)
            tr.decimate(factor=decimation_factor, no_filter=True)
        else:
            break

    # =========================================================================
    # Step 2: Detrend and taper.
    # =========================================================================
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(max_percentage=0.05, type="hann")

    # =========================================================================
    # Step 3: Instrument correction
    # Correct seismograms to velocity in m/s.
    # =========================================================================
    output_units = "VEL"
    station_file = processing_info["station_filename"]

    # check if the station file actually exists ==============================
    if not processing_info["station_filename"]:
        msg = "No station file found for the relevant time span. File skipped"
        raise LASIFError(msg)

    # This is really necessary as other filters are just not sharp enough
    # and lots of energy from other frequency bands leaks into the frequency
    # band of interest
    freqmin = processing_info["process_params"]["highpass"]
    freqmax = processing_info["process_params"]["lowpass"]

    f2 = 0.9 * freqmin
    f3 = 1.1 * freqmax
    # Recommendations from the SAC manual.
    f1 = 0.5 * f2
    f4 = 2.0 * f3
    pre_filt = (f1, f2, f3, f4)

    # processing for seed files ==============================================
    if "/SEED/" in station_file:
        # XXX: Check if this is m/s. In all cases encountered so far it
        # always is, but SEED is in theory also able to specify corrections
        # to other units...
        parser = Parser(station_file)
        try:
            # The simulate might fail but might still modify the data. The
            # backup is needed for the backup plan to only correct using
            # poles and zeros.
            backup_tr = tr.copy()
            try:
                tr.simulate(seedresp={
                    "filename": parser,
                    "units": output_units,
                    "date": tr.stats.starttime
                },
                            pre_filt=pre_filt,
                            zero_mean=False,
                            taper=False)
            except ValueError:
                warnings.warn("Evalresp failed, will only use the Poles and "
                              "Zeros stage")
                tr = backup_tr
                paz = parser.get_paz(tr.id, tr.stats.starttime)
                if paz["sensitivity"] == 0:
                    warnings.warn("Sensitivity is 0 in SEED file and will "
                                  "not be taken into account!")
                    tr.simulate(paz_remove=paz,
                                remove_sensitivity=False,
                                pre_filt=pre_filt,
                                zero_mean=False,
                                taper=False)
                else:
                    tr.simulate(paz_remove=paz,
                                pre_filt=pre_filt,
                                zero_mean=False,
                                taper=False)
        except Exception as e:
            msg = ("File  could not be corrected with the help of the "
                   "SEED file '%s'. Will be skipped due to: %s") \
                % (processing_info["station_filename"], str(e))
            raise LASIFError(msg)
    # processing with RESP files =============================================
    elif "/RESP/" in station_file:
        try:
            tr.simulate(seedresp={
                "filename": station_file,
                "units": output_units,
                "date": tr.stats.starttime
            },
                        pre_filt=pre_filt,
                        zero_mean=False,
                        taper=False)
        except ValueError as e:
            msg = ("File  could not be corrected with the help of the "
                   "RESP file '%s'. Will be skipped. Due to: %s") \
                % (processing_info["station_filename"], str(e))
            raise LASIFError(msg)
    elif "/StationXML/" in station_file:
        try:
            inv = obspy.read_inventory(station_file, format="stationxml")
        except Exception as e:
            msg = ("Could not open StationXML file '%s'. Due to: %s. Will be "
                   "skipped." % (station_file, str(e)))
            raise LASIFError(msg)
        tr.attach_response(inv)
        try:
            tr.remove_response(output=output_units,
                               pre_filt=pre_filt,
                               zero_mean=False,
                               taper=False)
        except Exception as e:
            msg = ("File  could not be corrected with the help of the "
                   "StationXML file '%s'. Due to: '%s'  Will be skipped.") \
                % (processing_info["station_filename"], e.__repr__()),
            raise LASIFError(msg)
    else:
        raise NotImplementedError

    # =========================================================================
    # Step 4: Bandpass filtering
    # This has to be exactly the same filter as in the source time function
    # in the case of SES3D.
    # =========================================================================
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(0.05, type="cosine")
    tr.filter("bandpass",
              freqmin=freqmin,
              freqmax=freqmax,
              corners=3,
              zerophase=False)
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(0.05, type="cosine")
    tr.filter("bandpass",
              freqmin=freqmin,
              freqmax=freqmax,
              corners=3,
              zerophase=False)

    # =========================================================================
    # Step 5: Sinc interpolation
    # =========================================================================
    # Make sure that the data array is at least as long as the
    # synthetics array.
    tr.interpolate(sampling_rate=1.0 / processing_info["process_params"]["dt"],
                   method="lanczos",
                   starttime=starttime,
                   window="blackman",
                   a=12,
                   npts=processing_info["process_params"]["npts"])

    # =========================================================================
    # Save processed data and clean up.
    # =========================================================================
    # Convert to single precision to save some space.
    tr.data = np.require(tr.data, dtype="float32", requirements="C")
    if hasattr(tr.stats, "mseed"):
        tr.stats.mseed.encoding = "FLOAT32"

    tr.write(processing_info["output_filename"], format=tr.stats._format)
Example #14
            starttime = starttime + timedelta(days=1)
        df_times = pd.DataFrame(times_to_npz, columns=["starttime", "endtime"])
        info_partition = df_times
        return info_partition

    @property
    def info_inventory(self):
        info_inv = pd.DataFrame(self.parser.get_inventory()["channels"])  # the dataless as a dataframe
        return info_inv

    @property
    def info_by_channel(self):
        # Returns a dataframe with the parser information per channel.
        inv_df = self.info_inventory
        if self.st_restrictions is not None:
            for restriction in self.st_restrictions:
                inv_df_boolean = inv_df["channel_id"].str.contains(restriction).rename("restriction").to_frame()
                inv_df.drop(inv_df_boolean[inv_df_boolean.restriction == True].index, inplace=True)

            inv_df = inv_df.reset_index(drop=True)
        info_by_channel = inv_df
        return info_by_channel

if __name__ == '__main__':
    client_RSSB = Client("\\\\168.176.35.177\\archive")
    parser_RSSB = Parser(r'D:\EDCT\SNA_RSSB\Auto_PPSD\BT_UNALv5.dataless')
    SNA = Auto_PPSD(client_RSSB, parser_RSSB, "20170101", "20200508")
    print(SNA.info_by_channel)
    
Example #15
def config(set, sync):
    """This command should now only be used to use the command line to set
    a parameter value in the data base. It used to launch the Configurator but
    the recommended way to configure MSNoise is to use the "msnoise admin" web
    interface."""
    if set:
        from ..default import default
        if not set.count("="):
            click.echo("!! format of the set command is name=value !!")
            return
        name, value = set.split("=")
        if name not in default:
            click.echo("!! unknown parameter %s !!" % name)
            return
        from ..api import connect, update_config
        db = connect()
        update_config(db, name, value)
        db.commit()
        db.close()
        click.echo("Successfully updated parameter %s = %s" % (name, value))
    elif sync:
        import glob
        from ..api import connect, get_config, get_stations, update_station
        db = connect()
        response_format = get_config(db, 'response_format')
        response_files = glob.glob(
            os.path.join(get_config(db, 'response_path'), "*"))
        if response_format == "inventory":
            from obspy import read_inventory
            firstinv = True
            metadata = None
            for file in response_files:
                try:
                    inv = read_inventory(file)
                    if firstinv:
                        metadata = inv
                        firstinv = False
                    else:
                        metadata += inv
                except:
                    pass
        elif response_format == "dataless":
            from obspy.io.xseed import Parser
            all_metadata = {}
            for file in response_files:
                metadata = Parser(file)
                tmpinv = metadata.get_inventory()
                for chan in tmpinv["channels"]:
                    all_metadata[chan["channel_id"]] = metadata
        else:
            print("Response Format Not Supported")
            exit()
        for station in get_stations(db):
            id = "%s.%s.00.HHZ" % (station.net, station.sta)
            if response_format == "inventory":
                coords = inv.get_coordinates(id)
            else:

                coords = all_metadata[id].get_coordinates(id)
            update_station(
                db,
                station.net,
                station.sta,
                coords["longitude"],
                coords["latitude"],
                coords["elevation"],
                "DEG",
            )
            logging.info("Added coordinates (%.5f %.5f) for station %s.%s" %
                         (coords["longitude"], coords["latitude"], station.net,
                          station.sta))
        db.close()

    else:
        from ..s001configurator import main
        click.echo('Let\'s Configure MSNoise !')
        main()
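
The --set branch above is a thin wrapper around the MSNoise API; a minimal sketch of the equivalent direct calls (the parameter name and value are illustrative).

from msnoise.api import connect, update_config

db = connect()
update_config(db, "response_format", "inventory")   # illustrative name/value
db.commit()
db.close()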
Example #16
            print(split_files[m][n][o])
            streams += obspy.read(split_files[m][n][o])
        print('Merging streams...')
        streams.merge()
        print('Current data is:')
        print(streams)

        # Build probabilistic power spectral density objects for each trace
        all_ppsds = []
        all_ppsd_names = []
        for stream in streams:
            print('Calculating PPSDs for stream:')
            print(stream)
            ppsds = []
            ppsd_names = []
            metadata = Parser(metadata_directory + stream.stats.station +
                              stream.stats.channel[-1:] + '.seed')
            ppsd = PPSD(stream.stats, metadata)
            ppsd.add(stream)
            ppsds.append(ppsd)
            ppsd_names.append(stream.stats.station + '_' +
                              stream.stats.channel + '_PPSD')
            # accumulate inside the loop so every stream's PPSDs are kept
            all_ppsds.extend(ppsds)
            all_ppsd_names.extend(ppsd_names)

        # Plot PPSD data for each trace in 3 views
        print('Plotting PPSD data...')
        for n in range(len(all_ppsds)):
            all_ppsds[n].plot(show_coverage=True,
                              show_noise_models=True,
                              xaxis_frequency=True,
                              cmap=pqlx,
Example #17
0
            st += read('/msd/IU_ANMO/' + str(ctime.year) + '/' +
                       str(ctime.julday).zfill(3) + '/' + presloc + '_LDO*')
            ctime += 24. * 60. * 60.
        else:
            string = '/msd/' + net + '_' + sta + '/' + str(
                ctime.year) + '/' + str(
                    ctime.julday).zfill(3) + '/' + loc + '_'
            st += read(string + 'LH*')
            st += read('/msd/' + net + '_' + sta + '/' + str(ctime.year) +
                       '/' + str(ctime.julday).zfill(3) + '/' + presloc +
                       '_LDO*')
            ctime += 24. * 60. * 60.
    st.merge(fill_value=0)

    ## Read in metadata ##
    sp = Parser()
    if net == 'XX':
        print(st[0].id)
        for tr in st:
            if tr.stats.channel == 'LDO':
                continue
            else:
                stri = tr.id
                inv = read_inventory('/home/aalejandro/Pressure/RESP/RESP.' +
                                     stri)
                st.attach_response(inv)
    else:
        client = Client('IRIS')
        inv = client.get_stations(network=net,
                                  station=sta,
                                  starttime=stime,
Example #18
0
################################################################################
# set some defaults
################################################################################

interp_freqs = logspace(-1, 2, 121)[:-12]  # from 0.1-50 Hz

################################################################################
# parse AU dataless
################################################################################

# read dataless seed volumes
print('\nReading dataless seed volumes...')
from obspy.io.xseed import Parser
if getcwd().startswith('/nas'):
    au_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/AU/AU.IRIS.dataless'
    )
    cwb_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/AU/AU.cwb.dataless'
    )
    s1_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/S1/S1.IRIS.dataless'
    )
    #ge_parser = Parser('/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/GE/GE1.IRIS.dataless')
    #ge_parser = Parser('/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/GE/GE2.IRIS.dataless')
    iu_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/IU/IU.IRIS.dataless'
    )
    ii_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/II/II.IRIS.dataless'
    )
Example #19
0
#!/bin/env python
''' a function to use local resp information '''
import re
from obspy.io.xseed import Parser
import os
# need to look at the dataless seed to get the station orientation.

#here is a snippet from austin:
fname = 'IU.dataless'
src = '/APPS/metadata/SEED/'
if re.search('dataless', fname):
    string = os.path.join(src, fname)
    print(string)
    sf = Parser(os.path.join(src, fname))
    #print(sf)
    inv = sf.get_inventory()
    netsta = inv['stations'][0]['station_id']
    print(netsta)
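
The comment at the top of this snippet says the dataless is needed for the station orientation, but the excerpt stops after printing the station id. A rough sketch of one way to get the orientation, assuming a recent ObsPy where read_inventory can parse dataless SEED and Inventory.get_orientation is available; the SEED id and date below are placeholders, not values taken from the original script:

from obspy import UTCDateTime, read_inventory

# a dataless SEED volume can also be read directly as an Inventory
inv = read_inventory(os.path.join(src, fname))
# azimuth/dip for one channel epoch (pick an id that actually exists in the volume)
orient = inv.get_orientation("IU.ANMO.00.BHZ", datetime=UTCDateTime("2015-01-01"))
print(orient["azimuth"], orient["dip"])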
	print "%s saved"%name_fig
	plt.show()
	
if __name__ == '__main__':
	# output image format
	_format = 'png'

	# input parameters
	dataless_name = sys.argv[1]
	fn = 1.  # normalization frequency; for analog stages it is normally taken as 1.0 Hz

	# create a log file
	log_file = open(dataless_name+'.log', 'w')

	# load the dataless via io.xseed
	dtlss = Parser(dataless_name)
	print(dtlss, file=log_file)

	# build a dictionary with the response of each channel
	inv = dtlss.get_inventory()

	### work through the dataless:
	F = np.arange(.001,100,.001)
	for chn in inv['channels']:
		channel_id, start_date, end_date, instrument = chn['channel_id'], chn['start_date'], chn['end_date'], chn['instrument']
		location = channel_id.split('.')[2]
		if end_date != "":
			starttime, endtime = start_date.date, end_date.date
		else:
			starttime, endtime = start_date.date, " "
		PAZ = dtlss.get_paz(seed_id=channel_id,datetime=start_date)
Example #21
0
File: test_core.py  Project: seisfish/obspy
    def test_response_calculation_from_seed_and_xseed(self):
        """
        Test the response calculations with the obspy.core interface.

        It does it by converting whatever it gets to RESP files and then
        uses evalresp to get the response. This is compared to using the
        ObsPy Response object - this also uses evalresp but the actual flow
        of the data is very different.

        This is an expensive test but worth it for the trust it builds and
        the bugs it found and prevents.
        """
        # Very broad range but the responses should be exactly identical as
        # they use the same code under the hood so it should prove no issue.
        frequencies = np.logspace(-3, 3, 20)

        for filename in self.seed_files + self.xseed_files:
            # Parse the files using the Parser object.
            with warnings.catch_warnings(record=True):
                p = Parser(filename)
                p_resp = {_i[0]: _i[1] for _i in p.get_resp()}
                # Also read using the core routines.
                inv = obspy.read_inventory(filename)

            # Get all the channels and epochs.
            channels = collections.defaultdict(list)
            for c in p.get_inventory()["channels"]:
                channels[c["channel_id"]].append(
                    (c["start_date"], c["end_date"]))

            # Loop over each.
            for channel, epochs in channels.items():
                with NamedTemporaryFile() as tf:
                    r = p_resp["RESP.%s" % channel]
                    r.seek(0, 0)
                    tf.write(r.read())

                    # Now loop over the epochs.
                    for start, end in epochs:
                        if end:
                            t = start + (end - start) / 2
                        else:
                            t = start + 10

                        # Find response
                        n, s, l, c = channel.split(".")
                        _inv_t = inv.select(network=n,
                                            station=s,
                                            location=l,
                                            channel=c,
                                            starttime=t - 1,
                                            endtime=t + 1)
                        # Should now only be a single channel.
                        self.assertEqual(_inv_t.get_contents()["channels"],
                                         [channel])
                        inv_r = _inv_t[0][0][0].response

                        for unit in ("DISP", "VEL", "ACC"):
                            # Directly call evalresp.
                            e_r = evalresp_for_frequencies(
                                t_samp=None,
                                frequencies=frequencies,
                                filename=tf.name,
                                date=t,
                                units=unit)
                            i_r = inv_r.get_evalresp_response_for_frequencies(
                                frequencies=frequencies, output=unit)
                            # Adaptive absolute tolerance to deal with very
                            # small values.
                            atol = 1E-7 * max(
                                np.abs(e_r).max(),
                                np.abs(i_r).max())
                            np.testing.assert_allclose(
                                e_r.real,
                                i_r.real,
                                err_msg="real - %s - %s" % (filename, unit),
                                rtol=1E-6,
                                atol=atol)
                            np.testing.assert_allclose(
                                e_r.imag,
                                i_r.imag,
                                err_msg="imag - %s - %s" % (filename, unit),
                                rtol=1E-6,
                                atol=atol)

                            # Bonus: Also read the RESP file directly with
                            # obspy.core and test the response.
                            i_r_r = obspy.read_inventory(tf.name).select(
                                starttime=t - 1,
                                endtime=t + 1)[0][0][0].response\
                                .get_evalresp_response_for_frequencies(
                                frequencies=frequencies, output=unit)
                            np.testing.assert_allclose(
                                e_r.real,
                                i_r_r.real,
                                err_msg="RESP real - %s - %s" %
                                (filename, unit),
                                rtol=1E-6,
                                atol=atol)
                            np.testing.assert_allclose(
                                e_r.imag,
                                i_r_r.imag,
                                err_msg="RESP imag - %s - %s" %
                                (filename, unit),
                                rtol=1E-6,
                                atol=atol)
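
The test above drives home that a Parser-generated RESP file evaluated with evalresp and the Inventory's Response object give the same spectra. A minimal standalone sketch of that comparison for a single channel, assuming a dataless/XSEED file and SEED id of your own (file name and channel below are placeholders; evalresp_for_frequencies is taken from obspy.signal.invsim as in the test):

import numpy as np
import obspy
from obspy.io.xseed import Parser
from obspy.signal.invsim import evalresp_for_frequencies

filename = "example.dataless"      # placeholder dataless/XSEED volume
seed_id = "AU.MEEK.00.BHZ"         # placeholder channel id
freqs = np.logspace(-3, 3, 20)
t = obspy.UTCDateTime(2015, 1, 1)

# Route 1: write the Parser's RESP output for this channel, evaluate it with evalresp.
parser = Parser(filename)
resp_buf = dict(parser.get_resp())["RESP.%s" % seed_id]
resp_buf.seek(0, 0)
with open("tmp.resp", "wb") as fh:
    fh.write(resp_buf.read())
r1 = evalresp_for_frequencies(t_samp=None, frequencies=freqs,
                              filename="tmp.resp", date=t, units="VEL")

# Route 2: read the same file as an Inventory and use its Response object.
net, sta, loc, cha = seed_id.split(".")
inv = obspy.read_inventory(filename).select(network=net, station=sta, location=loc,
                                            channel=cha, starttime=t - 1, endtime=t + 1)
r2 = inv[0][0][0].response.get_evalresp_response_for_frequencies(freqs, output="VEL")

# Both routes should agree to within numerical tolerance.
np.testing.assert_allclose(r1, r2, rtol=1e-6, atol=1e-7 * np.abs(r1).max())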
Example #22
0
pickfiles = listdir_extension(folder, 'picks')

################################################################################
# loop through earthquakes and get data
################################################################################
usgscsv = '20200511_merged_events.csv'
usgscsv = '20201008_events.csv'
evdict = parse_usgs_events(usgscsv)

# read dataless seed volumes
from obspy.io.xseed import Parser
#au_parser = Parser('AU.dataless')
print('Reading dataless seed volumes...')
if getcwd().startswith('/nas'):
    au_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/AU/AU.IRIS.dataless'
    )
    s1_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/S1/S1.IRIS.dataless'
    )
    ge_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/GE/GE1.IRIS.dataless'
    )
    #ge_parser = Parser('/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/GE/GE2.IRIS.dataless')
    iu_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/IU/IU.IRIS.dataless'
    )
    ii_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/II/II.IRIS.dataless'
    )
Example #23
0
from data_fmt_tools import remove_low_sample_data
from obspy import read, UTCDateTime
from obspy.io.xseed import Parser  # Parser is used below but was not imported in the excerpt
from os import getcwd
from datetime import timedelta
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.style.use('classic')

fig = plt.figure(1, figsize=(8, 2.67))
ax = fig.add_subplot(111)

if getcwd().startswith('/nas'):
    datalessPath = '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/IU/IU.IRIS.dataless'
else:
    datalessPath = '/Users/trev/Documents/Networks/IU/IU.IRIS.dataless'
iu_parser = Parser(datalessPath)


# read file
if getcwd().startswith('/nas'):
    mseed = '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Ground_Motion/Australian_Earthquakes/20180916.Lake_Muir/waves/201809160455.NWAO.IU.mseed'
else:
    mseed = '/Users/trev/Documents/Earthquake_Data/20180916.Lake_Muir/201809160455.NWAO.IU.mseed'

st = read(mseed)
# remove low sample rate data
new_st = remove_low_sample_data(st)
new_st.merge()

tr = new_st[2]
Example #24
0
    def scan(self):
        self.channel_array=[] 
        for filename in self.file_list:
            # print filename
            # Load Dataless to modify, assume only one station
            p = Parser(filename)

            # Get station name and network code
            net, sta = p.get_inventory()['stations'][0]['station_id'].split('.')
            

            # Get Data Format Identifier Codes
            lookup_steim2 = -1
            lookup_geoscope_3bit = -1
            lookup_geoscope_4bit = -1

            format_lookup_list = []
            for local_blockette in p.abbreviations:
                if local_blockette.blockette_type == 30:
                    # Increment number of format
                    # Get Data Format Identifier Code for Steim2
                    if "Steim2 Integer Compression Format" in local_blockette.short_descriptive_name:
                        lookup_steim2 = local_blockette.data_format_identifier_code
                        # print "lookup_steim2 = ", lookup_steim2
                        format_lookup_list.append(lookup_steim2)
                    # Get Data Format Identifier Code for Geoscope 3 bits
                    if "Geoscope gain-range on 3 bits" in local_blockette.short_descriptive_name:
                        lookup_geoscope_3bit = local_blockette.data_format_identifier_code
                        # print "lookup_3bit = ", lookup_geoscope_3bit
                        format_lookup_list.append(lookup_geoscope_3bit)
                    # Get Data Format Identifier Code for Geoscope 4 bits
                    if "Geoscope gain range on 4 bits" in local_blockette.short_descriptive_name:
                        lookup_geoscope_4bit = local_blockette.data_format_identifier_code
                        # print "lookup_4bit = ", lookup_geoscope_4bit
                        format_lookup_list.append(lookup_geoscope_4bit)

            # print format_lookup_list

            # Get Station = first station
            blksta = p.stations[0]

            # Look for all blockettes 52 that reference to one of Geoscope Data Format
            # Identifier Code
            # print "---- Look for all blockettes 52 that reference Geoscope
            # Data Format 3 bit ----"

            if lookup_geoscope_3bit != -1:
                for blockette in blksta:
                    if (blockette.blockette_type == 52) and (blockette.data_format_identifier_code == lookup_geoscope_3bit):
                        print "3",  net,sta, blockette.channel_identifier,  blockette.location_identifier, blockette.start_date, blockette.end_date
                        self.channel_array.append(channel_period(net, sta, '', blockette.channel_identifier, blockette.start_date, blockette.end_date, '3'))
    
                        # embed()
            # print "---- Look for all blockettes 52 that reference Geoscope
            # Data Format 4 bit ----"
            if lookup_geoscope_4bit != -1:
                for blockette in blksta:
                    if (blockette.blockette_type == 52) and (blockette.data_format_identifier_code == lookup_geoscope_4bit):
                        print "4",  net,sta, blockette.channel_identifier,  blockette.location_identifier, blockette.start_date, blockette.end_date
                        self.channel_array.append(channel_period(net, sta, '', blockette.channel_identifier, blockette.start_date, blockette.end_date, '3'))
            
            pickle.dump(self.channel_array, open("dataless.p", "wb"))  # requires `import pickle` (cPickle is Python 2 only)
#            pickle.dump({"lion": "yellow", "kitty": "red"}, open("dataless.p", "wb"))
            
            
            for cha in self.channel_array:
                print(cha)
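
The scan above keys everything off blockette 30 (Data Format Dictionary) entries in the dataless volume. A minimal sketch of just that lookup step, assuming any dataless SEED file of your own (the attribute names are the same ones used in the method above):

from obspy.io.xseed import Parser

p = Parser("example.dataless")            # placeholder dataless file
for blk in p.abbreviations:
    if blk.blockette_type == 30:          # Data Format Dictionary blockette
        print(blk.data_format_identifier_code, blk.short_descriptive_name)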
Example #25
0
from obspy.core import utcdatetime, event
from obspy.core.event import Catalog, Event, Magnitude, Origin, StationMagnitude
#from obspy.neic.client import Client
from obspy.clients.neic.client import Client
from obspy.io.xseed import Parser
#from obspy.core.util import gps2DistAzimuth
from obspy.geodetics import gps2dist_azimuth as gps2DistAzimuth
#from obspy.taup import TauPyModel
from obspy.signal import invsim as inv
from obspy.io.xseed.utils import SEEDParserException
# plot all traces into one pdf file
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot

# we will use dataless seed from IRIS to get station information
parser = Parser("../../data/AU.dataless")

# travel-time model will be iasp91 but could be any
from obspy import taup
vel_model = taup.TauPyModel(model="iasp91")

#from obspy.taup.TauPyModel import get_travel_times
# local modules
#import local_magnitude

##########################################################################
# set constants and funcs
##########################################################################

r_earth = 6371.
Example #26
0
    def modify(self):

        for filename in self.file_list:
            print(filename)
            # Load Dataless to modify, assume only one station
            p = Parser(filename)

            # Get Data Format Identifier Codes
            lookup_steim2 = -1
            lookup_geoscope_3bit = -1
            lookup_geoscope_4bit = -1

            format_lookup_list = []
            print "---- Get data format identifier codes ----"
            for local_blockette in p.abbreviations:
                if local_blockette.blockette_type == 30:
                    # Increment number of format
                    # Get Data Format Identifier Code for Steim2
                    if "Steim2 Integer Compression Format" in local_blockette.short_descriptive_name:
                        lookup_steim2 = local_blockette.data_format_identifier_code
                        print "lookup_steim2 = ", lookup_steim2
                        format_lookup_list.append(lookup_steim2)
                    # Get Data Format Identifier Code for Geoscope 3 bits
                    if "Geoscope gain-range on 3 bits" in local_blockette.short_descriptive_name:
                        lookup_geoscope_3bit = local_blockette.data_format_identifier_code
                        print "lookup_3bit = ", lookup_geoscope_3bit
                        format_lookup_list.append(lookup_geoscope_3bit)
                    # Get Data Format Identifier Code for Geoscope 4 bits
                    if "Geoscope gain range on 4 bits" in local_blockette.short_descriptive_name:
                        lookup_geoscope_4bit = local_blockette.data_format_identifier_code
                        print "lookup_4bit = ", lookup_geoscope_4bit
                        format_lookup_list.append(lookup_geoscope_4bit)

            print(format_lookup_list)

            # Create Steim2 Data Format blockette if it does not exist
            print "---- Create Steim2 Data Format blockette if it does not exist ----"
            if lookup_steim2 == -1:
                # Get a blockette 30 with Steim2 encoding
                psteim2 = Parser("./test/dataless.G.CLF.seed")
                # Copy it on current dataless
                p.abbreviations.insert(0, psteim2.abbreviations[0])
                # create new lookup code (make sure it is not already used otherwise
                # increment)
                lookup_steim2 = 1
                while lookup_steim2 in format_lookup_list:
                    lookup_steim2 += 1
                print(lookup_steim2)
                # Set new lookup code
                p.abbreviations[0].data_format_identifier_code = lookup_steim2

            # Get Station = first station
            print "---- Get Station = first station ----"
            blksta = p.stations[0]

            # Remove Comment Blockettes because it is IPGP datacenter that insert them
            # and problems with PDCC

            print "---- Remove Comment Blockettes ----"

            i = 1
            while i < len(blksta):
                if blksta[i].blockette_type == 51:
                    blksta.pop(i)
                else:
                    i += 1

            # Remove Comment Blockettes
            i = 1
            while i < len(blksta):
                if blksta[i].blockette_type == 59:
                    blksta.pop(i)
                else:
                    i += 1

            # Look for all blockettes 52 that reference to one of Geoscope Data Format
            # Identifier Code
            print "---- Look for all blockettes 52 that reference Geoscope Data Format 3 bit ----"
            i = 1
            clone = -1
            if lookup_geoscope_3bit != -1:
                while i < len(blksta):
                    if blksta[i].blockette_type == 52:
                        if blksta[i].data_format_identifier_code == lookup_geoscope_3bit:
                            print ""
                            print blksta[i].channel_identifier, blksta[i].start_date, blksta[i].location_identifier
                            # Clone blockette 52
                            blksta.insert(i, copy.deepcopy(blksta[i]))
                            blksta[i].location_identifier = "00"
                            blksta[i].data_format_identifier_code = lookup_steim2
                            i += 1
                            clone = i
                        else:
                            clone = -1
                    else:
                        if clone != -1:  # Blockette is concerned
                            print(blksta[i].stage_sequence_number, blksta[i].blockette_type, end=" ")
                            # Clone blockette
                            b = copy.deepcopy(blksta[i])
                            # Detect stage 0
                            if b.stage_sequence_number == 0:
                                # If stage 0, add gain blockette before
                                newb = copy.deepcopy(blksta[i])
                                newb.sensitivity_gain = gain_geoscope_3
                                newb.stage_sequence_number = blksta[
                                    i - 1].stage_sequence_number + 1
                                print "new stage =", newb.stage_sequence_number
                                blksta.insert(clone, newb)
                                clone += 1
                                b.sensitivity_gain *= gain_geoscope_3
                                i += 1
                            blksta.insert(clone, b)
                            clone += 1
                            i += 1
                    i += 1

            # Verify
            print ""
            print ""
            print "---- Verify ----"
            display = -1
            if lookup_geoscope_3bit != -1:
                for blksta_local in blksta:
                    if blksta_local.blockette_type == 52:
                        if blksta_local.data_format_identifier_code == lookup_geoscope_3bit:
                            print ""
                            print blksta_local.channel_identifier, blksta_local.start_date,  blksta_local.location_identifier
                            display = 1
                        else:
                            display = -1
                    else:
                        if display == 1:
                            if blksta_local.stage_sequence_number == 0:
                                print(blksta_local.stage_sequence_number, blksta_local.blockette_type, blksta_local.sensitivity_gain)
                            else:
                                print(blksta_local.stage_sequence_number, blksta_local.blockette_type)

            # Look for all blockettes 52 that reference to one of Geoscope Data Format
            # Identifier Code
            print ""
            print ""
            print "---- Look for all blockettes 52 that reference Geoscope Data Format 4 bit ----"
            i = 1
            clone = -1
            if lookup_geoscope_4bit != -1:
                while i < len(blksta):
                    if blksta[i].blockette_type == 52:
                        if blksta[i].data_format_identifier_code == lookup_geoscope_4bit:
                            print ""
                            print blksta[i].channel_identifier, blksta[i].start_date, blksta[i].location_identifier
                            # Clone blockette 52
                            blksta.insert(i, copy.deepcopy(blksta[i]))
                            blksta[i].location_identifier = "00"
                            print(lookup_steim2, lookup_geoscope_4bit)
                            blksta[i].data_format_identifier_code = lookup_steim2
                            i += 1
                            clone = i
                        else:
                            clone = -1
                    else:
                        if clone != -1:  # Blockette is concerned
                            print(blksta[i].stage_sequence_number, blksta[i].blockette_type, end=" ")
                            # Clone blockette
                            b = copy.deepcopy(blksta[i])
                            # Detect stage 0
                            if b.stage_sequence_number == 0:
                                # If stage 0, add gain blockette before
                                newb = copy.deepcopy(blksta[i])
                                newb.sensitivity_gain = gain_geoscope_4
                                newb.stage_sequence_number = blksta[
                                    i - 1].stage_sequence_number + 1
                                print "new stage =", newb.stage_sequence_number
                                blksta.insert(clone, newb)
                                clone += 1
                                b.sensitivity_gain *= gain_geoscope_4
                                i += 1
                            blksta.insert(clone, b)
                            clone += 1
                            i += 1
                    i += 1

            # Verify
            print ""
            print ""
            print "---- Verify ----"
            display = -1
            if lookup_geoscope_4bit != -1:
                for blksta_local in blksta:
                    if blksta_local.blockette_type == 52:
                        if blksta_local.data_format_identifier_code == lookup_geoscope_4bit:
                            print ""
                            print blksta_local.channel_identifier, blksta_local.start_date,  blksta_local.location_identifier
                            display = 1
                        else:
                            display = -1
                    else:
                        if display == 1:
                            if blksta_local.stage_sequence_number == 0:
                                print(blksta_local.stage_sequence_number, blksta_local.blockette_type, blksta_local.sensitivity_gain, end=" ")
                            else:
                                print(blksta_local.stage_sequence_number, blksta_local.blockette_type, end=" ")

            # Write new dataless
            print ""
            print "---- Write new dataless ----"
            p.write_seed(self.args.output + '/' + os.path.basename(filename))
Example #27
0
    def test_evalresp_with_output_from_seed(self):
        """
        The StationXML file has been converted to SEED with the help of a tool
        provided by IRIS:

        https://seiscode.iris.washington.edu/projects/stationxml-converter
        """
        t_samp = 0.05
        nfft = 16384

        # Test for different output units.
        units = ["DISP", "VEL", "ACC"]
        filenames = ["IRIS_single_channel_with_response", "XM.05", "AU.MEEK"]

        for filename in filenames:
            xml_filename = os.path.join(self.data_dir,
                                        filename + os.path.extsep + "xml")
            seed_filename = os.path.join(self.data_dir,
                                         filename + os.path.extsep + "seed")

            p = Parser(seed_filename)

            # older systems don't like an end date in the year 2599
            t_ = UTCDateTime(2030, 1, 1)
            if p.blockettes[50][0].end_effective_date > t_:
                p.blockettes[50][0].end_effective_date = None
            if p.blockettes[52][0].end_date > t_:
                p.blockettes[52][0].end_date = None

            resp_filename = p.get_resp()[0][-1]

            inv = read_inventory(xml_filename)

            network = inv[0].code
            station = inv[0][0].code
            location = inv[0][0][0].location_code
            channel = inv[0][0][0].code
            date = inv[0][0][0].start_date

            for unit in units:
                resp_filename.seek(0, 0)

                seed_response, seed_freq = evalresp(t_samp,
                                                    nfft,
                                                    resp_filename,
                                                    date=date,
                                                    station=station,
                                                    channel=channel,
                                                    network=network,
                                                    locid=location,
                                                    units=unit,
                                                    freq=True)

                xml_response, xml_freq = \
                    inv[0][0][0].response.get_evalresp_response(t_samp, nfft,
                                                                output=unit)

                assert np.allclose(seed_freq, xml_freq, rtol=1E-5)
                assert np.allclose(seed_response, xml_response, rtol=1E-5)

                # also test getting response for a set of discrete frequencies
                indices = (-2, 0, -1, 1, 2, 20, -30, -100)
                freqs = [seed_freq[i_] for i_ in indices]
                response = inv[0][0][0].response
                got = response.get_evalresp_response_for_frequencies(
                    freqs, output=unit)
                expected = [seed_response[i_] for i_ in indices]
                np.testing.assert_allclose(got, expected, rtol=1E-5)
Example #28
0
def determ_magn(s_file, t_window, wav_path, dataless_path, distance_type='hypocentral', 
	plot_wavs = False, print_stat_mag = False ) :
	#distance_type='hypocentral'#epicentral or hypocentral
	outfile_name=s_file.split('.')[0]+'_'+str(t_window)+'s_M.out'

	# parameters for M formula: ML=a*log10(ampl)+b*log10(dist)+c*Dist+d. Two formulas:
	distance=15
	distance2=60

	#dist <= distance km :
	A, B, C, D = [1, 1.5, 0, 0.45-log10(2.8)]
	# distance < dist < distance2 km :
	E, F, G, H = [1, 0, 0.0180, 1.77+0.1]
	#dist > distance2 km :
	I, J, K, L = [1, 0, 0.0038, 2.62+0.1]
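	# Hedged worked example of the piecewise formula above (illustrative numbers only):
	# for a Wood-Anderson amplitude ampl = 0.001 m at a hypocentral distance of 100 km
	# (dist > distance2), the third branch applies:
	#   ML = I*log10(0.001*1000) + J*log10(100) + K*100 + L
	#      = 1*log10(1) + 0 + 0.0038*100 + (2.62+0.1) = 0.38 + 2.72 = 3.10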

	dataless=glob(dataless_path)

	#dataless_ug='/home/veronica/ownCloud/PSD/dataless_all.dseed'
	#dataless_permanent='/home/veronica/ownCloud/PSD/dataless_pstations_good.dseed'
	#True or False to plot the amplitudes for E and N channels
	#plot_wavs = False
	#saves the magnitudes for each station
	#print_stat_mag = False

	# Wood-Anderson standard seismograph instrument response according to SED
	paz_wa = {'sensitivity': 2800, 'zeros': [0j], 'gain': 1,
		  'poles': [-6.2832 - 4.7124j, -6.2832 + 4.7124j]}

	#parser_ug = Parser(dataless_ug)
	#parser_pstat = Parser(dataless_permanent)

	#sfile_path=glob(rea_path)

	#wav_path=glob('/home/veronica/SEISAN/WAV/'+wav_database+'*')
	#wav_path=glob(wavs_db_path+)
	#count=0
	magnitude=[]

	g=open(outfile_name,'w')

	f=open(s_file,'r').read()	
	all_sfile=re.split('                                  ' +
		'                                              \n', f)

	for sfile in all_sfile :

		station_pickings=[]
		waveforms=[]
		for lines in sfile.split('\n'):

			if len(lines) > 0 and lines[-1] == '6' :
				waveforms.append(lines.split(' ')[1])

			if len(lines) > 0 and lines[-1] == ' ':
				if lines[10:11] == 'S' :
					if lines[14:15] == ' ' or lines[14:15] < '4':  # pick weight blank or below 4
						station_pickings.append(lines[1:6]+', '+
							lines[18:20]+', '+ lines[20:22] +', '+
							lines[22:28]+', '+ lines[71:76])

			if len(lines) > 0 and lines[-1] == '1' and lines[23:30] != '       ' :
				event=UTCDateTime(year=int(lines[1:5]),
					month=int(lines[6:8]),
					day=int(lines[8:10]),
					hour=int(lines[11:13]),
					minute=int(lines[13:15]))+float(lines[16:20])

				type_eve=lines[21:23]
				lat=float(lines[23:30])
				lon=float(lines[31:38])
				dep=float(lines[38:43])
				rms=float(lines[51:55])

		if station_pickings:
			try:
				try:
					wav=read(wav_path+'/'+
						str(event.year)+'/'+
						"%02d" %event.month+
						'/'+waveforms[0].split('.')[0]
						+'*.mseed')
					#if len(wav) > 0 :
					#	for w in range(1,len(wav)):
					#		wav+=read(wav_path+'/'+
					#			str(event.year)+'/'+
					#			"%02d" %event.month+
					#			'/'+waveforms[w])
				except:
					wav=read(wav_path+'/'+waveforms[0].split('.')[0]
						+'*.mseed')
					#if len(wav) > 0 :
					#	for w in range(1,len(wav)):
					#		wav+=read(wav_path+'/'+waveforms[w])

				wav.detrend(type='demean')
				wav.taper(max_percentage=0.005, type='hann', side='both')

				y=wav[0].stats.starttime.year
				m=wav[0].stats.starttime.month
				d=wav[0].stats.starttime.day

			#	print 'Magnitude calculation for ', waveform		
				stat_mag=[]
				stat_info=[]
			except:
				print('no waveform found!')
				wav=read()
				wav.clear()
				pass

			if wav :

				for info in station_pickings :

					stat=info.split(',')[0].split(' ')[0]

					hh=int(info.split(',')[1])
					mm=int(info.split(',')[2])
					ss=float(info.split(',')[3])

					#print info
					t=UTCDateTime(y,m,d,hh,mm)+ss

					tr_e=wav.select(station=stat, channel='??E')
					tr_n=wav.select(station=stat, channel='??N')

					if not (tr_e and tr_n) :
						tr_e=wav.select(station=stat, channel='??3')
						tr_n=wav.select(station=stat, channel='??2')

					for each_dataless in dataless:
						try:
							parser=Parser(each_dataless)

							paz_stat_e = parser.get_paz(
								tr_e[0].id, datetime=t)
							paz_stat_n = parser.get_paz(
								tr_n[0].id, datetime=t)

							tr_e[0].stats.latitude=parser.get_coordinates(
								tr_e[0].id, datetime=t).get('latitude')
							tr_e[0].stats.longitude=parser.get_coordinates(
								tr_e[0].id, datetime=t).get('longitude')

							tr_n[0].stats.latitude=parser.get_coordinates(
								tr_n[0].id,datetime=t).get('latitude')
							tr_n[0].stats.longitude=parser.get_coordinates(
								tr_n[0].id,datetime=t).get('longitude')
						except:
							pass

					if not (tr_e and tr_n):
						print('Missing waveform for ' + stat)

					if tr_n and not tr_n[0].stats.longitude :
						print('Missing instrument response for ' + stat)
						paz_stat_e = False
						paz_stat_n = False
						#print 'Unknown problems with station ' + stat

					if paz_stat_e and paz_stat_n and (tr_e or tr_n) :
						try :
							sta_lat = tr_e[0].stats.latitude
							sta_lon = tr_e[0].stats.longitude
						except:
							sta_lat = tr_n[0].stats.latitude
							sta_lon = tr_n[0].stats.longitude

						if len(tr_e) > 1 :
							#print 'Merging traces for '+ stat
							#tr_e.plot()
							tr_e.merge(fill_value='interpolate')
							if max(abs(tr_e[0].data)) == 0 :
								tr_e=Stream(traces=[wav.select(
									station=stat, 
									channel='??E')[1]])

						tr_e.simulate(paz_remove=paz_stat_e, 
							paz_simulate=paz_wa,
							water_level=10)
						tr_e.trim(starttime=t-1, endtime=t+t_window)

						if tr_e:
							amplt_e=max(abs(tr_e[0].data))
							#sta_lat = tr_e[0].stats.latitude
							#sta_lon = tr_e[0].stats.longitude

						if not tr_e:
							amplt_e=np.nan

						if len(tr_n) > 1:
							#print 'Merging traces for '+ stat
							#tr_n.plot()
							tr_n.merge(fill_value='interpolate')
							#tr_n.plot()
							if max(abs(tr_n[0].data)) == 0 :
								tr_n=Stream(traces=[wav.select(
									station=stat, 
									channel='??N')[1]])

						tr_n.simulate(paz_remove=paz_stat_n, 
							paz_simulate=paz_wa,
							water_level=10)
						tr_n.trim(starttime=t-1, endtime=t+t_window)

						if tr_n:
							amplt_n=max(abs(tr_n[0].data))
							#sta_lat = tr_n[0].stats.latitude
							#sta_lon = tr_n[0].stats.longitude
						if not tr_n:
							amplt_n=np.nan

						#ampl=max(amplt_e, amplt_n)
						ampl=np.nanmax([amplt_e,amplt_n])
						#dist=float(info.split(',')[4]) 
						#distance taken from s-file
						#calculation of distance between station and event

						event_lat = lat
						event_lon = lon
						event_dep = dep
						dist, az, baz = gps2dist_azimuth(event_lat, event_lon, 
							sta_lat, sta_lon)
						dist = dist / 1000
						#print 'epi_dist: ', epi_dist, ' dist: ', dist

						if distance_type=='epicentral':
							dist=dist

						elif distance_type=='hypocentral':
							dist=math.sqrt((float(dist)**2)+
								(float(event_dep)**2))

						else:
							raise ValueError("ERROR: Wrong type "+
								"of distance! "+ distance_type +
								"' selected. Needs to be"+ 
								"'epicentral' or 'hypocentral'")

						if dist <= distance :
							#ml = (A*log10(ampl * 1000) + B*log10(dist) +
							#	C*dist + D)	
							ml = np.NaN
							print ('Distance lower than 15km for station '+ 
								stat + ' ' +str(round(dist,1)) )

						if dist > distance and dist <= distance2 :
							ml = (E*log10(ampl * 1000) + F*log10(dist) + 
								G*dist + H)

						if dist > distance2:
							ml = (I*log10(ampl * 1000) + J*log10(dist) +
								K*dist + L)

						## print(ml), stat
						stat_mag.append(ml)

						if print_stat_mag == True :
							#f.write( str(stat) + '  ' + str(ml) + '  ' +
							#	str(dist) + '  ' + str(ampl*1000) +'\n')
							stat_info.append( str(stat) + ' ' + str(ml) +
								' '+ str(dist) + ' ' + str(ampl*1000) )
							#separate list by space and convert to array
							#a=np.array([x.split(' ') for x in stat_info])
							#b=np.asarray(a)

						#plot for control wavs
						if plot_wavs == True :
							if not os.path.exists("figures"):
								os.makedirs("figures")

							fig=plt.figure()
							fig.suptitle('Amplitude (A) mm - time window '+ 
								str(t_window+1) + 's' +
								'\n' + str(event)[0:16] + '  ' + stat+ 
								' - ML ' + str(round(ml,1)) +' - D '
								+ str(round(dist,2)))

							ax1 = fig.add_subplot(4,1,1)
							ax1.plot(tr_e[0].data, 'g')	
							ax1.set_title(' A     E', x=0.92, y=0.6)
							ax1.yaxis.set_ticks(np.linspace(
								min(tr_e[0].data), 
								max(tr_e[0].data), 3))
							ax1.axvline(amplt_e, color='b')
							ax1.set_xlim(0,len(tr_e[0].data))
							ax1.axes.get_xaxis().set_ticklabels([])
							abc=np.where(abs(tr_e[0].data) == amplt_e)
							ax1.axvline(float(abc[0]), color='b', alpha=0.5)
							ax1.yaxis.set_major_formatter(
								mtick.FormatStrFormatter('%.2e'))

							ax2 = fig.add_subplot(4,1,2)
							ax2.plot(abs(tr_e[0].data), 'g', 
								label='max '+'{:.2E}'.format(amplt_e))
							ax2.legend(loc="upper left", fontsize=11)
							ax2.set_title('|A|    E', x=0.92, y=0.6)
							ax2.yaxis.set_ticks(np.linspace(0, ampl, 3))
							ax2.set_xlim(0,len(tr_e[0].data))
							ax2.axvline(float(abc[0]), color='b', alpha=0.5)
								#, scatter options
								#marker='o',  edgecolor='black',
								#linewidth='0.8', s=80)
							ax2.axes.get_xaxis().set_ticklabels([])
							ax2.set_ylim(0,ampl)
							ax2.yaxis.set_major_formatter(
								mtick.FormatStrFormatter('%.2e'))

							ax3 = fig.add_subplot(4,1,3)
							ax3.plot(tr_n[0].data, 'r')	
							ax3.set_title(' A     N', x=0.92, y=0.6)
							ax3.yaxis.set_ticks(np.linspace(
								min(tr_n[0].data), 
								max(tr_n[0].data), 3))
							ax3.set_xlim(0,len(tr_n[0].data))
							ax3.axes.get_xaxis().set_ticklabels([])
							abc=np.where(abs(tr_n[0].data) == amplt_n)
							ax3.axvline(float(abc[0]), color='b', alpha=0.5)
							ax3.yaxis.set_major_formatter(
								mtick.FormatStrFormatter('%.2e'))

							ax4 = fig.add_subplot(4,1,4)
							ax4.plot(abs(tr_n[0].data), 'r', 
								label='max '+'{:.2E}'.format(amplt_n))
							ax4.legend(loc="upper left", fontsize=11)
							ax4.set_title('|A|    N', x=0.92, y=0.6)
							ax4.yaxis.set_ticks(np.linspace(0, ampl, 3))
							ax4.set_xlim(0,len(tr_n[0].data))
							ax4.set_xlabel('npts')
							ax4.axvline(float(abc[0]), color='b', alpha=0.5)
							ax4.set_ylim(0,ampl)
							ax4.yaxis.set_major_formatter(
								mtick.FormatStrFormatter('%.2e'))

							plt.savefig('figures/'+str(event)[0:16] + '_' + 
								str(round(dist,2)) + '_' + stat+'.png',
								bbox_inches='tight')
							plt.close()

				eve_mag=round(np.nanmedian(stat_mag),1)	
				magnitude.append(eve_mag)

				print ( str(event) + ' ' + str(type_eve) + ' ' + str(eve_mag) + ' ' 
					+ str(len(stat_mag)) + ' ' +str(lat) + ' '+ str(lon) 
					+ ' ' + str(dep) )

				g.write( str(event) + ' ' + str(type_eve) + ' ' + str(eve_mag) + ' ' 
					+ '{:02.0f}'.format(len(stat_mag)) + ' ' + '{:01.3f}'.format(lat) 
					+ ' ' + '{:01.3f}'.format(lon) + ' '+ '{:02.2f}'.format(dep)
					+' ' + str(rms) + ' \n' )

			#		f.write( ' ' + '{:%Y   %m   %d   %H   %M   %S.%f}'.format(event.
			#			datetime)[0:-4]+'     '+'{:07.3f}'.format(lat[count])+
			#			'     '+ '{:07.3f}'.format(lon[count]) + 
			#			'     ' + '{:07.3f}'.format
			#			(dep[count]) + '   ' + str(eve_mag)+'\n' )

				if print_stat_mag :
					stat_info=sorted(stat_info, key = lambda x: x.split(' ')[1])
					for stat_info_lines in stat_info: 
						g.write(stat_info_lines+'\n')  # write to the output file; f holds the s-file text read earlier
					g.write('--------------------------------------------------------' 
						+ '\n')

		#
		#count=count+1
	g.close()
Example #29
0
if resultdir[-1] == '/':
    resultdir = resultdir[:-1]
if not os.path.exists(os.getcwd() + '/' + resultdir):
    os.mkdir(os.getcwd() + '/' + resultdir)

# Lets get the current network
curnet = parserval.network

# Lets open the results file to write
statfile = open(os.getcwd() + '/' + resultdir + '/Results' + curnet + '.csv',
                'w')
statfile.write('net,sta,loc,chan,scalefac,lag,corr\n')

# Lets read in the dataless
try:
    sp = Parser(datalessloc + curnet + ".dataless")
except:
    print "Can not read the dataless."
    exit(0)

# If we arent doing manual station
# lists we need to get one for the network
if parserval.sta:
    manstalist = True
    if debug:
        print "We are using a manual station list"
    stalist = parserval.sta.split(",")
    stations = []
    for sta in stalist:
        stations.append(parserval.network + " " + sta)
    if debug:
Example #30
0
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 09 12:08:12 2017

@author: u56903
"""
from obspy.io.xseed import Parser
from obspy.core import read
from os import path

# read dataless seed volumes
#parser = Parser(path.join('dataless', 'AU.dataless'))
parser = Parser(path.join('dataless', 'AU.seed'))

# parse example mseed data
st = read(path.join('mseed', '200509212246.mseed'))

for tr in st:
    # get record info    
    seedid=tr.get_id()
    channel = tr.stats.channel
    
    start_time = tr.stats.starttime
    
    # check to see if response info exists
    try:
        tr.stats.coordinates = parser.get_coordinates(seedid,start_time)
        
        paz=parser.get_paz(seedid,start_time)
        
        print(seedid, paz)