Example #1
# Assumed imports for this test; StreamCollection and
# create_travel_time_dataframe come from gmprocess modules whose exact
# paths vary across gmprocess versions.
import os

import numpy as np
import pkg_resources
from obspy.geodetics import locations2degrees
from obspy.taup import TauPyModel


def test_get_travel_time_df():
    datapath = os.path.join('data', 'testdata', 'travel_times')
    datadir = pkg_resources.resource_filename('gmprocess', datapath)

    sc1 = StreamCollection.from_directory(os.path.join(datadir, 'ci37218996'))
    sc2 = StreamCollection.from_directory(os.path.join(datadir, 'ci38461735'))
    scs = [sc1, sc2]

    df1, catalog = create_travel_time_dataframe(
        sc1, os.path.join(datadir, 'catalog_test_traveltimes.csv'), 5, 0.1,
        'iasp91')
    df2, catalog = create_travel_time_dataframe(
        sc2, os.path.join(datadir, 'catalog_test_traveltimes.csv'), 5, 0.1,
        'iasp91')

    model = TauPyModel('iasp91')
    for dfidx, df in enumerate([df1, df2]):
        for staidx, sta in enumerate(df):
            for eqidx, time in enumerate(df[sta]):
                sta_coords = scs[dfidx][staidx][0].stats.coordinates
                event = catalog[eqidx]
                dist = locations2degrees(sta_coords['latitude'],
                                         sta_coords['longitude'],
                                         event.latitude, event.longitude)
                if event.depth_km < 0:
                    depth = 0
                else:
                    depth = event.depth_km
                travel_time = model.get_travel_times(depth, dist,
                                                     ['p', 'P', 'Pn'])[0].time
                abs_time = event.time + travel_time
                np.testing.assert_almost_equal(abs_time, time, decimal=1)
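
The assertion above leans on obspy's TauP interface. As a standalone
reference, here is a minimal sketch of that interface with hypothetical
depth and distance values; only obspy calls are used, matching the usage in
the test:

from obspy.taup import TauPyModel

model = TauPyModel('iasp91')
# Depth is in km and distance in degrees; arrivals come back ordered by
# time, so the first entry is the earliest of the requested phases.
arrivals = model.get_travel_times(source_depth_in_km=10.0,
                                  distance_in_degree=1.5,
                                  phase_list=['p', 'P', 'Pn'])
print(arrivals[0].time)  # travel time in seconds after the event origin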
Example #2
# Assumed imports; the gmprocess names used below (StreamCollection,
# get_event_object, create_travel_time_dataframe, remove_response,
# corner_frequencies, lowpass_filter, highpass_filter, signal_split,
# signal_end, cut, trim_multiple_events) come from gmprocess modules whose
# exact paths vary across gmprocess versions.
import os

import numpy as np
import pkg_resources
from obspy import UTCDateTime


def test_trim_multiple_events():
    datapath = os.path.join('data', 'testdata', 'multiple_events')
    datadir = pkg_resources.resource_filename('gmprocess', datapath)
    sc = StreamCollection.from_directory(os.path.join(datadir, 'ci38457511'))
    origin = get_event_object('ci38457511')
    df, catalog = create_travel_time_dataframe(
        sc, os.path.join(datadir, 'catalog.csv'), 5, 0.1, 'iasp91')
    for st in sc:
        st.detrend('demean')
        remove_response(st, None, None)
        st = corner_frequencies.constant(st)
        lowpass_filter(st)
        highpass_filter(st)
        signal_split(st, origin)
        signal_end(st,
                   origin.time,
                   origin.longitude,
                   origin.latitude,
                   origin.magnitude,
                   method='model',
                   model='AS16')
        cut(st, 2)
        trim_multiple_events(st, origin, catalog, df, 0.2, 0.7, 'B14',
                             {'vs30': 760}, {'rake': 0})

    num_failures = sum(1 for st in sc if not st.passed)
    assert num_failures == 1

    failure = sc.select(station='WRV2')[0][0].getParameter('failure')
    assert failure['module'] == 'trim_multiple_events'
    assert failure['reason'] == ('A significant arrival from another event '
                                 'occurs within the first 70.0 percent of the '
                                 'signal window')

    for tr in sc.select(station='JRC2')[0]:
        np.testing.assert_almost_equal(
            tr.stats.endtime, UTCDateTime('2019-07-06T03:20:38.7983Z'))
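
Each step in the chain above records failures on the stream rather than
raising, which is what the final assertions inspect. A minimal sketch of
that inspection pattern, using only attributes already shown in these
examples (st.passed, get_id, getParameter):

for st in sc:
    if not st.passed:
        failure = st[0].getParameter('failure')
        print(st.get_id(), failure['module'], failure['reason'])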
Example #3
# Assumed imports; gmprocess-internal names (StreamCollection, get_config,
# signal_split, signal_end, window_checks, create_travel_time_dataframe,
# PROCESS_LEVELS, REQ_ORIGIN, and the processing-step functions resolved via
# globals()) come from gmprocess modules whose exact paths vary by version.
import logging

from obspy.taup import TauPyModel


def process_streams(streams, origin, config=None):
    """
    Run processing steps from the config file.

    This method looks in the 'processing' config section, loops over the
    steps listed there, and hands the config options off to the appropriate
    processing method. Streams that fail any of the checks are kept in the
    StreamCollection, but the parameter 'passed_checks' is set to False and
    subsequent processing steps are not applied once a check has failed.

    Args:
        streams (StreamCollection):
            A StreamCollection object.
        origin (ScalarEvent):
            ScalarEvent object.
        config (dict):
            Configuration dictionary (or None). See get_config().

    Returns:
        A StreamCollection object.
    """

    if not isinstance(streams, StreamCollection):
        raise ValueError('streams must be a StreamCollection instance.')

    if config is None:
        config = get_config()

    logging.info('Processing streams...')

    event_time = origin.time
    event_lon = origin.longitude
    event_lat = origin.latitude

    # -------------------------------------------------------------------------
    # Compute a travel-time matrix for interpolation later in the
    # trim_multiple_events step
    if any('trim_multiple_events' in step for step in config['processing']):
        travel_time_df, catalog = create_travel_time_dataframe(
            streams, **config['travel_time'])
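        # Per the loop in Example #1, the returned dataframe has one column
        # per station and one row per event in the returned catalog.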
    # -------------------------------------------------------------------------
    # Begin noise/signal window steps

    logging.info('Windowing noise and signal...')
    window_conf = config['windows']
    model = TauPyModel(config['pickers']['travel_time']['model'])

    for st in streams:
        logging.info('Checking stream %s...' % st.get_id())
        # Estimate noise/signal split time
        st = signal_split(st,
                          origin,
                          model,
                          picker_config=config['pickers'],
                          config=config)

        # Estimate end of signal
        end_conf = window_conf['signal_end']
        event_mag = origin.magnitude
        st = signal_end(st,
                        event_time=event_time,
                        event_lon=event_lon,
                        event_lat=event_lat,
                        event_mag=event_mag,
                        **end_conf)
        wcheck_conf = window_conf['window_checks']
        if wcheck_conf['do_check']:
            st = window_checks(
                st,
                min_noise_duration=wcheck_conf['min_noise_duration'],
                min_signal_duration=wcheck_conf['min_signal_duration'])

    # -------------------------------------------------------------------------
    # Begin processing steps
    logging.info('Starting processing...')
    processing_steps = config['processing']

    # Loop over streams
    for stream in streams:
        logging.info('Stream: %s' % stream.get_id())
        for processing_step_dict in processing_steps:

            key_list = list(processing_step_dict.keys())
            if len(key_list) != 1:
                raise ValueError(
                    'Each processing step must contain exactly one key.')
            step_name = key_list[0]

            logging.info('Processing step: %s' % step_name)
            step_args = processing_step_dict[step_name]
            # Using globals doesn't seem like a great solution here, but it
            # works.
            if step_name not in globals():
                raise ValueError('Processing step %s is not valid.' %
                                 step_name)

            # Origin is required by some steps and has to be handled specially.
            # There must be a better solution for this...
            if step_name == 'fit_spectra':
                step_args = {'origin': origin}
            elif step_name in REQ_ORIGIN:
                step_args['origin'] = origin
            elif step_name == 'trim_multiple_events':
                step_args['catalog'] = catalog
                step_args['travel_time_df'] = travel_time_df
            elif step_name == 'compute_snr':
                step_args['mag'] = origin.magnitude

            if step_args is None:
                stream = globals()[step_name](stream)
            else:
                stream = globals()[step_name](stream, **step_args)
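            # Note: this loop never consults a passed flag itself, so the
            # docstring's guarantee that failed streams skip later steps is
            # presumably enforced inside the dispatched step functions.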

    # -------------------------------------------------------------------------
    # Begin colocated instrument selection
    colocated_conf = config['colocated']
    streams.select_colocated(**colocated_conf)

    for st in streams:
        for tr in st:
            tr.stats.standard.process_level = PROCESS_LEVELS['V2']

    logging.info('Finished processing streams.')
    return streams
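
Putting the pieces together, a minimal usage sketch (the data path is
hypothetical, and the names are taken from the examples above rather than
from a pinned gmprocess API):

origin = get_event_object('ci38457511')
sc = StreamCollection.from_directory('/path/to/ci38457511')
processed = process_streams(sc, origin, config=get_config())
for st in processed:
    if st.passed:
        print(st.get_id(), 'passed')
    else:
        print(st.get_id(), st[0].getParameter('failure')['reason'])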