Example #1
def pick_events(window=(-100, 500), filter=(0.033, 2.), phase='P', new_sampling_rate=20):  #@ReservedAssignment
    logfile = os.path.dirname(data.rf_events) + '/log_pick_events_%s.txt'
    util.checkDir(logfile)
    util.setRootLogger(logfile=logfile % '', logdebugfile=logfile % '_debug')
    log.info('***** Pick events: %s' % util.parameters())
    mod_data.eventPicker(data, component='all', phase=phase, window=window,
                         filter=filter, new_sampling_rate=new_sampling_rate)
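
A minimal usage sketch for pick_events(), assuming the surrounding module already provides the data, util, mod_data and log objects used above; the window, filter, phase and sampling-rate values are placeholders.

# Hypothetical call; argument values are illustrative only and assume the
# module-level `data` object from the snippet above is configured.
pick_events(window=(-50, 300),        # time window in seconds around the phase onset
            filter=(0.01, 1.0),       # bandpass corner frequencies in Hz
            phase='S',
            new_sampling_rate=10)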
Example #2
 def sort(self, keys='datetime', reverse=False, logit=True):
     """
     Sort events by a list of keys.
     """
     # Check the list and all items.
     items = [
         'datetime', 'id', 'latitude', 'longitude', 'depth', 'magnitude'
     ]
     msg = "keys must be a list of item strings. Available items to " \
         "sort after: \n" + ' '.join(items)
     if isinstance(keys, basestring):
         keys = [keys]
     if not isinstance(keys, list):
         raise TypeError(msg)
     for _i in keys:
         if _i not in items:
             raise TypeError(msg)
     # Loop over all keys in reversed order.
     for _i in keys[::-1]:
         super(Events, self).sort(key=lambda x: x[_i], reverse=reverse)
     if logit:
         log.info('Sort events by %s' %
                  util.parameters(only=['keys', 'reverse']))
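
A short, hypothetical sketch of calling Events.sort(), assuming Events is a list-like container whose items can be indexed by the key names checked above.

# Hypothetical usage; `events` stands for an Events instance filled elsewhere.
events.sort(keys='magnitude', reverse=True)             # largest events first
events.sort(keys=['depth', 'datetime'], logit=False)    # multi-key sort, no log entry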
Example #3
def pick_events(
        window=(-100, 500), filter=(0.033, 2.), phase='P',
        new_sampling_rate=20):  #@ReservedAssignment
    logfile = os.path.dirname(data.rf_events) + '/log_pick_events_%s.txt'
    util.checkDir(logfile)
    util.setRootLogger(logfile=logfile % '', logdebugfile=logfile % '_debug')
    log.info('***** Pick events: %s' % util.parameters())
    mod_data.eventPicker(data,
                         component='all',
                         phase=phase,
                         window=window,
                         filter=filter,
                         new_sampling_rate=new_sampling_rate)
Example #4
def filter(data, correlations, filters, stack=None, period=24 * 3600):  #@ReservedAssignment
    log.info('Filter correlations: %s' % util.parameters())
    for correlation in correlations:
        expr = data.getX(correlation, '????', filter_=None, period=period, stack=stack) + '.QHD'
        files = glob(expr)
        for file_ in files:
            try:
                st = read(file_)
            except Exception as err:
                log.warning('Could not load file, because:\n%s' % str(err))
                continue
            for filter_ in filters:
                st2 = st.copy()
                st2.filter2(*filter_)
                data.writeX(st2, correlation, st[0].stats.endtime, filter=filter_, period=period, stack=stack)
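
A hedged sketch of calling filter(); the correlation tuples, corner frequencies and period are placeholders, and `data` is assumed to be a sito data object providing the getX/writeX methods used above.

# Hypothetical call; correlation entries are 'station+component' strings as in
# the other examples (e.g. correlation[0][:-1] -> station, [-1] -> component).
correlations = [('PB01Z', 'PB02Z'), ('PB01Z', 'PB01Z')]
filters = [(0.01, 0.1), (0.1, 0.5), (0.5, 2.0)]          # bandpass limits in Hz
filter(data, correlations, filters, period=24 * 3600)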
Example #5
def stack(data, correlations, dt=-1, filters=None, period=24 * 3600, shift=None, onefile=False, yearfiles=False):
    #t1 = t1.__class__(t1.date)
    #t2 = t2.__class__(t2.date)
    log.info('Stack correlations: %s' % util.parameters())
    print 'Stack correlations... '
    if filters is None:
        filters = (None,)
    stack = Stream()
    last_year = None
    for correlation in ProgressBar()(correlations):
        for filter_ in filters:
            try:
                st = read(data.getX(correlation, '*', filter=filter_, period=period) + '.QHD')
            except Exception as err:
                log.warning('Could not load file, because:\n%s' % str(err))
            else:
                print correlation
                for some_traces in streamtimegen(st, dt=dt, start=None, shift=shift):
                    tr = some_traces.calculate('mean')
                    stack.append(tr)
                    this_year = (some_traces[0].stats.starttime).year
                    if last_year is None:
                        last_year = this_year
                    #if yearfiles and (some_traces[0].stats.starttime + period).julday == 1 and len(stack) > 0:
                    if yearfiles and this_year != last_year and len(stack) > 0:
                        data.writeX(stack, correlation, time=some_traces[0].stats.starttime - 365 * 24 * 3600, filter_=filter_, period=period, stack=(dt, shift))
                        last_year = this_year
                        stack = Stream()
                if not onefile:
                    if yearfiles:
                        time = some_traces[0].stats.starttime
                    else:
                        time = None
                    if len(stack) > 0:
                        data.writeX(stack, correlation, time=time, filter=filter_, period=period, stack=(dt, shift))
                    last_year = None
                    stack = Stream()

    if onefile:
        data.writeX(stack, ('all', 'all'), time=None, filter=filters[0], period=period, stack=(dt, shift))
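
A sketch of a stack() call under the same assumptions; dt=-1 appears to stack everything per correlation, while a positive dt (interpreted here as seconds, an assumption) would produce moving-window stacks advanced by shift.

# Hypothetical usage of stack(); parameter values are illustrative.
stack(data, correlations, dt=-1)                         # one total stack per correlation
stack(data, correlations,
      dt=10 * 24 * 3600, shift=24 * 3600,                # assumed: 10-day windows, 1-day step
      filters=[(0.1, 0.5)], yearfiles=True)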
Example #6
 def sort(self, keys='datetime', reverse=False, logit=True):
     """
     Sort events by a list of keys.
     """
     # Check the list and all items.
     items = ['datetime', 'id', 'latitude', 'longitude', 'depth', 'magnitude']
     msg = "keys must be a list of item strings. Available items to " \
         "sort after: \n" + ' '.join(items)
     if isinstance(keys, basestring):
         keys = [keys]
     if not isinstance(keys, list):
         raise TypeError(msg)
     for _i in keys:
         if _i not in items:
             raise TypeError(msg)
     # Loop over all keys in reversed order.
     for _i in keys[::-1]:
         super(Events, self).sort(key=lambda x: x[_i], reverse=reverse)
     if logit:
         log.info('Sort events by %s' % util.parameters(only=['keys', 'reverse']))
Example #7
def stack_day(data, correlations, dt=-1, start=None, onefile=False):
    #t1 = t1.__class__(t1.date)
    #t2 = t2.__class__(t2.date)
    log.info('Stack day correlations: %s' % util.parameters())
    if start is not None:
        dt_log = '%s-%s' % (dt, start)
    else:
        dt_log = dt
    stack = Stream()
    for correlation in correlations:
        try:
            days = read(data.getXDay(correlation, '*') + '.QHD')
        except Exception as err:
            log.warning('Could not load file, because:\n%s' % str(err))
        else:
            for somedays in streamdaygen(days, dt=dt, start=start):
                tr = somedays.calculate('mean')
                stack.append(tr)
            if not onefile:
                data.writeXDayStack(stack, correlation, dt_log)
                stack = Stream()
    if onefile:
        data.writeXDayStack(stack, ('all', 'all'), dt_log)
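
A corresponding, hypothetical sketch for stack_day(), which stacks the day files addressed by data.getXDay(); the dt unit (assumed to be days here) and values are placeholders.

# Hypothetical usage of stack_day().
stack_day(data, correlations, dt=-1)                     # total stack per correlation
stack_day(data, correlations, dt=5, onefile=True)        # assumed: 5-day stacks, single output file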
Example #8
def noisexcorrf(data, correlations, t1, t2, shift_sec, period=24 * 3600,
                 pool=None, max_preload=5, overlap=0):
    """
    Frequency domain noise cross correlation

    Expects day files prepared with prepare()
    """
    if period == 'day':
        period = 24 * 3600
    elif period == 'hour':
        period = 3600
    log.info('Noise cross correlation: %s' % util.parameters())
    print 'Noise cross correlation...'
#    if period != 24 * 3600:
#        raise ValueError('function at the moment only '
#                         'working with period=24*3600.')
    if 24 * 3600 % period != 0:
        raise ValueError('period has to be factor of a day')
    for correlation in ProgressBar()(correlations):
        autocorr = correlation[0] == correlation[1]
        station1 = correlation[0][:-1]
        station2 = correlation[1][:-1]
        comp1 = correlation[0][-1]
        comp2 = correlation[1][-1]
#        data1 = FloatingStream(data, t1, station1, shift_sec, component=comp1, period=period)
#        if not autocorr:
#            data2 = FloatingStream(data, t1, station2, 0, component=comp2, period=period)
        xcorr = [] if pool else Stream()
        stream1 = None
        for t in timegen(t1, t2, 24 * 3600):
            if len(xcorr) > 0 and (t - period).date != t.date and (
                    (period >= 3600 and t.julday == 1) or period < 3600):
                data.writeX(_get_async_resutls(xcorr) if pool else xcorr,
                            correlation, t - period, period=period)
                xcorr = [] if pool else Stream()
            if (not (data.getStream(t, station1, component=comp1, check=True) and
                     (autocorr or data.getStream(t, station2, component=comp2, check=True)))):
                log.debug('No data for %s %s' % (str(correlation), t))
                continue
            try:
                if overlap == 0 or stream1 is None:
                    stream1 = data.getStream(t, station1, component=comp1)
                else:
                    stream1 = stream1 + data.getStream(t, station1, component=comp1)
                    stream1.merge()
                    stream1.trim(t - overlap, None)
                if autocorr:
                    stream2 = stream1
                else:
                    if overlap == 0:
                        stream2 = data.getStream(t, station2, component=comp2)
                    else:
                        raise NotImplementedError()
            except ValueError:
                log.warning('Could not load data for %s %s' % (str(correlation), t))
                continue
            if not (len(stream1) == len(stream2) == 1):
                log.debug('Stream is not compatible (%d,%d traces)' % (len(stream1), len(stream2)))
                continue
            tr1 = stream1[0]
            tr2 = stream2[0]
            if tr1.stats.sampling_rate != tr2.stats.sampling_rate:
                raise ValueError('Sampling rate is different.')
            if period == 24 * 3600:
                # check data
                if tr1.stats.npts != tr2.stats.npts:
                    log.info('Discard data because of different npts %d vs %d' % (tr1.stats.npts, tr2.stats.npts))
                    continue
                log.debug('Calculating xcorr for %s' % t)
                # original implementation only ok for period == 'day'
                args = (tr1, tr2, shift_sec, correlation)
                if pool:
                    if len(xcorr) >= max_preload:
                        xcorr[-max_preload].wait()
                    xcorr.append(pool.apply_async(_noisexcorr_traces, args))
                else:
                    xcorr.append(_noisexcorr_traces(*args))
            # new implementation for all other periods
            else:
                if tr1.stats.is_fft:
                    tr1.ifft()
                if tr2.stats.is_fft:
                    tr2.ifft()
                t1_ = tr1.stats.starttime
                while t1_ + period <= tr1.stats.endtime + 1:
                    t2_ = t1_ + period
                    tr1_ = tr1.slice(t1_, t2_)
                    tr2_ = tr2.slice(t1_, t2_)
                    args = (tr1_, tr2_, shift_sec, correlation)
                    if pool:
                        if len(xcorr) >= max_preload:
                            xcorr[-max_preload].wait()
                        xcorr.append(pool.apply_async(_noisexcorr_traces, args))
                    else:
                        xcorr.append(_noisexcorr_traces(*args))
                    t1_ = t1_ + period - overlap
        if len(xcorr) > 0:
            data.writeX(_get_async_resutls(xcorr) if pool else xcorr,
                        correlation, t, period=period)
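
A hedged sketch of driving noisexcorrf(), assuming obspy's UTCDateTime for the day boundaries (the prepare() docstring below speaks of UTC objects) and an optional multiprocessing pool; the date range and the 200 s maximum lag are placeholders.

# Hypothetical usage of noisexcorrf(); all values are illustrative.
from multiprocessing import Pool
from obspy.core import UTCDateTime as UTC

t1 = UTC('2010-01-01')
t2 = UTC('2010-12-31')
pool = Pool(4)                                           # optional worker pool
noisexcorrf(data, correlations, t1, t2, shift_sec=200,   # +-200 s correlation lag
            pool=pool, max_preload=5)
pool.close()
pool.join()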
Example #9
def prepare(data, stations, t1, t2, component='all', use_floating_stream=True,
            use_client=True,  #arclink_client_for_LVC=None,
            pool=None, max_preload=5, **kwargs):
    """
    Prepare data for cross correlation.

    Day files of raw data are loaded (from files data.raw), and the prepared
    data is again written to day files (to files data.raw).

    @param data: sito.data object with attributes (raw, x_prep) set
    @param stations: tuple of stations
    @param t1: UTC object with date of first day
    @param t2: UTC object with date of last day
    @param filter_: tuple of min and max frequencies (or None) for filtering
    @param downsample: downsample to this sampling rate (or None)
    @param component: string out of ('Z', 'N', 'E', 'all')
    @param normalize: method for normalizing the data in time domain (or None)
           see xcorr.timeNorm
    @param param_norm: parameter passed to xcorr.timeNorm()
    @param whitening: bool, apply spectral whitening, default: False
    @type use_floating_stream: bool
    @param use_floating_stream: You should set this to True if you have day
           files which do not start/end exactly at midnight,
           default: True
    @param reserve: parameter passed to FloatingStream()
           should be the time between the end/beginning of your data and
           midnight

    Here is the documentation string of xcorr.timeNorm
    """
#            filter=(None, None), downsample=None, #@ReservedAssignment
#            eventremoval=None, param_removal=None,
#            normalize=None, param_norm=None,
#            whitening=None, filter_before_whitening=True, use_this_filter_after_whitening=None,
#            freq_domain=False, discard=0.5):
#    kwargs = dict(filter=filter, downsample=downsample, #@ReservedAssignment
#                  eventremoval=eventremoval, param_removal=param_removal,
#                  normalize=normalize, param_norm=param_norm,
#                  whitening=whitening, filter_before_whitening=filter_before_whitening,
#                  use_this_filter_after_whitening=use_this_filter_after_whitening,
#                  freq_domain=freq_domain, discard=discard)
    log.info('Prepare data for noise correlation: %s' % util.parameters())
    print 'Prepare data for noise correlation...'
    if pool:
        async_results = []
    if use_client:
        if 'trim' not in kwargs:
            kwargs['trim'] = 'day'
        for station in ProgressBar()(stations):
            for t_day in daygen(t1, t2):
        #for (station, t_day) in ProgressBar()(itertools.product(stations, daygen(t1, t2))):
                try:
                    stream = data.getRawStreamFromClient(t_day - 60, t_day + 24 * 3600 + 60, station, component=component)
                except Exception as ex:
                    log.info('Error loading station %s day %s: %s' % (str(station), t_day.date, ex))
                    continue
#                    if station != 'LVC' or not arclink_client_for_LVC:
#                        continue
#                    try:
#                        stream = arclink_client_for_LVC.getWaveform('GE', 'LVC', '10', 'BH' + component,
#                                                                    t_day - 60, t_day + 24 * 3600 + 60)
#                    except:
#                        continue
#                    else:
#                        log.info('Found data on GEOFON.')
                if len(stream) == 0:
                    log.info('Stream length 0 station %s day %s' % (str(station), t_day.date))
                    continue
                ## one day
                def _callback(msg):
                    if msg:
                        log.info(msg % ((stream[0].stats.npts / stream[0].stats.sampling_rate / 24 / 3600),
                                        str(station), t_day.date))
                args = (stream, data.getDay(station, t_day, data.x_prep))
                if pool:
                    if len(async_results) >= max_preload:
                        async_results[-max_preload].wait()
                    async_results.append(
                        pool.apply_async(_prepare_stream, args, kwargs, callback=_callback))
                else:
                    _callback(_prepare_stream(*args, **kwargs))
    else:
        if not use_floating_stream:
            data2 = data
        for station in stations:
            if use_floating_stream:
                data2 = FloatingStream(data, t1, station, 0, component=component, use_get_raw=True)
            for t_day in daygen(t1, t2):
                try:
                    stream = data2.getRawStream(t_day, station, component=component)
                except ValueError:
                    log.info('Error loading station %s day %s' % (str(station), t_day.date))
                    continue
                if len(stream) == 0:
                    log.info('Stream length 0 station %s day %s' % (str(station), t_day.date))
                    continue
                ## one day
                def _callback(msg):
                    if msg:
                        log.info(msg % ((stream[0].stats.npts / stream[0].stats.sampling_rate / 24 / 3600),
                                        str(station), t_day.date))
                args = (stream, data.getDay(station, t_day, data.x_prep))
                if pool:
                    if len(async_results) >= max_preload:
                        async_results[-max_preload].wait()
                    async_results.append(
                        pool.apply_async(_prepare_stream, args, kwargs, callback=_callback))
                else:
                    _callback(_prepare_stream(*args, **kwargs))
    if pool:
        for res in async_results:
            res.wait()
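
Finally, a hedged sketch of a prepare() call; the keyword arguments mirror the commented-out signature above (filter, downsample, normalize, whitening) and are assumptions, as are the station codes and dates.

# Hypothetical usage of prepare(); extra keyword arguments are forwarded to
# _prepare_stream(), as suggested by the commented-out signature above.
from obspy.core import UTCDateTime as UTC

stations = ('PB01', 'PB02', 'PB03')                      # placeholder station codes
prepare(data, stations, UTC('2010-01-01'), UTC('2010-12-31'),
        component='Z', use_client=True, pool=None,
        filter=(0.01, 0.5), downsample=10,               # assumed kwargs
        normalize='1bit', whitening=True)                # assumed normalization method name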