Example #1
 def test_cosineTaper(self):
     # SAC trace was generated with:
     # taper type cosine width 0.05
     for i in [99, 100]:
         sac_taper = os.path.join(self.path, "ones_trace_%d_tapered.sac" % i)
         tr = read(sac_taper)[0]
         tap = cosTaper(i, p=0.1, halfcosine=False, sactaper=True)
         np.testing.assert_array_almost_equal(tap, tr.data, decimal=6)
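For context, a minimal sketch of producing the same SAC-style taper with a recent ObsPy, where cosTaper was renamed to cosine_taper (the halfcosine/sactaper flags are unchanged); the values here are illustrative:

import numpy as np
from obspy.signal.invsim import cosine_taper

npts = 100
# 10 % cosine taper computed the way SAC does it ("taper type cosine width 0.05")
tap = cosine_taper(npts, p=0.1, halfcosine=False, sactaper=True)
tapered = np.ones(npts) * tap  # what the test compares against the SAC file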
Example #2
 def test_cosineTaper(self):
     # SAC trace was generated with:
     # taper type cosine width 0.05
     for i in [99, 100]:
         sac_taper = os.path.join(self.path,
                                  'ones_trace_%d_tapered.sac' % i)
         tr = read(sac_taper)[0]
         tap = cosTaper(i, p=0.1, halfcosine=False, sactaper=True)
         np.testing.assert_array_almost_equal(tap, tr.data, decimal=6)
Example #3
def tapering(st_aux):
    [ch, samp] = st_aux.shape
    try:
        cosVal = inv.cosTaper(int(samp), p=0.1)
    except AttributeError:
        # cosTaper was renamed to cosine_taper in newer ObsPy releases
        cosVal = inv.cosine_taper(int(samp), p=0.1)
    for ii in range(ch):
        st_aux[ii, :] = st_aux[ii, :] * cosVal
    return st_aux
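The try/except above guards against the ObsPy rename of cosTaper to cosine_taper. A slightly more explicit version of the same guard (a sketch, assuming inv is the obspy.signal.invsim module):

def get_cos_taper(nsamp, p=0.1):
    # prefer the new name, fall back to the pre-rename one
    taper_func = getattr(inv, 'cosine_taper', None) or getattr(inv, 'cosTaper')
    return taper_func(int(nsamp), p=p)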
Example #4
def time_stretch_apply(corr_data, stretch, single_sided=False):
    """ Apply time axis stretch to traces.

    Stretch the time axis of traces, e.g. to compensate for a velocity shift
    in the propagation medium.
    Such shifts can occur in correlation traces in case of a drifting clock.
    This function ``applies`` the stretches. To correct for stretching
    estimated with :func:`~miic.core.stretch_mod.time_stretch_estimate` you
    need to apply negative stretching.

    :type corr_data: :class:`~numpy.ndarray`
    :param corr_data: 2d ndarray containing the correlation functions that are
        to be shifted.
        One for each row.
    :type stretch: :class:`~numpy.ndarray`
    :param stretch: ndarray with ``stretch.shape[0] = corr_data.shape[0]``
        containing the stretches in relative units.

    :rtype: :class:`~numpy.ndarray`
    :return: **stretched_mat**: stretched version of the input matrix
    """
    mat = corr_data
    # check input
    # stretch is just a 1d array
    if len(stretch.shape) == 1:
        t_stretch = np.zeros([stretch.shape[0], 1])
        t_stretch[:, 0] = stretch
        stretch = t_stretch
    # stretch has the wrong length
    elif stretch.shape[0] != mat.shape[0]:
        print "InputError: shift.shape[0] must be equal corr_data.shape[0]"
        return 0
    # shift has multiple columns (multiple measurements for the same time)
    if stretch.shape[1] > 1:
        stretch = np.delete(stretch, np.arange(1, stretch.shape[1]), axis=1)

    # taper the traces to avoid interpolation
    # artefacts at the ends of the trace
    taper = cosTaper(mat.shape[1], 0.05)
    mat *= np.tile(taper, [mat.shape[0], 1])

    # time axis
    if single_sided:
        time_idx = np.arange(mat.shape[1])
    else:
        time_idx = np.arange(mat.shape[1]) - (mat.shape[1] - 1.0) / 2.0

    # allocate space for the result
    stretched_mat = np.zeros_like(mat)

    # stretch every line
    for (ii, line) in enumerate(mat):
        s = UnivariateSpline(time_idx, line, s=0)
        stretched_mat[ii, :] = s(time_idx * np.exp(-stretch[ii]))

    return stretched_mat
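A minimal usage sketch (synthetic data; names are illustrative): as the docstring notes, stretching estimated with time_stretch_estimate is corrected by applying it with a negative sign. Note that the function tapers the input matrix in place.

import numpy as np
corr = np.random.randn(3, 1001)             # three correlation functions, one per row
est = np.array([0.001, -0.002, 0.0005])     # estimated relative stretches
corrected = time_stretch_apply(corr, -est)  # negative sign undoes the drift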
Example #5
def time_stretch_apply(corr_data, stretch, single_sided=False):
    """ Apply time axis stretch to traces.

    Stretch the time axis of traces, e.g. to compensate for a velocity shift
    in the propagation medium.
    Such shifts can occur in correlation traces in case of a drifting clock.
    This function ``applies`` the stretches. To correct for stretching
    estimated with :func:`~miic.core.stretch_mod.time_stretch_estimate` you
    need to apply negative stretching.

    :type corr_data: :class:`~numpy.ndarray`
    :param corr_data: 2d ndarray containing the correlation functions that are
        to be shifted.
        One for each row.
    :type stretch: :class:`~numpy.ndarray`
    :param stretch: ndarray with ``stretch.shape[0] = corr_data.shape[0]``
        containing the stretches in relative units.

    :rtype: :class:`~numpy.ndarray`
    :return: **stretched_mat**: stretched version of the input matrix
    """
    mat = corr_data
    # check input
    # stretch is just a 1d array
    if len(stretch.shape) == 1:
        t_stretch = np.zeros([stretch.shape[0], 1])
        t_stretch[:, 0] = stretch
        stretch = t_stretch
    # stretch has the wrong length
    elif stretch.shape[0] != mat.shape[0]:
        print("InputError: stretch.shape[0] must equal corr_data.shape[0]")
        return 0
    # shift has multiple columns (multiple measurements for the same time)
    if stretch.shape[1] > 1:
        stretch = np.delete(stretch, np.arange(1, stretch.shape[1]), axis=1)

    # taper the traces to avoid interpolation
    # artefacts at the ends of the trace
    taper = cosTaper(mat.shape[1], 0.05)
    mat *= np.tile(taper, [mat.shape[0], 1])

    # time axis
    if single_sided:
        time_idx = np.arange(mat.shape[1])
    else:
        time_idx = np.arange(mat.shape[1]) - (mat.shape[1] - 1.) / 2.

    # allocate space for the result
    stretched_mat = np.zeros_like(mat)

    # stretch every line
    for (ii, line) in enumerate(mat):
        s = UnivariateSpline(time_idx, line, s=0)
        stretched_mat[ii, :] = s(time_idx * np.exp(-stretch[ii]))

    return stretched_mat
Example #6
 def __call__(self, tr):
     try:
         if tr.stats.npts > 1:
             tr.data *= cosTaper(tr.stats.npts, 0.01)
             FFT = fft(tr.data)
             tr.data = ifft(FFT / abs(FFT)).real
     except ValueError:
         print "ERROR"
         print tr.stats.npts
     return tr
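One caveat of this whitening step: abs(FFT) can be exactly zero (e.g. the DC bin of a demeaned trace), which turns the division into NaNs. A defensive variant, my own sketch rather than anything from the source:

import numpy as np
from scipy.fftpack import fft, ifft

def safe_whiten(data, eps=1e-20):
    spec = fft(data)
    # keep the magnitude away from zero before normalizing
    return ifft(spec / np.maximum(np.abs(spec), eps)).real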
Example #8
def rotate(eventdir, sacfiles):
    """preprocess performs the demean,detrend,taper and rotation into radial and
    transverse components. It saves these at STACK_R.sac and STACK_T.sac"""

    ev = []
    # READ 3 Component SAC files into object array.
    for i in range(2):
        ff = os.path.join(eventdir, sacfiles[i])
        try:
            st = read(ff)
        except Exception:
            raise SeisDataError('ReadSacError')
        ev.append(st[0])

    # Calculate values to be used in transformations
    if ev[1].stats.sac['t3'] < 0:
        raise SeisDataError('t3 not picked')
    dt = ev[1].stats.delta
    pslow = ev[1].stats.sac['user0']
    baz = ev[1].stats.sac['baz']
    PP = ev[1].stats.sac['t5']
    N = ev[1].stats.npts
    # Begin seismogram 60 seconds before P arrival
    ####### TRUNCATE if not truncated ############

    # Here we use either a full-size taper, or a short taper padded with zeros
    if PP and (PP < ev[1].stats.sac['e']):
        # Window out 1/2 second before PP
        nend = int((PP - ev[1].stats.sac['b'] - 0.5) / dt)
        ctap = np.append(cosTaper(nend), np.zeros(N - nend))
    else:
        ctap = cosTaper(N)

    # detrend, taper all three components

    # Call freetran and rotate into P and S space
    ev[0].data, ev[1].data = freetran(
        ev[0].data, ev[1].data, pslow, 6.45, 3.64)
    # Save freetran transformed data objects
    ev[0].write(os.path.join(eventdir,'stack_P.sac'), format='SAC')
    ev[1].write(os.path.join(eventdir,'stack_S.sac'), format='SAC')
Example #9
def detrend_taper_rotate(eventdir, sacfiles):
    """preprocess performs the demean,detrend,taper and rotation into radial and
    transverse components. It saves these at STACK_R.sac and STACK_T.sac"""

    ev = []
    # READ 3 Component SAC files into object array.
    for i in range(3):
        ff = os.path.join(eventdir, sacfiles[i])
        st = read(ff)
        ev.append(st[0])

    # Calculate values to be used in transformations
    dt = ev[1].stats.delta
    pslow = ev[1].stats.sac['user0']
    baz = ev[1].stats.sac['baz']
    PP = ev[1].stats.sac['t7']
    N = ev[1].stats.npts
    # Begin seismogram 50 seconds before P arrival
    # Here we use either a full-size taper, or a short taper padded with zeros
    if PP and (PP < ev[1].stats.sac['e']):
        # Window out 1/2 second before PP
        nend = int((PP - ev[1].stats.sac['b'] - 0.5) / dt)
        ctap = np.append(cosTaper(nend), np.zeros(N - nend))
    else:
        ctap = cosTaper(N)

    # detrend, taper all three components

    for i in range(3):
        ####### DETREND & TAPER #################
        ev[i].data = detrend(ev[i].data) * ctap

    # R, T = rotate(N, E)
    ev[1].data, ev[0].data = rotate(ev[1].data, ev[0].data, baz)
    # Call freetran and rotate into P and S space
    ev[1].data, ev[2].data = freetran(
        ev[1].data, ev[2].data, pslow, 6.06, 3.5)
    # Save freetran transformed data objects
    ev[1].write(os.path.join(eventdir,'stack_P.sac'), format='SAC')
    ev[2].write(os.path.join(eventdir,'stack_S.sac'), format='SAC')
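A quick sanity check for the padded-taper construction above (a sketch using the renamed cosine_taper; older ObsPy calls it cosTaper): the taper plus zero padding must have exactly N samples so the element-wise product with the detrended trace is valid.

import numpy as np
from obspy.signal.invsim import cosine_taper

N, nend = 1000, 600
ctap = np.append(cosine_taper(nend, 0.1), np.zeros(N - nend))
assert len(ctap) == N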
Example #10
  def filter(self):
    """ Filter data for each band.
    """
    n_bands = self._N_bands()
    LEN = self.tr.stats.npts
    df = self.tr.stats.sampling_rate

    # create zeros 2D array for BF
    BF = np.zeros(shape=(n_bands, LEN))

    for j in range(n_bands):
      octave_high = (self.freqmin + self.freqmin * 2.0) / 2.0 * (2**j)
      octave_low = octave_high / 2.0
      BF[j] = bandpass(self.tr.data, octave_low, octave_high, df,
                       corners=self.cnr, zerophase=False)
      BF[j] = cosTaper(LEN, self.perc_taper) * BF[j]

    return BF
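The band edges used above tile the frequency axis in contiguous octaves starting at 0.75 * freqmin. A quick check (sketch, assuming freqmin = 1.0 Hz):

freqmin = 1.0
for j in range(4):
    octave_high = (freqmin + freqmin * 2.0) / 2.0 * (2 ** j)  # 1.5 * freqmin * 2**j
    octave_low = octave_high / 2.0
    print("band %d: %.2f - %.2f Hz" % (j, octave_low, octave_high))
# band 0: 0.75 - 1.50 Hz, band 1: 1.50 - 3.00 Hz, band 2: 3.00 - 6.00 Hz, ...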
Example #11
def stream2mult_array(str,wlen,overlap):
    """ turn obspy stream into array of short 2D arrays

    the data from a stream is extracted, sliced, and copied to 2D numpy arrays
     - ** parameters**, **types**, **return**, and **return types**::
         :param str: stream to be converted
        :param wlen: length of short arrays
        :param overlap: length of overlap between short arrays

        :type str: obspy stream
        :type wlen: length of windows in seconds
        :type overlap: length of overlap in seconds

        :return: 2D array with time series for all channels
        :rtype: numpy array

        :return: array of times of the beginning of the windows
        :rtype: list of UTCDateTime

        :return: array of window lengths
        :rtype: list of seconds
    """
    arr = stream2array(str)
    sps = str[0].stats.sampling_rate
    npts = str[0].stats.npts
    timeI = str[0].stats.starttime
    wlen_sam = sps * wlen
    overlap_sam = sps * overlap
    curr = 0
    mat = []
    time = []
    lenA = []
    try:
        cosVal = inv.cosTaper(int(wlen_sam), p=0.1)
    except AttributeError:
        # cosTaper was renamed to cosine_taper in newer ObsPy releases
        cosVal = inv.cosine_taper(int(wlen_sam), p=0.1)
    while curr + wlen_sam <= npts:
        mat.append(arr[:, int(curr):int(curr + wlen_sam)] * cosVal)
        time.append(UTCDateTime(float(timeI) + float(curr) / sps))
        lenA.append(len(arr[0, int(curr):int(curr + wlen_sam)]))
        curr = curr + wlen_sam - overlap_sam
    mat = np.asarray(mat)
    return mat, time, lenA
Example #12
def spect_norm(x):
    """ Computes the spectral normalization of  1D numpy array x

    This function divides the amplitude of the x spectrum by its absolute value

    :type x: :class:`~numpy.ndarray`
    :param x: 1d array

    :rtype: :class:`~numpy.ndarray`
    :return: **x_copy**: Whitened version of x
    """

    x_copy = x.copy()

    x_copy *= cosTaper(len(x_copy), 0.01)

    FFT = fft(x_copy)

    x_copy = ifft(FFT / abs(FFT)).real

    return x_copy
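A possible quick check of the whitening (sketch): after spect_norm the amplitude spectrum is flat at ~1 in every bin, so only phase information survives.

import numpy as np
from scipy.fftpack import fft

x = np.random.randn(1024)
w = spect_norm(x)
amp = np.abs(fft(w))  # ~1 everywhere, up to floating point rounding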
Example #14
    def filter(self):
        """ Filter data for each band.
    """
        n_bands = self._N_bands()
        LEN = self.tr.stats.npts
        df = self.tr.stats.sampling_rate

        # create zeros 2D array for BF
        BF = np.zeros(shape=(n_bands, LEN))

        for j in range(n_bands):
            octave_high = (self.freqmin + self.freqmin * 2.0) / 2.0 * (2**j)
            octave_low = octave_high / 2.0
            BF[j] = bandpass(self.tr.data,
                             octave_low,
                             octave_high,
                             df,
                             corners=self.cnr,
                             zerophase=False)
            BF[j] = cosTaper(LEN, self.perc_taper) * BF[j]

        return BF
Example #15
def check_and_phase_shift(trace):
    # print trace
    taper_length = 20.0
    if trace.stats.npts < 4 * taper_length * trace.stats.sampling_rate:
        trace.data = np.zeros(trace.stats.npts)
        return trace

    dt = np.mod(trace.stats.starttime.datetime.microsecond * 1.0e-6,
                trace.stats.delta)
    if (trace.stats.delta - dt) <= np.finfo(float).eps:
        dt = 0
    if dt != 0:
        if dt <= (trace.stats.delta / 2.):
            dt = -dt
#            direction = "left"
        else:
            dt = (trace.stats.delta - dt)
#            direction = "right"
        trace.detrend(type="demean")
        trace.detrend(type="simple")
        taper_1s = taper_length * float(
            trace.stats.sampling_rate) / trace.stats.npts
        cp = cosTaper(trace.stats.npts, taper_1s)
        trace.data *= cp

        n = int(2**nextpow2(len(trace.data)))
        FFTdata = scipy.fftpack.fft(trace.data, n=n)
        fftfreq = scipy.fftpack.fftfreq(n, d=trace.stats.delta)
        FFTdata = FFTdata * np.exp(1j * 2. * np.pi * fftfreq * dt)
        trace.data = np.real(
            scipy.fftpack.ifft(FFTdata, n=n)[:len(trace.data)])
        trace.stats.starttime += dt
        return trace
    else:
        return trace
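The sub-sample alignment above relies on the Fourier shift theorem: multiplying the spectrum by exp(2j*pi*f*dt) maps x(t) to x(t + dt), after which the trace's starttime is adjusted by the same dt. A tiny self-contained illustration (my own sketch, not from the source):

import numpy as np

n, delta, dt = 256, 0.01, 0.004  # samples, sampling interval (s), shift (s)
t = np.arange(n) * delta
sig = np.sin(2 * np.pi * 5.0 * t)
spec = np.fft.fft(sig)
freq = np.fft.fftfreq(n, d=delta)
shifted = np.real(np.fft.ifft(spec * np.exp(1j * 2 * np.pi * freq * dt)))
# away from the edges, shifted approximates sin(2 * pi * 5.0 * (t + dt))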
Example #16
def array_processing(stream, win_len, win_frac, sll_x, slm_x, sll_y, slm_y,
                     sl_s, semb_thres, vel_thres, frqlow, frqhigh, stime,
                     etime, prewhiten, verbose=False, coordsys='lonlat',
                     timestamp='mlabday', method=0, store=None):
    """
    Method for Seismic-Array-Beamforming/FK-Analysis/Capon

    :param stream: Stream object, the trace.stats dict like class must
        contain an :class:`~obspy.core.util.attribdict.AttribDict` with
        'latitude', 'longitude' (in degrees) and 'elevation' (in km), or 'x',
        'y', 'elevation' (in km) items/attributes. See param ``coordsys``.
    :type win_len: float
    :param win_len: Sliding window length in seconds
    :type win_frac: float
    :param win_frac: Fraction of sliding window to use for step
    :type sll_x: float
    :param sll_x: slowness x min (lower)
    :type slm_x: float
    :param slm_x: slowness x max
    :type sll_y: float
    :param sll_y: slowness y min (lower)
    :type slm_y: float
    :param slm_y: slowness y max
    :type sl_s: float
    :param sl_s: slowness step
    :type semb_thres: float
    :param semb_thres: Threshold for semblance
    :type vel_thres: float
    :param vel_thres: Threshold for velocity
    :type frqlow: float
    :param frqlow: lower frequency for fk/capon
    :type frqhigh: float
    :param frqhigh: higher frequency for fk/capon
    :type stime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param stime: Start time of interest
    :type etime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param etime: End time of interest
    :type prewhiten: int
    :param prewhiten: Do prewhitening, values: 1 or 0
    :param coordsys: valid values: 'lonlat' and 'xy', choose which stream
        attributes to use for coordinates
    :type timestamp: str
    :param timestamp: valid values: 'julsec' and 'mlabday'; 'julsec' returns
        the timestamp in seconds since 1970-01-01T00:00:00, 'mlabday'
        returns the timestamp in days (decimals represent hours, minutes
        and seconds) since '0001-01-01T00:00:00' as needed for matplotlib
        date plotting (see e.g. matplotlib's num2date)
    :type method: int
    :param method: the method to use 0 == bf, 1 == capon
    :type store: function
    :param store: A custom function which gets called on each iteration. It is
        called with the relative power map and the time offset as first and
        second arguments and the iteration number as third argument. Useful for
        storing or plotting the map for each iteration. For this purpose the
        dump function of this module can be used.
    :return: :class:`numpy.ndarray` of timestamp, relative relpow, absolute
        relpow, backazimuth, slowness
    """
    res = []
    eotr = True

    # check that sampling rates do not vary
    fs = stream[0].stats.sampling_rate
    if len(stream) != len(stream.select(sampling_rate=fs)):
        msg = 'in sonic sampling rates of traces in stream are not equal'
        raise ValueError(msg)

    grdpts_x = int(((slm_x - sll_x) / sl_s + 0.5) + 1)
    grdpts_y = int(((slm_y - sll_y) / sl_s + 0.5) + 1)

    geometry = get_geometry(stream, coordsys=coordsys, verbose=verbose)

    if verbose:
        print("geometry:")
        print(geometry)
        print("stream contains following traces:")
        print(stream)
        print(("stime = " + str(stime) + ", etime = " + str(etime)))

    time_shift_table = get_timeshift(geometry, sll_x, sll_y,
                                     sl_s, grdpts_x, grdpts_y)
    # offset of arrays
    spoint, _epoint = get_spoint(stream, stime, etime)
    #
    # loop with a sliding window over the dat trace array and apply bbfk
    #
    nstat = len(stream)
    fs = stream[0].stats.sampling_rate
    nsamp = int(win_len * fs)
    nstep = int(nsamp * win_frac)

    # generate plan for rfftr
    nfft = nextpow2(nsamp)
    deltaf = fs / float(nfft)
    nlow = int(frqlow / float(deltaf) + 0.5)
    nhigh = int(frqhigh / float(deltaf) + 0.5)
    nlow = max(1, nlow)  # avoid using the offset
    nhigh = min(nfft // 2 - 1, nhigh)  # avoid using nyquist
    nf = nhigh - nlow + 1  # include upper and lower frequency
    # to speed up the routine a bit we estimate all steering vectors in advance
    steer = np.empty((nf, grdpts_x, grdpts_y, nstat), dtype='c16')
    clibsignal.calcSteer(nstat, grdpts_x, grdpts_y, nf, nlow,
                         deltaf, time_shift_table, steer)
    R = np.empty((nf, nstat, nstat), dtype='c16')
    ft = np.empty((nstat, nf), dtype='c16')
    newstart = stime
    tap = cosTaper(nsamp, p=0.22)  # 0.22 matches 0.2 of historical C bbfk.c
    offset = 0
    relpow_map = np.empty((grdpts_x, grdpts_y), dtype='f8')
    abspow_map = np.empty((grdpts_x, grdpts_y), dtype='f8')
    while eotr:
        try:
            for i, tr in enumerate(stream):
                dat = tr.data[spoint[i] + offset:
                              spoint[i] + offset + nsamp]
                dat = (dat - dat.mean()) * tap
                ft[i, :] = np.fft.rfft(dat, nfft)[nlow:nlow + nf]
        except IndexError:
            break
        ft = np.require(ft, 'c16', ['C_CONTIGUOUS'])
        relpow_map.fill(0.)
        abspow_map.fill(0.)
        # computing the covariances of the signal at different receivers
        dpow = 0.
        for i in range(nstat):
            for j in range(i, nstat):
                R[:, i, j] = ft[i, :] * ft[j, :].conj()
                if method == 1:
                    R[:, i, j] /= np.abs(R[:, i, j].sum())
                if i != j:
                    R[:, j, i] = R[:, i, j].conjugate()
                else:
                    dpow += np.abs(R[:, i, j].sum())
        dpow *= nstat
        if method == 1:
            # P(f) = 1/(e.H R(f)^-1 e)
            for n in range(nf):
                R[n, :, :] = np.linalg.pinv(R[n, :, :], rcond=1e-6)

        errcode = clibsignal.generalizedBeamformer(
            relpow_map, abspow_map, steer, R, nstat, prewhiten,
            grdpts_x, grdpts_y, nf, dpow, method)
        if errcode != 0:
            msg = 'generalizedBeamforming exited with error %d'
            raise Exception(msg % errcode)
        ix, iy = np.unravel_index(relpow_map.argmax(), relpow_map.shape)
        relpow, abspow = relpow_map[ix, iy], abspow_map[ix, iy]
        if store is not None:
            store(relpow_map, abspow_map, offset)
        # here we compute baz, slow
        slow_x = sll_x + ix * sl_s
        slow_y = sll_y + iy * sl_s

        slow = np.sqrt(slow_x ** 2 + slow_y ** 2)
        if slow < 1e-8:
            slow = 1e-8
        azimut = 180 * math.atan2(slow_x, slow_y) / math.pi
        baz = azimut % -360 + 180
        if relpow > semb_thres and 1. / slow > vel_thres:
            res.append(np.array([newstart.timestamp, relpow, abspow, baz,
                                 slow]))
            if verbose:
                print(newstart, (newstart + (nsamp / fs)), res[-1][1:])
        if (newstart + (nsamp + nstep) / fs) > etime:
            eotr = False
        offset += nstep

        newstart += nstep / fs
    res = np.array(res)
    if timestamp == 'julsec':
        pass
    elif timestamp == 'mlabday':
        # 719162 == days between 0001 and 1970
        res[:, 0] = res[:, 0] / (24. * 3600) + 719162
    else:
        msg = "Option timestamp must be one of 'julsec', or 'mlabday'"
        raise ValueError(msg)
    return np.array(res)
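For reference, a call sketch along the lines of the ObsPy tutorial (assuming st is a Stream with coordinates attached to every trace and stime/etime are UTCDateTime objects; all parameter values are illustrative):

kwargs = dict(
    # slowness grid: X min, X max, Y min, Y max, slowness step (s/km)
    sll_x=-3.0, slm_x=3.0, sll_y=-3.0, slm_y=3.0, sl_s=0.03,
    # sliding window properties
    win_len=1.0, win_frac=0.05,
    # frequency band and prewhitening
    frqlow=1.0, frqhigh=8.0, prewhiten=0,
    # thresholds (disabled here) and time range
    semb_thres=-1e9, vel_thres=-1e9, stime=stime, etime=etime,
)
out = array_processing(st, **kwargs)  # columns: timestamp, rel. power, abs. power, baz, slowness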
Example #17
def preprocess(db, stations, comps, goal_day, params, tramef_Z, tramef_E = np.array([]), tramef_N = np.array([])):
    datafilesZ = {}
    datafilesE = {}
    datafilesN = {}

    for station in stations:
        datafilesZ[station] = []
        datafilesE[station] = []
        datafilesN[station] = []
        net, sta = station.split('.')
        gd = datetime.datetime.strptime(goal_day, '%Y-%m-%d')
        files = get_data_availability(
            db, net=net, sta=sta, starttime=gd, endtime=gd)
        for file in files:
            comp = file.comp
            fullpath = os.path.join(file.path, file.file)
            if comp[-1] == 'Z':
                datafilesZ[station].append(fullpath)
            elif comp[-1] == 'E':
                datafilesE[station].append(fullpath)
            elif comp[-1] == 'N':
                datafilesN[station].append(fullpath)

    j = 0
    for istation, station in enumerate(stations):
        for comp in comps:
            files = eval("datafiles%s['%s']" % (comp, station))
            if len(files) != 0:
                logging.debug("%s.%s Reading %i Files" %
                              (station, comp, len(files)))
                stream = Stream()
                for file in sorted(files):
                    st = read(file, dtype=np.float,
                              starttime=UTCDateTime(gd),
                              endtime=UTCDateTime(gd)+86400)
                    for tr in st:
                        tr.data = tr.data.astype(np.float)
                    stream += st
                    del st

                logging.debug("Checking sample alignment")
                for i, trace in enumerate(stream):
                    stream[i] = check_and_phase_shift(trace)

                stream.sort()
                logging.debug("Checking Gaps")
                if len(getGaps(stream)) > 0:
                    max_gap = 10
                    only_too_long = False
                    while getGaps(stream) and not only_too_long:
                        too_long = 0
                        gaps = getGaps(stream)
                        for gap in gaps:
                            if int(gap[-1]) <= max_gap:
                                stream[gap[0]] = stream[gap[0]].__add__(stream[gap[1]], method=0, fill_value="interpolate")
                                stream.remove(stream[gap[1]])
                                break
                            else:
                                too_long += 1
                        if too_long == len(gaps):
                            only_too_long = True

                taper_length = 20.0  # seconds
                for trace in stream:
                    if trace.stats.npts < 4 * taper_length*trace.stats.sampling_rate:
                        trace.data = np.zeros(trace.stats.npts)
                    else:
                        trace.detrend(type="demean")
                        trace.detrend(type="linear")
                        taper_1s = taper_length * float(trace.stats.sampling_rate) / trace.stats.npts
                        cp = cosTaper(trace.stats.npts, taper_1s)
                        trace.data *= cp
                try:
                    stream.merge(method=0, fill_value=0.0)
                except Exception:
                    continue

                logging.debug("%s.%s Slicing Stream to %s:%s" % (station, comp, utcdatetime.UTCDateTime(
                    goal_day.replace('-', '')), utcdatetime.UTCDateTime(goal_day.replace('-', '')) + params.goal_duration - stream[0].stats.delta))
                stream[0].trim(utcdatetime.UTCDateTime(goal_day.replace('-', '')), utcdatetime.UTCDateTime(
                    goal_day.replace('-', '')) + params.goal_duration - stream[0].stats.delta, pad=True, fill_value=0.0,
                    nearest_sample=False)


                if get_config(db, 'remove_response', isbool=True):
                    logging.debug('Removing instrument response')
                    response_format = get_config(db, 'response_format')
                    response_prefilt = eval(get_config(db, 'response_prefilt'))
                    files = glob.glob(os.path.join(get_config(db,
                                                              'response_path'),
                                                   "*"))
                    if response_format == "inventory":
                        firstinv = False
                        inventory = None
                        for file in files:
                            try:
                                inv = read_inventory(file)
                                if firstinv:
                                    inventory = inv
                                    firstinv = False
                                else:
                                    inventory += inv
                            except Exception:
                                pass
                        stream.attach_response(inventory)
                        stream.remove_response(output='VEL',
                                               pre_filt=response_prefilt)
                    elif response_format == "dataless":
                        for file in files:
                            p = Parser(file)
                            try:
                                p.getPAZ(stream[0].id,
                                         datetime=UTCDateTime(gd))
                                break
                            except Exception:
                                traceback.print_exc()
                                del p
                                continue
                        stream.simulate(seedresp={'filename': p, "units":"VEL"},
                                        pre_filt=response_prefilt,
                                        paz_remove=None,
                                        paz_simulate=None,)
                    elif response_format == "paz":
                        msg = "Unexpected type for `response_format`: %s" % \
                              response_format
                        raise TypeError(msg)
                    elif response_format == "resp":
                        msg = "Unexpected type for `response_format`: %s" % \
                              response_format
                        raise TypeError(msg)
                    else:
                        msg = "Unexpected type for `response_format`: %s" % \
                              response_format
                        raise TypeError(msg)
                trace = stream[0]

                logging.debug(
                    "%s.%s Highpass at %.2f Hz" % (station, comp, params.preprocess_highpass))
                trace.filter("highpass", freq=params.preprocess_highpass, zerophase=True)
                
                if trace.stats.sampling_rate != params.goal_sampling_rate:
                    logging.debug(
                        "%s.%s Lowpass at %.2f Hz" % (station, comp, params.preprocess_lowpass))
                    trace.filter("lowpass", freq=params.preprocess_lowpass, zerophase=True)

                    

                    if params.resampling_method == "Resample":
                        logging.debug("%s.%s Downsample to %.1f Hz" %
                                      (station, comp, params.goal_sampling_rate))
                        trace.data = resample(
                            trace.data, params.goal_sampling_rate / trace.stats.sampling_rate, 'sinc_fastest')

                    elif params.resampling_method == "Decimate":
                        logging.debug("%s.%s Decimate by a factor of %i" %
                                      (station, comp, params.decimation_factor))
                        trace.data = trace.data[::params.decimation_factor]
                    trace.stats.sampling_rate = params.goal_sampling_rate

                year, month, day, hourf, minf, secf, wday, yday, isdst = trace.stats.starttime.utctimetuple()

                if j == 0:
                    t = time.strptime("%04i:%02i:%02i:%02i:%02i:%02i" %
                                      (year, month, day, hourf, minf, secf), "%Y:%m:%d:%H:%M:%S")
                    basetime = calendar.timegm(t)

                if len(trace.data) % 2 != 0:
                    trace.data = np.append(trace.data, 0.)

                if comp == "Z":
                    tramef_Z[istation] = trace.data
                elif comp == "E":
                    tramef_E[istation] = trace.data
                elif comp == "N":
                    tramef_N[istation] = trace.data

                del trace, stream
    if len(tramef_E) != 0:
        return basetime, tramef_Z, tramef_E, tramef_N
    else:
        return basetime, tramef_Z
Example #18
def main():
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s [%(levelname)s] %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    logging.info('*** Starting: Compute CC ***')

    # Connection to the DB
    db = connect()

    if len(get_filters(db, all=False)) == 0:
        logging.info("NO FILTERS DEFINED, exiting")
        sys.exit()

    # Get Configuration
    params = Params()
    params.goal_sampling_rate = float(get_config(db, "cc_sampling_rate"))
    params.goal_duration = float(get_config(db, "analysis_duration"))
    params.overlap = float(get_config(db, "overlap"))
    params.maxlag = float(get_config(db, "maxlag"))
    params.min30 = float(get_config(db, "corr_duration")) * params.goal_sampling_rate
    params.windsorizing = float(get_config(db, "windsorizing"))
    params.resampling_method = get_config(db, "resampling_method")
    params.decimation_factor = int(get_config(db, "decimation_factor"))
    params.preprocess_lowpass = float(get_config(db, "preprocess_lowpass"))
    params.preprocess_highpass = float(get_config(db, "preprocess_highpass"))
    params.keep_all = get_config(db, 'keep_all', isbool=True)
    params.keep_days = get_config(db, 'keep_days', isbool=True)
    params.components_to_compute = get_components_to_compute(db)

    logging.info("Will compute %s" % " ".join(params.components_to_compute))

    while is_next_job(db, jobtype='CC'):
        jobs = get_next_job(db, jobtype='CC')
        stations = []
        pairs = []
        refs = []

        for job in jobs:
            refs.append(job.ref)
            pairs.append(job.pair)
            netsta1, netsta2 = job.pair.split(':')
            stations.append(netsta1)
            stations.append(netsta2)
            goal_day = job.day

        stations = np.unique(stations)

        logging.info("New CC Job: %s (%i pairs with %i stations)" %
                     (goal_day, len(pairs), len(stations)))
        jt = time.time()

        xlen = int(params.goal_duration * params.goal_sampling_rate)

        if ''.join(params.components_to_compute).count('R') > 0 or ''.join(params.components_to_compute).count('T') > 0:
            comps = ['Z', 'E', 'N']
            tramef_Z = np.zeros((len(stations), xlen))
            tramef_E = np.zeros((len(stations), xlen))
            tramef_N = np.zeros((len(stations), xlen))
            basetime, tramef_Z, tramef_E, tramef_N = preprocess(db, stations, comps, goal_day, params, tramef_Z, tramef_E, tramef_N)

        else:
            comps = ['Z']
            tramef_Z = np.zeros((len(stations), xlen))
            basetime, tramef_Z = preprocess(db, stations, comps, goal_day, params, tramef_Z)


        # print '##### STREAMS ARE ALL PREPARED AT goal Hz #####'
        dt = 1. / params.goal_sampling_rate
        # Calculate the number of slices

        slices = int(params.goal_duration * params.goal_sampling_rate / params.min30)
        begins = []
        ends = []
        i = 0
        while i <= (params.goal_duration - params.min30/params.goal_sampling_rate):
            begins.append(int(i * params.goal_sampling_rate))
            ends.append(int(i * params.goal_sampling_rate + params.min30))
            i += int(params.min30/params.goal_sampling_rate * (1.0-params.overlap))
        slices = len(begins)

        #
        # Computing only ZZ components ? Then we can be much faster:
        #

        if False:
        #if len(params.components_to_compute) == 1 and params.components_to_compute[0] == "ZZ":
            Nfft = params.min30
            if params.min30 / 2 % 2 != 0:
                Nfft = params.min30 + 2
            cp = cosTaper(int(params.min30), 0.04)

            logging.info("Pre-Whitening Traces")
            whitened_slices = np.zeros((len(stations), len(get_filters(db, all=False)), slices, int(Nfft)), dtype=np.complex)
            for istation, station in enumerate(stations):
                for islice, (begin, end) in enumerate(zip(begins,ends)):
                    tmp = tramef_Z[istation, begin:end]
                    rmsmat = np.std(np.abs(tmp))
                    if params.windsorizing == -1:
                        tmp = np.sign(tmp)
                    elif params.windsorizing != 0:
                        indexes = np.where(
                            np.abs(tmp) > (params.windsorizing * rmsmat))[0]
                        tmp[indexes] = (tmp[indexes] / np.abs(
                            tmp[indexes])) * params.windsorizing * rmsmat
                    tmp *= cp
                    for ifilter, filter in enumerate(get_filters(db, all=False)):
                        whitened_slices[istation, ifilter, islice,:] = whiten(tmp, Nfft, dt, float(filter.low), float(filter.high), plot=False)
                    del tmp
            del tramef_Z
            logging.info("Processing CC")
            for ifilter, filter in enumerate(get_filters(db, all=False)):
                for pair in pairs:
                    orig_pair = pair
                    if params.keep_all:
                        allcorr = {}
                    if params.keep_days:
                        daycorr = np.zeros(get_maxlag_samples(db,))
                        ndaycorr = 0
                    station1, station2 = pair.split(':')
                    pair = (np.where(stations == station1)
                            [0][0], np.where(stations == station2)[0][0])
                    for islice in range(slices):
                        tmp = np.vstack((whitened_slices[pair[0], ifilter, islice],
                                         whitened_slices[pair[1], ifilter, islice]))
                        corr = myCorr(tmp, np.ceil(params.maxlag / dt), plot=False)
                        tmptime = time.gmtime(basetime + begins[islice] /
                                                  params.goal_sampling_rate)
                        thisdate = time.strftime("%Y-%m-%d", tmptime)
                        thistime = time.strftime("%Y-%m-%d %H:%M:%S",
                                                 tmptime)
                        if not np.any(np.isnan(corr)) and not np.any(np.isinf(corr)):
                            if params.keep_all:
                                ccfid = "%s_%s_%s_%s_%s" % (station1, station2,
                                                            filter.ref, 'ZZ',
                                                            thisdate)
                                if ccfid not in allcorr:
                                    allcorr[ccfid] = {}
                                allcorr[ccfid][thistime] = corr

                            if params.keep_days:
                                daycorr += corr
                                ndaycorr += 1

                    if params.keep_all:
                        for ccfid in allcorr.keys():
                            export_allcorr(db, ccfid, allcorr[ccfid])

                    if params.keep_days:
                        thisdate = time.strftime(
                            "%Y-%m-%d", time.gmtime(basetime))
                        thistime = time.strftime(
                            "%H_%M", time.gmtime(basetime))
                        add_corr(
                            db, station1.replace(
                                '.', '_'), station2.replace('.', '_'), filter.ref,
                            thisdate, thistime, params.min30 / params.goal_sampling_rate, 'ZZ', daycorr, params.goal_sampling_rate, day=True, ncorr=ndaycorr)
                    update_job(db, goal_day, orig_pair, 'CC', 'D')
            logging.info("Job Finished. It took %.2f seconds" % (time.time() - jt))

        else:
            # ITERATING OVER PAIRS #####
            for pair in pairs:
                orig_pair = pair

                logging.info('Processing pair: %s' % pair.replace(':', ' vs '))
                tt = time.time()
                station1, station2 = pair.split(':')
                pair = (np.where(stations == station1)
                        [0][0], np.where(stations == station2)[0][0])

                s1 = get_station(db, station1.split('.')[0], station1.split('.')[1])
                s2 = get_station(db, station2.split('.')[0], station2.split('.')[1])

                if s1.X:
                    X0 = s1.X
                    Y0 = s1.Y
                    c0 = s1.coordinates

                    X1 = s2.X
                    Y1 = s2.Y
                    c1 = s2.coordinates

                    if c0 == c1:
                        coordinates = c0
                    else:
                        coordinates = 'MIX'

                    cplAz = np.deg2rad(azimuth(coordinates, X0, Y0, X1, Y1))
                    logging.debug("Azimuth=%.1f"%np.rad2deg(cplAz))
                else:
                    # logging.debug('No Coordinates found! Skipping azimuth calculation!')
                    cplAz = 0.

                for components in params.components_to_compute:
                    
                    if components == "ZZ":
                        t1 = tramef_Z[pair[0]]
                        t2 = tramef_Z[pair[1]]
                    elif components[0] == "Z":
                        t1 = tramef_Z[pair[0]]
                        t2 = tramef_E[pair[1]]
                    elif components[1] == "Z":
                        t1 = tramef_E[pair[0]]
                        t2 = tramef_Z[pair[1]]
                    else:
                        t1 = tramef_E[pair[0]]
                        t2 = tramef_E[pair[1]]
                    if np.all(t1 == 0) or np.all(t2 == 0):
                        logging.debug("%s contains empty trace(s), skipping"%components)
                        continue
                    del t1, t2
                    
                    if components[0] == "Z":
                        t1 = tramef_Z[pair[0]]
                    elif components[0] == "R":
                        if cplAz != 0:
                            t1 = tramef_N[pair[0]] * np.cos(cplAz) +\
                                 tramef_E[pair[0]] * np.sin(cplAz)
                        else:
                            t1 = tramef_E[pair[0]]

                    elif components[0] == "T":
                        if cplAz != 0:
                            t1 = tramef_N[pair[0]] * np.sin(cplAz) -\
                                 tramef_E[pair[0]] * np.cos(cplAz)
                        else:
                            t1 = tramef_N[pair[0]]

                    if components[1] == "Z":
                        t2 = tramef_Z[pair[1]]
                    elif components[1] == "R":
                        if cplAz != 0:
                            t2 = tramef_N[pair[1]] * np.cos(cplAz) +\
                                 tramef_E[pair[1]] * np.sin(cplAz)
                        else:
                            t2 = tramef_E[pair[1]]
                    elif components[1] == "T":
                        if cplAz != 0:
                            t2 = tramef_N[pair[1]] * np.sin(cplAz) -\
                                 tramef_E[pair[1]] * np.cos(cplAz)
                        else:
                            t2 = tramef_N[pair[1]]

                    trames = np.vstack((t1, t2))
                    del t1, t2

                    daycorr = {}
                    ndaycorr = {}
                    allcorr = {}
                    for filterdb in get_filters(db, all=False):
                        filterid = filterdb.ref
                        daycorr[filterid] = np.zeros(get_maxlag_samples(db,))
                        ndaycorr[filterid] = 0

                    for islice, (begin, end) in enumerate(zip(begins, ends)):
                        # print "Progress: %#2d/%2d"% (islice+1,slices)
                        trame2h = trames[:, begin:end]

                        rmsmat = np.std(np.abs(trame2h), axis=1)
                        for filterdb in get_filters(db, all=False):
                            filterid = filterdb.ref
                            low = float(filterdb.low)
                            high = float(filterdb.high)
                            rms_threshold = filterdb.rms_threshold

                            Nfft = int(params.min30)
                            if params.min30 / 2 % 2 != 0:
                                Nfft = params.min30 + 2

                            trames2hWb = np.zeros((2, int(Nfft)), dtype=np.complex)
                            skip = False
                            for i, station in enumerate(pair):
                                if rmsmat[i] > rms_threshold:
                                    cp = cosTaper(len(trame2h[i]), 0.04)
                                    trame2h[i] -= trame2h[i].mean()
                                    
                                    if params.windsorizing == -1:
                                        trame2h[i] = np.sign(trame2h[i])
                                    elif params.windsorizing != 0:
                                        indexes = np.where(
                                            np.abs(trame2h[i]) > (params.windsorizing * rmsmat[i]))[0]
                                        # clipping at windsorizing*rms
                                        trame2h[i][indexes] = (trame2h[i][indexes] / np.abs(
                                            trame2h[i][indexes])) * params.windsorizing * rmsmat[i]

                                    trames2hWb[i] = whiten(
                                        trame2h[i]*cp, Nfft, dt, low, high, plot=False)
                                else:
                                    trames2hWb[i] = np.zeros(int(Nfft))
                                    skip = True
                                    logging.debug('Slice is Zeros!')
                            if not skip:
                                corr = myCorr(trames2hWb, np.ceil(params.maxlag / dt), plot=False)
                                tmptime = time.gmtime(basetime + begin /
                                                      params.goal_sampling_rate)
                                thisdate = time.strftime("%Y-%m-%d", tmptime)
                                thistime = time.strftime("%Y-%m-%d %H:%M:%S",
                                                         tmptime)
                                if params.keep_all:
                                    ccfid = "%s_%s_%s_%s_%s" % (station1, station2,
                                                             filterid, components,
                                                             thisdate)
                                    if ccfid not in allcorr:
                                        allcorr[ccfid] = {}
                                    allcorr[ccfid][thistime] = corr

                                if params.keep_days:
                                    if not np.any(np.isnan(corr)) and \
                                            not np.any(np.isinf(corr)):
                                        daycorr[filterid] += corr
                                        ndaycorr[filterid] += 1

                                del corr, thistime, trames2hWb

                    if params.keep_all:
                        for ccfid in allcorr.keys():
                            export_allcorr(db, ccfid, allcorr[ccfid])

                    if params.keep_days:
                        try:
                            for filterdb in get_filters(db, all=False):
                                filterid = filterdb.ref
                                corr = daycorr[filterid]
                                ncorr = ndaycorr[filterid]
                                if ncorr > 0:
                                    logging.debug(
                                        "Saving daily CCF for filter %02i, comp %s (stack of %02i CCF)" % (filterid, components, ncorr))

                                    thisdate = time.strftime(
                                        "%Y-%m-%d", time.gmtime(basetime))
                                    thistime = time.strftime(
                                        "%H_%M", time.gmtime(basetime))
                                    add_corr(
                                        db, station1.replace('.', '_'),
                                        station2.replace('.', '_'), filterid,
                                        thisdate, thistime,  params.min30 /
                                        params.goal_sampling_rate,
                                        components, corr,
                                        params.goal_sampling_rate, day=True,
                                        ncorr=ncorr)
                                del corr, ncorr
                        except Exception as e:
                            logging.debug(str(e))
                    del trames, daycorr, ndaycorr
                logging.debug("Updating Job")
                update_job(db, goal_day, orig_pair, 'CC', 'D')

                logging.info("Finished processing this pair. It took %.2f seconds" %
                              (time.time() - tt))
            logging.info("Job Finished. It took %.2f seconds" % (time.time() - jt))
    logging.info('*** Finished: Compute CC ***')
Example #19
def time_shift_estimate(corr_data, ref_trc=None, tw=None, shift_range=10,
                        shift_steps=100, single_sided=False):
    """ Time shift estimate through shifting and comparison.

    This function is intended to estimate shift of traces as they can occur
    in noise cross-correlation in case of drifting clocks.

    Time shifts are estimated comparing each correlation function stored
    in the ``corr_data`` matrix (one for each row) with ``shift_steps``
    shifted versions of the reference trace stored in ``ref_trc``.
    The maximum amount of shifting may be passed in ``shift_range``.
    The best match (shifting amount and corresponding correlation value) is
    calculated on different time windows. If ``tw = None`` the shifting is
    estimated on the whole trace.

    :type corr_data: :class:`~numpy.ndarray`
    :param corr_data: 2d ndarray containing the correlation functions.
        One for each row.
    :type ref_trc: :class:`~numpy.ndarray`
    :param ref_trc: 1D array containing the reference trace to be shifted and
        compared to the individual traces in ``mat``
    :type tw: list of :class:`~numpy.ndarray` of int
    :param tw: list of 1D ndarrays holding the indices of samples in the time
        windows to be used in the time shift estimate. The samples are counted
        from the zero lag time with the index of the first sample being 0. If
        ``tw = None`` the full time range is used.
    :type shift_range: scalar
    :param shift_range: Maximum amount of time shift in samples (in one
        direction).
        Shifting is tested in both directions from ``-shift_range`` to
        ``shift_range``
    :type shift_steps: scalar
    :param shift_steps: Number of shifted versions to be tested. The increment
            will be ``(2 * shift_range) / shift_steps``
    :type single_sided: boolean
    :param single_sided: If ``True`` the zero lag time of the traces is in the
        first sample. If ``False`` zero lag is assumed to be in the center of
        the traces and the shifting is evaluated on the causal and acausal
        parts of the traces separately and averaged. This is done to avoid bias
        from velocity changes (stretching) in the case of strongly asymmetric
        traces.

    :rtype: dictionary
    :return: **shift_result**: dictionary with the following key-value pairs

        **corr**: :class:`~numpy.ndarray` 2d ndarray containing the
                  correlation value for the best match for each row of
                  ``mat`` and for each time window.
                  Its dimension is: ``(len(tw), mat.shape[1])``

        **shift**: :class:`~numpy.ndarray` 2d ndarray containing the amount of
                    shifting corresponding to the best match for each row of
                    ``mat`` and for each time window. Shift is measured in
                    units of the sampling interval.
                    Its dimension is: ``(len(tw), mat.shape[1])``
    """

    mat = corr_data

    # generate the reference trace if not given (use the whole time span)
    if ref_trc is None:
        ref_trc = np.nansum(mat, axis=0) / mat.shape[0]

    # generate time window if not given (use the full length of the correlation
    # trace)
    if tw is None:
        tw = time_windows_creation([0], [int(np.floor(mat.shape[1] / 2.))])

    # taper the reference trace to avoid interpolation
    # artefacts at the ends of the trace
    taper = cosTaper(len(ref_trc), 0.05)
    ref_trc *= taper

    # different values of shifting to be tested
    shifts = np.linspace(-shift_range, shift_range, shift_steps)

    # time axis
    time_idx = np.arange(len(ref_trc))

    # create the array to hold the shifted traces
    ref_shift = np.zeros((len(shifts), len(ref_trc)))

    # create a spline object for the reference trace
    ref_tr_spline = UnivariateSpline(time_idx, ref_trc, s=0)

    # evaluate the spline object at different points and put in the prepared
    # array
    for (k, this_shift) in enumerate(shifts):
        ref_shift[k, :] = ref_tr_spline(time_idx - this_shift)

    # search best fit of the crosscorrs to one of the shifted ref_traces
    if single_sided:
        vdict = velocity_change_estimete(mat, tw, ref_shift,
                                         shifts, sides='right',
                                         return_sim_mat=True)
        corr = vdict['corr']
        shift = vdict['dt']
        sim_mat = vdict['sim_mat']
    else:
        # estimate shifts for causal and acausal part individually and average
        # to avoid apparent shift from velocity change and asymmetric
        # amplitudes
        lvdict = velocity_change_estimete(mat, tw, ref_shift,
                                          shifts,
                                          sides='left',
                                          return_sim_mat=True)
        lcorr = lvdict['corr']
        lshift = lvdict['dt']
        lsim_mat = lvdict['sim_mat']

        rvdict = velocity_change_estimete(mat, tw, ref_shift,
                                          shifts,
                                          sides='right',
                                          return_sim_mat=True)
        rcorr = rvdict['corr']
        rshift = rvdict['dt']
        rsim_mat = rvdict['sim_mat']

        shift = np.zeros_like(lshift)
        corr = np.zeros_like(lshift)
        sim_mat = np.zeros_like(lsim_mat)
        for ii in range(len(tw)):
            corr[ii] = (lcorr[ii] + rcorr[ii]) / 2.
            shift[ii] = (lshift[ii] + rshift[ii]) / 2.
        sim_mat = (lsim_mat + rsim_mat) / 2.

    # create the result dictionary
    dt = {'corr': corr.T, 'shift': shift.T, 'sim_mat': sim_mat}

    return dt
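A minimal usage sketch (the array name is illustrative): with daily correlation functions in the rows of corr_mat, the clock drift per day can be read, in samples, from the returned dictionary.

import numpy as np
corr_mat = np.random.randn(30, 2001)  # 30 days of correlation functions
res = time_shift_estimate(corr_mat, shift_range=10, shift_steps=101)
drift = res['shift']                  # best-fit shift per day and time window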
Example #20
def array_processing(stream, win_len, win_frac, sll_x, slm_x, sll_y, slm_y,
                     sl_s, semb_thres, vel_thres, frqlow, frqhigh, stime,
                     etime, prewhiten, verbose=False, coordsys='lonlat',
                     timestamp='mlabday', method=0, store=None):
    """
    Method for Seismic-Array-Beamforming/FK-Analysis/Capon

    :param stream: Stream object, the trace.stats dict like class must
        contain an :class:`~obspy.core.util.attribdict.AttribDict` with
        'latitude', 'longitude' (in degrees) and 'elevation' (in km), or 'x',
        'y', 'elevation' (in km) items/attributes. See param ``coordsys``.
    :type win_len: float
    :param win_len: Sliding window length in seconds
    :type win_frac: float
    :param win_frac: Fraction of sliding window to use for step
    :type sll_x: float
    :param sll_x: slowness x min (lower)
    :type slm_x: float
    :param slm_x: slowness x max
    :type sll_y: float
    :param sll_y: slowness y min (lower)
    :type slm_y: float
    :param slm_y: slowness y max
    :type sl_s: float
    :param sl_s: slowness step
    :type semb_thres: float
    :param semb_thres: Threshold for semblance
    :type vel_thres: float
    :param vel_thres: Threshold for velocity
    :type frqlow: float
    :param frqlow: lower frequency for fk/capon
    :type frqhigh: float
    :param frqhigh: higher frequency for fk/capon
    :type stime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param stime: Start time of interest
    :type etime: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param etime: End time of interest
    :type prewhiten: int
    :param prewhiten: Do prewhitening, values: 1 or 0
    :param coordsys: valid values: 'lonlat' and 'xy', choose which stream
        attributes to use for coordinates
    :type timestamp: str
    :param timestamp: valid values: 'julsec' and 'mlabday'; 'julsec' returns
        the timestamp in seconds since 1970-01-01T00:00:00, 'mlabday'
        returns the timestamp in days (decimals represent hours, minutes
        and seconds) since '0001-01-01T00:00:00' as needed for matplotlib
        date plotting (see e.g. matplotlib's num2date)
    :type method: int
    :param method: the method to use 0 == bf, 1 == capon
    :type store: function
    :param store: A custom function which gets called on each iteration. It is
        called with the relative power map and the time offset as first and
        second arguments and the iteration number as third argument. Useful for
        storing or plotting the map for each iteration. For this purpose the
        dump function of this module can be used.
    :return: :class:`numpy.ndarray` of timestamp, relative relpow, absolute
        relpow, backazimuth, slowness
    """
    res = []
    eotr = True

    # check that sampling rates do not vary
    fs = stream[0].stats.sampling_rate
    if len(stream) != len(stream.select(sampling_rate=fs)):
        msg = 'in sonic sampling rates of traces in stream are not equal'
        raise ValueError(msg)

    grdpts_x = int(((slm_x - sll_x) / sl_s + 0.5) + 1)
    grdpts_y = int(((slm_y - sll_y) / sl_s + 0.5) + 1)

    geometry = get_geometry(stream, coordsys=coordsys, verbose=verbose)

    if verbose:
        print("geometry:")
        print(geometry)
        print("stream contains following traces:")
        print(stream)
        print("stime = " + str(stime) + ", etime = " + str(etime))

    time_shift_table = get_timeshift(geometry, sll_x, sll_y,
                                     sl_s, grdpts_x, grdpts_y)
    # offset of arrays
    spoint, _epoint = get_spoint(stream, stime, etime)
    #
    # loop with a sliding window over the dat trace array and apply bbfk
    #
    nstat = len(stream)
    fs = stream[0].stats.sampling_rate
    nsamp = int(win_len * fs)
    nstep = int(nsamp * win_frac)

    # generate plan for rfft
    nfft = nextpow2(nsamp)
    deltaf = fs / float(nfft)
    nlow = int(frqlow / float(deltaf) + 0.5)
    nhigh = int(frqhigh / float(deltaf) + 0.5)
    nlow = max(1, nlow)  # avoid using the offset
    nhigh = min(nfft // 2 - 1, nhigh)  # avoid using nyquist
    nf = nhigh - nlow + 1  # include upper and lower frequency
    # to speed up the routine a bit we estimate all steering vectors in advance
    steer = np.empty((nf, grdpts_x, grdpts_y, nstat), dtype=np.complex128)
    clibsignal.calcSteer(nstat, grdpts_x, grdpts_y, nf, nlow,
                         deltaf, time_shift_table, steer)
    R = np.empty((nf, nstat, nstat), dtype=np.complex128)
    ft = np.empty((nstat, nf), dtype=np.complex128)
    newstart = stime
    tap = cosTaper(nsamp, p=0.22)  # 0.22 matches 0.2 of historical C bbfk.c
    offset = 0
    relpow_map = np.empty((grdpts_x, grdpts_y), dtype=np.float64)
    abspow_map = np.empty((grdpts_x, grdpts_y), dtype=np.float64)
    while eotr:
        try:
            for i, tr in enumerate(stream):
                dat = tr.data[spoint[i] + offset:
                              spoint[i] + offset + nsamp]
                dat = (dat - dat.mean()) * tap
                ft[i, :] = np.fft.rfft(dat, nfft)[nlow:nlow + nf]
        except IndexError:
            break
        ft = np.ascontiguousarray(ft, np.complex128)
        relpow_map.fill(0.)
        abspow_map.fill(0.)
        # computing the covariances of the signal at different receivers
        dpow = 0.
        for i in range(nstat):
            for j in range(i, nstat):
                R[:, i, j] = ft[i, :] * ft[j, :].conj()
                if method == 1:
                    R[:, i, j] /= np.abs(R[:, i, j].sum())
                if i != j:
                    R[:, j, i] = R[:, i, j].conjugate()
                else:
                    dpow += np.abs(R[:, i, j].sum())
        dpow *= nstat
        if method == 1:
            # P(f) = 1/(e.H R(f)^-1 e)
            for n in range(nf):
                R[n, :, :] = np.linalg.pinv(R[n, :, :], rcond=1e-6)

        errcode = clibsignal.generalizedBeamformer(
            relpow_map, abspow_map, steer, R, nstat, prewhiten,
            grdpts_x, grdpts_y, nf, dpow, method)
        if errcode != 0:
            msg = 'generalizedBeamformer exited with error %d'
            raise Exception(msg % errcode)
        ix, iy = np.unravel_index(relpow_map.argmax(), relpow_map.shape)
        relpow, abspow = relpow_map[ix, iy], abspow_map[ix, iy]
        if store is not None:
            store(relpow_map, abspow_map, offset)
        # here we compute baz, slow
        slow_x = sll_x + ix * sl_s
        slow_y = sll_y + iy * sl_s

        slow = np.sqrt(slow_x ** 2 + slow_y ** 2)
        if slow < 1e-8:
            slow = 1e-8
        azimut = 180 * math.atan2(slow_x, slow_y) / math.pi
        baz = azimut % -360 + 180
        if relpow > semb_thres and 1. / slow > vel_thres:
            res.append(np.array([newstart.timestamp, relpow, abspow, baz,
                                 slow]))
            if verbose:
                print(newstart, (newstart + (nsamp / fs)), res[-1][1:])
        if (newstart + (nsamp + nstep) / fs) > etime:
            eotr = False
        offset += nstep

        newstart += nstep / fs
    res = np.array(res)
    if timestamp == 'julsec':
        pass
    elif timestamp == 'mlabday':
        # 719162 == days between 0001-01-01 and 1970-01-01
        res[:, 0] = res[:, 0] / (24. * 3600) + 719162
    else:
        msg = "Option timestamp must be one of 'julsec', or 'mlabday'"
        raise ValueError(msg)
    return np.array(res)
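
A minimal usage sketch, assuming the routine above is exposed as ``sonic`` (as its error message suggests); the file path, frequency band, time span, and slowness grid are illustrative assumptions.

# Hypothetical usage of the routine above, assumed to be named `sonic`.
from obspy import read, UTCDateTime

st = read("/path/to/array_data.mseed")  # placeholder; traces need coordinates
out = sonic(st,
            win_len=1.0, win_frac=0.05,             # 1 s windows, 5 % steps
            sll_x=-3.0, slm_x=3.0, sll_y=-3.0, slm_y=3.0, sl_s=0.03,
            semb_thres=-1e9, vel_thres=-1e9,        # keep every window
            frqlow=1.0, frqhigh=8.0,                # fk band in Hz
            stime=UTCDateTime("2010-01-01T00:00:00"),
            etime=UTCDateTime("2010-01-01T00:05:00"),
            prewhiten=0, method=0)                  # 0 == beamforming
# each row of out: timestamp, rel. power, abs. power, backazimuth, slowness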
Example #21
def plot_windows(data_trace, synthetic_trace, windows, dominant_period,
                 filename=None, debug=False):
    """
    Helper function plotting the picked windows in some variants. Useful for
    debugging and checking what's actually going on.

    If using the debug option, please use the same data_trace and
    synthetic_trace as you used for the select_windows() function. They will
    be augmented with certain values used for the debugging plots.

    :param data_trace: The data trace.
    :type data_trace: obspy.core.trace.Trace
    :param synthetic_trace: The synthetic trace.
    :type synthetic_trace: obspy.core.trace.Trace
    :param windows: The windows, as returned by select_windows()
    :type windows: list
    :param dominant_period: The dominant period of the data. Used for the
        tapering.
    :type dominant_period: float
    :param filename: If given, a file will be written. Otherwise the plot
        will be shown.
    :type filename: basestring
    :param debug: Toggle plotting debugging information. Optional. Defaults
        to False.
    :type debug: bool
    """
    import matplotlib.pyplot as plt
    from obspy.signal.invsim import cosTaper

    plt.figure(figsize=(16, 10))
    plt.subplots_adjust(hspace=0.3)

    npts = synthetic_trace.stats.npts

    # Plot the raw data.
    time_array = np.linspace(0, (npts - 1) * synthetic_trace.stats.delta, npts)
    plt.subplot(411)
    plt.plot(time_array, data_trace.data, color="black", label="data")
    plt.plot(time_array, synthetic_trace.data, color="red",
             label="synthetics")
    plt.xlim(0, time_array[-1])
    plt.title("Raw data")

    # Plot the chosen windows.
    bottom = np.ones(npts) * -10.0
    top = np.ones(npts) * 10.0
    for left_idx, right_idx in windows:
        top[left_idx: right_idx + 1] = -10.0
    plt.subplot(412)
    plt.plot(time_array, data_trace.data, color="black", label="data")
    plt.plot(time_array, synthetic_trace.data, color="red",
             label="synthetics")
    ymin, ymax = plt.ylim()
    plt.fill_between(time_array, bottom, top, color="red", alpha=0.5)
    plt.xlim(0, time_array[-1])
    plt.ylim(ymin, ymax)
    plt.title("Chosen windows")

    # Plot the tapered data.
    final_data = np.zeros(npts)
    final_data_scaled = np.zeros(npts)
    synth_data = np.zeros(npts)
    synth_data_scaled = np.zeros(npts)

    for left_idx, right_idx in windows:
        right_idx += 1
        length = right_idx - left_idx

        # Setup the taper.
        p = (dominant_period / synthetic_trace.stats.delta / length) / 2.0
        if p >= 0.5:
            p = 0.49
        elif p < 0.1:
            p = 0.1
        taper = cosTaper(length, p=p)

        data_window = taper * data_trace.data[left_idx: right_idx].copy()
        synth_window = taper * synthetic_trace.data[left_idx: right_idx].copy()

        data_window_scaled = data_window / data_window.ptp() * 2.0
        synth_window_scaled = synth_window / synth_window.ptp() * 2.0

        final_data[left_idx: right_idx] = data_window
        synth_data[left_idx: right_idx] = synth_window
        final_data_scaled[left_idx: right_idx] = data_window_scaled
        synth_data_scaled[left_idx: right_idx] = synth_window_scaled

    plt.subplot(413)
    plt.plot(time_array, final_data, color="black")
    plt.plot(time_array, synth_data, color="red")
    plt.xlim(0, time_array[-1])
    plt.title("Tapered windows")

    plt.subplot(414)
    plt.plot(time_array, final_data_scaled, color="black")
    plt.plot(time_array, synth_data_scaled, color="red")
    plt.xlim(0, time_array[-1])
    plt.title("Tapered windows, scaled to same amplitude")

    if debug:
        # sample index of the first valid sample and its time equivalent
        first_valid_index = data_trace.stats.first_valid_index
        first_valid_time = first_valid_index * synthetic_trace.stats.delta
        noise_level = data_trace.stats.noise_level

        data_p, data_t, data_e = find_local_extrema(
            data_trace.data, start_index=first_valid_index)
        synth_p, synth_t, synth_e = find_local_extrema(
            synthetic_trace.data, start_index=first_valid_index)

        for _i in range(1, 3):
            plt.subplot(4, 1, _i)
            ymin, ymax = plt.ylim()
            xmin, xmax = plt.xlim()
            plt.vlines(first_valid_time, ymin, ymax, color="green",
                       label="Theoretical First Arrival")
            plt.hlines(noise_level, xmin, xmax, color="0.5",
                       label="Noise Level", linestyles="--")
            plt.hlines(-noise_level, xmin, xmax, color="0.5", linestyles="--")

            plt.hlines(noise_level * 5, xmin, xmax, color="0.8",
                       label="Minimal acceptable amplitude", linestyles="--")
            plt.hlines(-noise_level * 5, xmin, xmax, color="0.8",
                       linestyles="--")
            if _i == 2:
                plt.scatter(time_array[data_e], data_trace.data[data_e],
                            color="black", s=10)
                plt.scatter(time_array[synth_e], synthetic_trace.data[synth_e],
                            color="red", s=10)
            plt.ylim(ymin, ymax)
            plt.xlim(xmin, xmax)

        plt.subplot(411)
        plt.legend(prop={"size": "small"})

    plt.suptitle(data_trace.id)

    if filename:
        plt.savefig(filename)
    else:
        plt.show()
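
A hypothetical call, assuming ``data`` and ``synth`` are obspy Traces of equal length and sampling and that the index pairs come from a ``select_windows()``-style picker; all values below are made up for illustration.

# Hypothetical inputs; the window indices are illustrative.
windows = [(120, 480), (650, 900)]   # (left_idx, right_idx) sample pairs
plot_windows(data, synth, windows, dominant_period=20.0,
             filename="windows.png")  # omit filename to display interactively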
Example #22
def deconvolve_traces(signal, divisor, eps, freq=[], residual=False):
    """ Deconvolve a time series from a set of time series.

    The deconvolution is performed in the frequency domain by spectral
    division, stabilized with a water level.

    :type signal: :class:`~obspy.core.stream.Stream`
    :param signal: signal from which the divisor is to be deconvolved
    :type divisor: :class:`~obspy.core.trace.Trace`
    :param divisor: time series that is to be deconvolved from signal
    :type eps: float
    :param eps: fraction of the spectral mean used as a water level to
        avoid spectral holes in the deconvolution.
    :type freq: two element array-like
    :param freq: frequency range for the estimation of the mean power
        that is scaled with ``eps`` to obtain the water level
    :type residual: bool
    :param residual: return residual if True, defaults to False

    :rtype: :class:`~obspy.core.stream.Stream`
    :return: **(dcst, rst)**: deconvolved stream and residual (only
        if ``residual=True``)

    """

    
    zerotime = UTCDateTime(1971,1,1)
    # trace length is taken from signal (must be even to use real fft)
    if signal[0].stats['npts'] % 2:
        trlen = signal[0].stats['npts']+1
    else:
        trlen = signal[0].stats['npts']
    delta = divisor.stats['delta']
    
    
    # prepare divisor
    divisor.detrend(type='constant')
    taper = cosTaper(divisor.stats['npts'],p=0.05)
    divisor.data *= taper

    divisor.trim(starttime=divisor.stats['starttime'],
                 endtime=divisor.stats['starttime'] + (trlen - 1) * delta,
                 pad=True, fill_value=0, nearest_sample=False)
    # FFT divisor
    fd = np.fft.rfft(divisor.data)
    # estimate the water level to stabilize the deconvolution
    if freq:
        # rfft bins run from 0 to the Nyquist frequency
        f = np.fft.rfftfreq(trlen, d=delta)
        ind = np.nonzero(np.all([f > freq[0], f < freq[1]], axis=0))
        wl = eps * np.mean((fd * fd.conj())[ind])
    else:
        wl = eps * np.mean(fd * fd.conj())
    
    # create the output stream
    dcst = Stream()
    rst = Stream()
    for tr in signal:
        if tr.stats['sampling_rate'] != divisor.stats['sampling_rate']:
            print("Sampling rates don't match for\n %s" % tr)
            continue

        # prepare numerator: taper before padding so the taper length
        # matches the original trace
        tr.detrend('constant')
        taper = cosTaper(tr.stats['npts'])
        tr.data *= taper
        tr.trim(starttime=tr.stats['starttime'],
                endtime=tr.stats['starttime'] + (trlen - 1) * delta,
                pad=True, fill_value=0, nearest_sample=False)
        # FFT numerator
        sf = np.fft.rfft(tr.data)

        # water-level deconvolution: numerator spectrum times the complex
        # conjugate of the divisor, normalized by its stabilized power
        fdc = sf * fd.conj() / (fd * fd.conj() + wl)
        dc = np.fft.irfft(fdc)

        # template to hold results
        dctr = tr.copy()
        # propagate metadata
        dctr.stats = combine_stats(tr,divisor)
        dctr.data = dc
        dctr.stats['npts'] = len(dc)
        dctr.stats['starttime'] = zerotime - (divisor.stats['starttime']-tr.stats['starttime'])
        dctr.stats_tr1 = tr.stats
        dctr.stats_tr2 = divisor.stats
        
        # append to output stream
        dcst.append(dctr)
        
        if residual:
            # compute the residual of the deconvolution
            rtr = dctr.copy()
            rtr.data = tr.data - np.fft.irfft(fdc * fd)
            # append to output stream
            rst.append(rtr)

    # return after all traces have been processed
    if residual:
        return (dcst, rst)
    return dcst
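
A usage sketch with assumed inputs: a receiver Stream, a source Trace, and a 1 % water level estimated in the 0.1-1 Hz band; the file paths are placeholders.

# Hypothetical usage of deconvolve_traces.
from obspy import read

st = read("/path/to/receivers.mseed")
src = read("/path/to/source.mseed")[0]
dcst, rst = deconvolve_traces(st, src, eps=0.01, freq=[0.1, 1.0],
                              residual=True)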
Example #23
def time_stretch_estimate(corr_data, ref_trc=None, tw=None, stretch_range=0.1,
                          stretch_steps=100, sides='both'):
    """ Time shift estimate through shifting and comparison.

    This function estimates stretching of the time axis of traces as it can
    occur if the propagation velocity changes.

    Time stretching is estimated comparing each correlation function stored
    in the ``corr_data`` matrix (one for each row) with ``stretch_steps``
    stretched versions of the reference trace stored in ``ref_trc``.
    The maximum amount of stretching may be passed in ``stretch_range``. The
    time axis is multiplied by exp(stretch).
    The best match (stretching amount and corresponding correlation value) is
    calculated on different time windows. If ``tw = None`` the stretching is
    estimated on the whole trace.

    :type corr_data: :class:`~numpy.ndarray`
    :param corr_data: 2d ndarray containing the correlation functions.
        One for each row.
    :type ref_trc: :class:`~numpy.ndarray`
    :param ref_trc: 1D array containing the reference trace to be shifted and
        compared to the individual traces in ``mat``
    :type tw: list of :class:`~numpy.ndarray` of int
    :param tw: list of 1D ndarrays holding the indices of samples in the time
        windows to be used in the estimate. The samples are counted
        from the zero lag time with the index of the first sample being 0. If
        ``tw = None`` the full time range is used.
    :type stretch_range: scalar
    :param stretch_range: Maximum amount of relative stretching.
        Stretching and compression is tested from ``-stretch_range`` to
        ``stretch_range``.
    :type stretch_steps: scalar
    :param stretch_steps: Number of stretched versions to be tested. The
        increment will be ``(2 * stretch_range) / stretch_steps``
    :type sides: str
    :param sides: Side of the reference matrix to be used for the stretching
        estimate ('both' | 'left' | 'right' | 'single') ``single`` is used for
        one-sided signals from active sources with zero lag time is on the
        first sample. Other options assume that the zero lag time is in the
        center of the traces.


    :rtype: dictionary
    :return: **stretch_result**: dictionary with the following key-value pairs
        *corr*: :class:`~numpy.ndarray` 2d ndarray containing the correlation
        value for the best match for each row of ``mat`` and for each time
        window.
        Its dimension is: ``(len(tw), mat.shape[1])``

        *stretch*: :class:`~numpy.ndarray` 2d ndarray containing the amount
        of stretching corresponding to the best match for each row of ``mat``
        and for each time window. Stretch is a relative value corresponding to
        the negative relative velocity change -dv/v
        Its dimension is: ``(len(tw), mat.shape[1])``
    """

    mat = corr_data

    # generate the reference trace if not given (use the whole time span)
    if ref_trc is None:
        ref_trc = np.nansum(mat, axis=0) / mat.shape[0]

    # generate time window if not given (use the full length of the correlation
    # trace)
    if tw is None:
        tw = time_windows_creation([0], [int(np.floor(mat.shape[1] / 2.))])

    # taper and extend the reference trace to avoid interpolation
    # artefacts at the ends of the trace
    taper = cosTaper(len(ref_trc), 0.05)
    ref_trc *= taper

    # different values of shifting to be tested
    stretchs = np.linspace(-stretch_range, stretch_range, stretch_steps)
    time_facs = np.exp(-stretchs)

    # time axis
    if sides != 'single':
        time_idx = np.arange(len(ref_trc)) - (len(ref_trc) - 1.) / 2.
    else:
        time_idx = np.arange(len(ref_trc))

    # create the array to hold the shifted traces
    ref_stretch = np.zeros((len(stretchs), len(ref_trc)))

    # create a spline object for the reference trace
    ref_tr_spline = UnivariateSpline(time_idx, ref_trc, s=0)

    # evaluate the spline object at different points and put in the prepared
    # array
    for (k, this_fac) in enumerate(time_facs):
        ref_stretch[k, :] = ref_tr_spline(time_idx * this_fac)

    # search best fit of the crosscorrs to one of the stretched ref_traces
    dv = velocity_change_estimete(mat, tw, ref_stretch, stretchs,
                                  sides=sides, return_sim_mat=True)

    dv['corr'] = dv['corr'].T
    dv.update({'stretch': dv['dt'].T})
    del dv['dt']
    dv.update({'stretch_vec': stretchs})

    return dv
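
An illustrative call, assuming ``corr_mat`` is a 2d array of correlation functions with zero lag at the center of each row; all parameter values are assumptions.

# Hypothetical input: corr_mat with shape (n_corr, n_lags), zero lag centred.
res = time_stretch_estimate(corr_mat, stretch_range=0.02,
                            stretch_steps=201, sides='both')
best_corr = res['corr']      # best-match correlation per trace and window
dv_over_v = -res['stretch']  # stretch corresponds to -dv/v (see docstring)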
Example #25
def time_shift_apply(corr_data, shift):
    """ Apply time shift to traces.

    Apply time shifts to traces e.g. to align them to a common time base.
    Such shifts can occur in correlation traces in case of a drifting clock.
    This function ``applies`` the shifts. To correct for shift estimated with
    :class:`~miic.core.stretch_mod.time_shift_estimate` you need to apply
    negative shifts.
    Shifting is done in frequency domain with 5% tapering.

    :type corr_data: :py:class:`~numpy.ndarray`
    :param corr_data: 2d ndarray containing the correlation functions that are
        to be shifted.
        One for each row.
    :type shift: :py:class:`~numpy.ndarray`
    :param shift: ndarray with shift.shape[0] = corr_data.shape[0] containing
        the shifts in units of the sampling interval by which the traces are
        to be shifted

    :rtype: :py:class:`~numpy.ndarray`
    :return: **shifted_mat**: shifted version of the input matrix
    """
    mat = corr_data
    # check input
    # shift is just a 1d array
    if len(shift.shape) == 1:
        t_shift = np.zeros([shift.shape[0], 1])
        t_shift[:, 0] = shift
        shift = t_shift
    # shift has the wrong length
    elif shift.shape[0] != mat.shape[0]:
        print "InputError: shift.shape[0] must be equal corr_data.shape[0]"
        return 0
    # shift has multiple columns (multiple measurements for the same time)
    if shift.shape[1] > 1:
        shift = np.delete(shift, np.arange(1, shift.shape[1]), axis=1)

    # taper the reference matrix to avoid interpolation
    taper = cosTaper(mat.shape[1], 0.05)
    mat *= np.tile(taper, [mat.shape[0], 1])

    # find a suitable length for the FFT
    N = nextpow2(2 * mat.shape[1])
    # integer division keeps the array shape valid under Python 3
    w = np.zeros([1, N // 2 + 1])

    # original and shifted phase
    w[0, :] = np.linspace(0, np.pi, N // 2 + 1)
    pha = np.exp(-1j * (shift) * w)

    # Fourier Transform
    F = np.fft.rfft(mat, N, 1)

    # apply the phase shift
    sF = F * pha

    # transform to time domain
    smat = np.fft.irfft(sF)

    # cut to original size
    shifted_mat = smat[:, 0 : mat.shape[1]]
    return shifted_mat
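
This function pairs with ``time_shift_estimate``: applying the negative of an estimated shift corrects it. A sketch with assumed inputs:

# Hypothetical drift correction; corr_mat is a 2d correlation matrix and
# measured_shift holds one shift per row, in units of the sampling interval.
corrected = time_shift_apply(corr_mat, -measured_shift)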
Example #26
def time_shift_estimate(corr_data, ref_trc=None, tw=None, shift_range=10, shift_steps=100, single_sided=False):
    """ Time shift estimate through shifting and comparison.

    This function is intended to estimate shift of traces as they can occur
    in noise cross-correlation in case of drifting clocks.

    Time shifts are estimated comparing each correlation function stored
    in the ``corr_data`` matrix (one for each row) with ``shift_steps``
    shifted versions of the reference trace stored in ``ref_trc``.
    The maximum amount of shifting may be passed in ``shift_range``.
    The best match (shifting amount and corresponding correlation value) is
    calculated on different time windows. If ``tw = None`` the shifting is
    estimated on the whole trace.

    :type corr_data: :class:`~numpy.ndarray`
    :param corr_data: 2d ndarray containing the correlation functions.
        One for each row.
    :type ref_trc: :class:`~numpy.ndarray`
    :param ref_trc: 1D array containing the reference trace to be shifted and
        compared to the individual traces in ``mat``
    :type tw: list of :class:`~numpy.ndarray` of int
    :param tw: list of 1D ndarrays holding the indices of samples in the time
        windows to be used in the time shift estimate. The samples are counted
        from the zero lag time with the index of the first sample being 0. If
        ``tw = None`` the full time range is used.
    :type shift_range: scalar
    :param shift_range: Maximum amount of time shift in samples (in one
        direction).
        Shifting is tested in both directions from ``-shift_range`` to
        ``shift_range``
    :type shift_steps: scalar
    :param shift_steps: Number of shifted versions to be tested. The increment
        will be ``(2 * shift_range) / shift_steps``
    :type single_sided: bool
    :param single_sided: If ``True`` the zero lag time of the traces is in the
        first sample. If ``False`` zero lag is assumed to be in the center of
        the traces and the shifting is evaluated on the causal and acausal
        parts of the traces separately and averaged. This is done to avoid bias
        from velocity changes (stretching) in the case of strongly asymmetric
        traces.

    :rtype: dictionary
    :return: **shift_result**: dictionary with the following key-value pairs

        **corr**: :class:`~numpy.ndarray` 2d ndarray containing the
        correlation value for the best match for each row of ``mat`` and for
        each time window.
        Its dimension is: ``(len(tw), mat.shape[1])``

        **shift**: :class:`~numpy.ndarray` 2d ndarray containing the amount
        of shifting corresponding to the best match for each row of ``mat``
        and for each time window. Shift is measured in units of the sampling
        interval.
        Its dimension is: ``(len(tw), mat.shape[1])``
    """

    mat = corr_data

    # generate the reference trace if not given (use the whole time span)
    if ref_trc is None:
        ref_trc = np.nansum(mat, axis=0) / mat.shape[0]

    # generate time window if not given (use the full length of the correlation
    # trace)
    if tw is None:
        tw = time_windows_creation([0], [int(np.floor(mat.shape[1] / 2.0))])

    # taper and extend the reference trace to avoid interpolation
    # artefacts at the ends of the trace
    taper = cosTaper(len(ref_trc), 0.05)
    ref_trc *= taper

    # different values of shifting to be tested
    shifts = np.linspace(-shift_range, shift_range, shift_steps)

    # time axis
    time_idx = np.arange(len(ref_trc))

    # create the array to hold the shifted traces
    ref_shift = np.zeros((len(shifts), len(ref_trc)))

    # create a spline object for the reference trace
    ref_tr_spline = UnivariateSpline(time_idx, ref_trc, s=0)

    # evaluate the spline object at different points and put in the prepared
    # array
    for (k, this_shift) in enumerate(shifts):
        ref_shift[k, :] = ref_tr_spline(time_idx - this_shift)

    # search best fit of the crosscorrs to one of the shifted ref_traces
    if single_sided:
        vdict = velocity_change_estimete(mat, tw, ref_shift, shifts, sides="right", return_sim_mat=True)
        corr = vdict["corr"]
        shift = vdict["dt"]
        sim_mat = vdict["sim_mat"]
    else:
        # estimate shifts for causal and acausal part individually and average
        # to avoid apparent shift from velocity change and asymmetric
        # amplitudes
        lvdict = velocity_change_estimete(mat, tw, ref_shift, shifts, sides="left", return_sim_mat=True)
        lcorr = lvdict["corr"]
        lshift = lvdict["dt"]
        lsim_mat = lvdict["sim_mat"]

        rvdict = velocity_change_estimete(mat, tw, ref_shift, shifts, sides="right", return_sim_mat=True)
        rcorr = rvdict["corr"]
        rshift = rvdict["dt"]
        rsim_mat = rvdict["sim_mat"]

        shift = np.zeros_like(lshift)
        corr = np.zeros_like(lshift)
        for ii in range(len(tw)):
            corr[ii] = (lcorr[ii] + rcorr[ii]) / 2.0
            shift[ii] = (lshift[ii] + rshift[ii]) / 2.0
        sim_mat = (lsim_mat + rsim_mat) / 2.0

    # create the result dictionary
    dt = {"corr": corr.T, "shift": shift.T, "sim_mat": sim_mat}

    return dt
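
A sketch chaining the two functions above, with assumed inputs and the default single full-trace time window:

# Hypothetical end-to-end correction of a measured clock drift.
est = time_shift_estimate(corr_mat, shift_range=10, shift_steps=201)
per_trace_shift = est['shift'][:, 0]       # column 0: the single time window
corrected = time_shift_apply(corr_mat, -per_trace_shift)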