Example #1
 def test_np_integers(self):
     ITYPES = [
         np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64
     ]
     for ityp in ITYPES:
         x = ityp(12345)
         testN = next_fast_len(x)
         assert_equal(testN, next_fast_len(int(x)))
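For context, a minimal standalone sketch of what the test above relies on: next_fast_len(n) returns the smallest 5-smooth integer (no prime factors other than 2, 3 and 5) that is >= n.

from scipy.fftpack import next_fast_len

n = 12345
m = next_fast_len(n)
assert m >= n
# verify that m is 5-smooth: repeatedly divide out 2, 3 and 5
k = m
for d in (2, 3, 5):
    while k % d == 0:
        k //= d
assert k == 1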
Example #2
def myCorr(data, maxlag, plot=False, nfft=None):
    """This function takes ndimensional *data* array, computes the cross-correlation in the frequency domain
    and returns the cross-correlation function between [-*maxlag*:*maxlag*].

    :type data: :class:`numpy.ndarray`
    :param data: This array contains the fft of each timeseries to be cross-correlated.
    :type maxlag: int
    :param maxlag: This number defines the number of samples (N=2*maxlag + 1) of the CCF that will be returned.

    :rtype: :class:`numpy.ndarray`
    :returns: The cross-correlation function between [-maxlag:maxlag]
    """
    # TODO: docstring
    if nfft is None:
        s1 = np.array(data[0].shape)
        shape = s1 - 1
        # Speed up FFT by padding to optimal size for FFTPACK
        fshape = [next_fast_len(int(d)) for d in shape]
        nfft = fshape[0]

    normalized = True
    allCpl = False

    maxlag = np.round(maxlag)

    Nt = data.shape[1]

    # data = scipy.fftpack.fft(data,int(Nfft),axis=1)

    if plot:
        import matplotlib.pyplot as plt
        plt.subplot(211)
        plt.plot(np.arange(len(data[0])) * 0.05, np.abs(data[0]))
        plt.subplot(212)
        plt.plot(np.arange(len(data[1])) * 0.05, np.abs(data[1]))

    corr = np.conj(data[0]) * data[1]
    corr = np.real(scipy.fftpack.ifft(corr, nfft)) / Nt
    corr = np.concatenate((corr[-Nt + 1:], corr[:Nt + 1]))

    if plot:
        plt.figure()
        plt.plot(corr)

    if normalized:
        E = np.prod(
            np.real(
                np.sqrt(
                    np.mean(scipy.fftpack.ifft(data, n=nfft, axis=1)**2,
                            axis=1))))
        corr /= np.real(E)

    if maxlag != Nt:
        tcorr = np.arange(-Nt + 1, Nt)
        dN = np.where(np.abs(tcorr) <= maxlag)[0]
        corr = corr[dN]

    del data
    return corr
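A hypothetical usage sketch for myCorr (array sizes and maxlag are illustrative; it assumes numpy as np, scipy.fftpack and next_fast_len are imported as in the snippet):

import numpy as np
import scipy.fftpack
from scipy.fftpack import next_fast_len

x = np.random.randn(2, 512)                 # two time series to correlate
nfft = next_fast_len(512)
data = scipy.fftpack.fft(x, nfft, axis=1)   # myCorr expects spectra as input
ccf = myCorr(data, maxlag=50, nfft=nfft)    # 101 samples, lags -50..+50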
Example #3
 def ww(a):
     from .move2obspy import whiten
     n = next_fast_len(len(a))
     return whiten(a,
                   n,
                   1. / params.cc_sampling_rate,
                   low,
                   high,
                   returntime=True)
Example #4
def convolve(f, g, mesh):
    f_ = f.reshape(*mesh)
    g_ = g.reshape(*mesh)
    shape = numpy.maximum(f_.shape, g_.shape)
    min_shape = numpy.array(f_.shape) + numpy.array(g_.shape) - 1
    fshape = [next_fast_len(d) for d in min_shape]
    fslice = tuple([slice(sz) for sz in shape])
    fq = numpy.fft.fftn(
        numpy.fft.ifftn(f_, s=fshape) * numpy.fft.ifftn(g_, s=fshape)).copy()
    return fq.ravel()
Example #5
def correlate(fft1, fft2, maxlag, Nfft=None, method='cross_correlation'):
    """This function takes the ndimensional *fft1* and *fft2* arrays, computes the cross-correlation in the frequency domain
    and returns the cross-correlation function between [-*maxlag*:*maxlag*].

    :type fft1: :class:`numpy.ndarray`
    :param fft1: This array contains the fft of each timeseries to be cross-correlated.
    :type maxlag: int
    :param maxlag: This number defines the number of samples (N=2*maxlag + 1) of the CCF that will be returned.

    :rtype: :class:`numpy.ndarray`
    :returns: The cross-correlation function between [-maxlag:maxlag]
    """
    # Speed up FFT by padding to optimal size for FFTPACK

    if fft1.ndim == 1:
        axis = 0
    elif fft1.ndim == 2:
        axis = 1

    if Nfft is None:
        Nfft = next_fast_len(int(fft1.shape[axis]))

    maxlag = np.round(maxlag)

    Nt = fft1.shape[axis]

    corr = fft1 * np.conj(fft2)
    if method == 'deconv':
        corr /= (noise.smooth(np.abs(fft2), half_win=20) ** 2 +
                 0.01 * np.mean(noise.smooth(np.abs(fft1), half_win=20), axis=1)[:, np.newaxis])
    elif method == 'coherence':
        corr /= (noise.smooth(np.abs(fft1), half_win=20) +
                 0.01 * np.mean(noise.smooth(np.abs(fft1), half_win=20), axis=1)[:, np.newaxis])
        corr /= (noise.smooth(np.abs(fft2), half_win=20) +
                 0.01 * np.mean(noise.smooth(np.abs(fft2), half_win=20), axis=1)[:, np.newaxis])

    corr = np.real(scipy.fftpack.ifft(corr, Nfft, axis=axis))
    if axis == 1:
        corr = np.concatenate((corr[:, -Nt//2 + 1:], corr[:, :Nt//2 + 1]), axis=axis)
    else:
        corr = np.concatenate((corr[-Nt//2 + 1:], corr[:Nt//2 + 1]), axis=axis)

    tcorr = np.arange(-Nt//2 + 1, Nt//2)
    ind = np.where(np.abs(tcorr) <= maxlag)[0]
    if axis == 1:
        corr = corr[:,ind]
    else:
        corr = corr[ind]

    return corr
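A usage sketch for the default 'cross_correlation' path, which avoids the noise.smooth dependency of the 'deconv' and 'coherence' branches (sizes are illustrative; assumes the module-level imports used by the snippet):

import numpy as np
import scipy.fftpack

sig = np.random.randn(2, 1024)
ffts = scipy.fftpack.fft(sig, axis=1)
# 2D input -> axis=1; the result keeps only lags with |lag| <= maxlag
ccf = correlate(ffts[0:1], ffts[1:2], maxlag=100)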
Example #6
def numpy_normxcorr(templates, stream, pads, *args, **kwargs):
    """
    Compute the normalized cross-correlation using numpy and bottleneck.

    :param templates: 2D Array of templates
    :type templates: np.ndarray
    :param stream: 1D array of continuous data
    :type stream: np.ndarray
    :param pads: List of ints of pad lengths in the same order as templates
    :type pads: list

    :return: np.ndarray of cross-correlations
    :return: np.ndarray channels used
    """
    import bottleneck
    from scipy.signal.signaltools import _centered

    # Generate a template mask
    used_chans = ~np.isnan(templates).any(axis=1)
    # Currently have to use float64 as bottleneck runs into issues with other
    # types: https://github.com/kwgoodman/bottleneck/issues/164
    stream = stream.astype(np.float64)
    templates = templates.astype(np.float64)
    template_length = templates.shape[1]
    stream_length = len(stream)
    assert stream_length > template_length, "Template must be shorter than " \
                                            "stream"
    fftshape = next_fast_len(template_length + stream_length - 1)
    # Set up normalizers
    stream_mean_array = bottleneck.move_mean(
        stream, template_length)[template_length - 1:]
    stream_std_array = bottleneck.move_std(
        stream, template_length)[template_length - 1:]
    # because stream_std_array is in the denominator of res, NaN all 0s
    stream_std_array[stream_std_array == 0] = np.nan
    # Normalize and flip the templates
    norm = ((templates - templates.mean(axis=-1, keepdims=True)) /
            (templates.std(axis=-1, keepdims=True) * template_length))
    norm_sum = norm.sum(axis=-1, keepdims=True)
    stream_fft = np.fft.rfft(stream, fftshape)
    template_fft = np.fft.rfft(np.flip(norm, axis=-1), fftshape, axis=-1)
    res = np.fft.irfft(template_fft * stream_fft,
                       fftshape)[:, 0:template_length + stream_length - 1]
    res = ((_centered(res,
                      (templates.shape[0], stream_length - template_length +
                       1))) - norm_sum * stream_mean_array) / stream_std_array
    res[np.isnan(res)] = 0.0

    for i, pad in enumerate(pads):
        res[i] = np.append(res[i], np.zeros(pad))[pad:]
    return res.astype(np.float32), used_chans
Example #7
def scipy_normxcorr(templates, stream, pads):
    """
    Compute the normalized cross-correlation of multiple templates with data.

    :param templates: 2D Array of templates
    :type templates: np.ndarray
    :param stream: 1D array of continuous data
    :type stream: np.ndarray
    :param pads: List of ints of pad lengths in the same order as templates
    :type pads: list

    :return: np.ndarray of cross-correlations
    :return: np.ndarray channels used
    """
    import bottleneck
    from scipy.signal.signaltools import _centered

    # Generate a template mask
    used_chans = ~np.isnan(templates).any(axis=1)
    # Currently have to use float64 as bottleneck runs into issues with other
    # types: https://github.com/kwgoodman/bottleneck/issues/164
    stream = stream.astype(np.float64)
    templates = templates.astype(np.float64)
    template_length = templates.shape[1]
    stream_length = len(stream)
    fftshape = next_fast_len(template_length + stream_length - 1)
    # Set up normalizers
    stream_mean_array = bottleneck.move_mean(
        stream, template_length)[template_length - 1:]
    stream_std_array = bottleneck.move_std(
        stream, template_length)[template_length - 1:]
    # Normalize and flip the templates
    norm = ((templates - templates.mean(axis=-1, keepdims=True)) /
            (templates.std(axis=-1, keepdims=True) * template_length))
    norm_sum = norm.sum(axis=-1, keepdims=True)
    stream_fft = np.fft.rfft(stream, fftshape)
    template_fft = np.fft.rfft(np.flip(norm, axis=-1), fftshape, axis=-1)
    res = np.fft.irfft(template_fft * stream_fft,
                       fftshape)[:, 0:template_length + stream_length - 1]
    res = ((_centered(res, stream_length - template_length + 1)) -
           norm_sum * stream_mean_array) / stream_std_array
    res[np.isnan(res)] = 0.0
    for i in range(len(pads)):
        res[i] = np.append(res[i], np.zeros(pads[i]))[pads[i]:]
    return res.astype(np.float32), used_chans
Example #8
def convolve(f, g, mesh):
    f_ = f.reshape(*mesh)
    g_ = g.reshape(*mesh)
    shape = numpy.maximum(f_.shape, g_.shape)
    min_shape = numpy.array(f_.shape) + numpy.array(g_.shape) - 1

    nqtot = numpy.prod(min_shape)

    fshape = [next_fast_len(d) for d in min_shape]

    finv = numpy.fft.ifftn(f_, s=fshape)
    ginv = numpy.fft.ifftn(g_, s=fshape)
    fginv = finv * ginv
    fq = numpy.fft.fftn(fginv).copy().ravel()
    fq = fq.reshape(fshape)
    fq = fq[:min_shape[0], :min_shape[1], :min_shape[2]]
    fq = fq.reshape(nqtot)
    return fq
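A small usage sketch, assuming numpy and next_fast_len are in scope as in the snippet (mesh and data are illustrative):

import numpy
from scipy.fftpack import next_fast_len  # assumed source of next_fast_len

mesh = (4, 4, 4)
f = numpy.random.rand(int(numpy.prod(mesh)))
g = numpy.random.rand(int(numpy.prod(mesh)))
fq = convolve(f, g, mesh)
assert fq.shape == (7 * 7 * 7,)  # min_shape = (7, 7, 7), flattened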
Example #9
    def __init__(self, psf_wrapper, flat_sky_proj):

        self._psf = psf_wrapper  # type: PSFWrapper
        self._flat_sky_proj = flat_sky_proj

        # Compute an image of the PSF on the current defined flat sky projection
        interpolator = PSFInterpolator(psf_wrapper, flat_sky_proj)
        psf_stamp = interpolator.point_source_image(flat_sky_proj.ra_center,
                                                    flat_sky_proj.dec_center)

        # Crop the kernel at the appropriate radius for this PSF (making sure it is divisible by 2)
        kernel_radius_px = int(
            np.ceil(self._psf.kernel_radius / flat_sky_proj.pixel_size))
        pixels_to_keep = kernel_radius_px * 2

        assert pixels_to_keep <= psf_stamp.shape[0] and \
               pixels_to_keep <= psf_stamp.shape[1], \
            "The kernel is too large with respect to the model image. Enlarge your model radius."

        xoff = (psf_stamp.shape[0] - pixels_to_keep) // 2
        yoff = (psf_stamp.shape[1] - pixels_to_keep) // 2

        self._kernel = psf_stamp[yoff:-yoff, xoff:-xoff]

        assert np.isclose(self._kernel.sum(), 1.0, rtol=1e-2), \
            "Failed to generate proper kernel normalization: got " \
            "_kernel.sum() = %f; expected 1.0+-0.01." % self._kernel.sum()

        # Renormalize to exactly 1
        self._kernel = self._kernel / self._kernel.sum()

        self._expected_shape = (flat_sky_proj.npix_height,
                                flat_sky_proj.npix_width)

        s1 = np.array(self._expected_shape)
        s2 = np.array(self._kernel.shape)

        shape = s1 + s2 - 1

        self._fshape = [helper.next_fast_len(int(d)) for d in shape]
        self._fslice = tuple([slice(0, int(sz)) for sz in shape])

        self._psf_fft = rfftn(self._kernel, self._fshape)
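For illustration, a hypothetical helper (apply_psf is not part of the original class) showing how the precomputed _psf_fft, _fshape and _fslice would typically be used to convolve a model image with the kernel:

import numpy as np
from numpy.fft import rfftn, irfftn

def apply_psf(image, psf_fft, fshape, fslice, expected_shape):
    # FFT-convolve the image with the precomputed kernel spectrum
    ret = irfftn(rfftn(image, fshape) * psf_fft, fshape)[fslice]
    # keep the central region matching the model image shape
    start = (np.array(ret.shape) - np.array(expected_shape)) // 2
    return ret[start[0]:start[0] + expected_shape[0],
               start[1]:start[1] + expected_shape[1]]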
Example #10
def check_and_phase_shift(trace):
    # print trace
    taper_length = 20.0
    if trace.stats.npts < 4 * taper_length * trace.stats.sampling_rate:
        trace.data = np.zeros(trace.stats.npts)
        return trace

    dt = np.mod(trace.stats.starttime.datetime.microsecond * 1.0e-6,
                trace.stats.delta)
    if (trace.stats.delta - dt) <= np.finfo(float).eps:
        dt = 0
    if dt != 0:
        if dt <= (trace.stats.delta / 2.):
            dt = -dt
            # direction = "left"
        else:
            dt = (trace.stats.delta - dt)
            # direction = "right"
        logging.debug("correcting time by %.6fs" % dt)
        trace.detrend(type="demean")
        trace.detrend(type="simple")
        trace.taper(max_percentage=None, max_length=1.0)

        n = next_fast_len(int(trace.stats.npts))
        FFTdata = scipy.fftpack.fft(trace.data, n=n)
        fftfreq = scipy.fftpack.fftfreq(n, d=trace.stats.delta)
        FFTdata = FFTdata * np.exp(1j * 2. * np.pi * fftfreq * dt)
        FFTdata = FFTdata.astype(np.complex64)
        FFTdata = scipy.fftpack.ifft(FFTdata, n=n, overwrite_x=True)
        trace.data = np.real(FFTdata[:len(trace.data)])
        trace.stats.starttime += dt
        del FFTdata, fftfreq
        clean_scipy_cache()
        return trace
    else:
        return trace
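The core trick above is the Fourier shift theorem: multiplying the spectrum by exp(2j*pi*f*dt) shifts the series by dt seconds, even for sub-sample dt. A minimal standalone sketch (sample rate and signal are illustrative):

import numpy as np
import scipy.fftpack

fs = 100.0                                   # Hz
x = np.sin(2 * np.pi * 5 * np.arange(256) / fs)
dt = 0.003                                   # sub-sample shift in seconds
n = 256
X = scipy.fftpack.fft(x, n=n)
f = scipy.fftpack.fftfreq(n, d=1.0 / fs)
x_shifted = np.real(scipy.fftpack.ifft(X * np.exp(1j * 2 * np.pi * f * dt)))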
Example #11
def fftwconvolve_1d(in1, in2):
    '''
    This code is taken from:
    https://stackoverflow.com/questions/32028979/speed-up-for-loop-in-convolution-for-numpy-3d-array
    The full discrete linear convolution of the inputs (i.e. the convolution
    at each point of overlap) includes additional terms at the start and end
    of the array, such that if A has size N and B has size M, the convolved
    size is N+M-1. This implementation computes that full convolution
    internally but returns only the central part, with the same length as
    in1. At the end-points of the convolution the signals do not overlap
    completely, so boundary effects may be seen.

    Args:
        in1 (array): First input array.
        in2 (array): Second input array.

    Returns:
        array: Linear convolution of the inputs.
    '''

    outlen = in1.shape[-1] + in2.shape[-1] - 1
    origlen = in1.shape[-1]
    n = next_fast_len(outlen)
    tr1 = pyfftw.interfaces.numpy_fft.rfft(in1, n)
    tr2 = pyfftw.interfaces.numpy_fft.rfft(in2, n)
    sh = np.broadcast(tr1, tr2).shape
    dt = np.common_type(tr1, tr2)
    pr = pyfftw.n_byte_align_empty(sh, 16, dt)
    np.multiply(tr1, tr2, out=pr)
    out = pyfftw.interfaces.numpy_fft.irfft(pr, n)

    # Find the central indices of the resulting array
    index_low = int(outlen / 2.) - int(np.floor(origlen / 2))
    index_high = int(outlen / 2.) + int(np.ceil(origlen / 2))

    # Return an array the same length as the input.
    # Boundary effects are still visible and when overlap is not
    # complete zero values are assumed I believe.
    return out[..., index_low:index_high].copy()
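A quick sanity check, assuming pyfftw is installed: for equal-length inputs, the central slice returned above should match numpy's direct 'same'-mode convolution up to FFT round-off.

import numpy as np

a = np.random.randn(1000)
b = np.random.randn(1000)
out = fftwconvolve_1d(a, b)
ref = np.convolve(a, b, mode='same')
print(np.allclose(out, ref))  # expected: True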
Example #12
    def test_next_opt_len(self):
        random.seed(1234)

        def nums():
            for j in range(1, 1000):
                yield j
            yield 2*2*2*2*2 * 3*3*3*3*3 * 4*4*4*4*4 + 1

        for n in nums():
            m = next_fast_len(n)
            msg = "n=%d, m=%d" % (n, m)

            assert_(m >= n, msg)

            # check regularity
            k = m
            for d in [2, 3, 5]:
                while True:
                    a, b = divmod(k, d)
                    if b == 0:
                        k = a
                    else:
                        break
            assert_equal(k, 1, err_msg=msg)
Example #13
File: filter.py, Project: ahesford/pycwp
def sgimgcoeffs(img, *args, **kwargs):
	'''
	Given a 3-D image img with shape (nx, ny, nz), use Savitzky-Golay
	stencils from savgol(*args, **kwargs) to compute the filtered image
	coeffs with shape (nx, ny, nz, ns) such that
	coeffs[:,:,:,i] holds the convolution of img with the i-th stencil.

	If the image is of single precision, the filter correlation will be done
	in single-precision; otherwise, double precision will be used.

	The pyfftw module will be used, if available, to accelerate FFT
	correlations. Otherwise, the stock Numpy FFT will be used.
	'''
	# Create the stencils first
	stencils = savgol(*args, **kwargs)
	if not stencils: raise ValueError('Savitzky-Golay stencil list is empty')

	# Interpret the input as an array and validate its dimensionality
	img = np.asarray(img)
	if img.ndim != 3: raise ValueError('Image img must be three-dimensional')

	# If possible, find the next-larger efficient size
	try: from scipy.fftpack.helper import next_fast_len
	except ImportError: next_fast_len = lambda x: x

	# Half-sizes of kernels along each axis
	hsizes = tuple(bsz // 2 for bsz in stencils[0].shape)

	# Padded shape for FFT convolution and the R2C FFT output
	pshape = tuple(next_fast_len(isz + 2 * bsz)
			for isz, bsz in zip(img.shape, hsizes))

	if img.dtype == np.dtype('float32'):
		ftype, ctype = np.dtype('float32'), np.dtype('complex64')
	else:
		ftype, ctype = np.dtype('float64'), np.dtype('complex128')

	try:
		import pyfftw
	except ImportError:
		from numpy.fft import rfftn, irfftn
		empty = np.empty
		use_fftw = False
	else:
		# Use aligned arrays and explicit plans for efficient PyFFTW transforms
		empty = pyfftw.empty_aligned
		use_fftw = True

	# Build working and output arrays
	kernel = empty(pshape, dtype=ftype)
	output = empty(img.shape + (len(stencils),), dtype=ftype)

	if use_fftw:
		# Need to create output arrays and plan both FFTs
		krfft = empty(pshape[:-1] + (pshape[-1] // 2 + 1,), dtype=ctype)
		rfftn = pyfftw.FFTW(kernel, krfft, axes=(0, 1, 2))
		irfftn = pyfftw.FFTW(krfft, kernel,
				axes=(0, 1, 2), direction='FFTW_BACKWARD')

	m,n,p = img.shape

	# Copy the image, leaving space for boundaries
	kernel[:,:,:] = 0.
	kernel[:m,:n,:p] = img

	# For right boundaries, watch for running off left end with small arrays
	for ax, (ld, hl) in enumerate(zip(img.shape, hsizes)):
		# Build the slices for boundary values
		lslices = [slice(None)]*3
		rslices = [slice(None)]*3

		# Left boundaries are straightforward
		lslices[ax] = slice(hl, 0, -1)
		rslices[ax] = slice(-hl, None)
		kernel[tuple(rslices)] = kernel[tuple(lslices)]

		# Don't walk off left edge when mirroring right boundary
		hi = ld - 1
		lo = max(hi - hl, 0)
		lslices[ax] = slice(lo, hi)
		rslices[ax] = slice(2 * hi - lo, hi, -1)
		kernel[tuple(rslices)] = kernel[tuple(lslices)]

	# Compute the image FFT
	if use_fftw:
		rfftn.execute()
		imfft = krfft.copy()
	else: imfft = rfftn(kernel)

	i,j,k = hsizes
	t,u,v = stencils[0].shape

	for l, stencil in enumerate(stencils):
		# Clear the kernel storage and copy the stencil
		kernel[:,:,:] = 0.
		kernel[:t,:u,:v] = stencil[::-1,::-1,::-1]
		if use_fftw:
			rfftn.execute()
			krfft[:,:,:] *= imfft
			irfftn(normalise_idft=True)
		else: kernel = irfftn(rfftn(kernel) * imfft)
		output[:,:,:,l] = kernel[i:i+m,j:j+n,k:k+p]

	return output
Example #14
 def test_next_opt_len_strict(self):
     hams = {
         1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15,
         16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000,
         510183360: 510183360, 510183360 + 1: 512000000,
         511000000: 512000000,
         854296875: 854296875, 854296875 + 1: 859963392,
         196608000000: 196608000000, 196608000000 + 1: 196830000000,
         8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208,
         206391214080000: 206391214080000,
         206391214080000 + 1: 206624260800000,
         470184984576000: 470184984576000,
         470184984576000 + 1: 470715894135000,
         7222041363087360: 7222041363087360,
         7222041363087360 + 1: 7230196133913600,
         # power of 5    5**23
         11920928955078125: 11920928955078125,
         11920928955078125 - 1: 11920928955078125,
         # power of 3    3**34
         16677181699666569: 16677181699666569,
         16677181699666569 - 1: 16677181699666569,
         # power of 2   2**54
         18014398509481984: 18014398509481984,
         18014398509481984 - 1: 18014398509481984,
         # above this, int(ceil(n)) == int(ceil(n+1))
         19200000000000000: 19200000000000000,
         19200000000000000 + 1: 19221679687500000,
         288230376151711744: 288230376151711744,
         288230376151711744 + 1: 288325195312500000,
         288325195312500000 - 1: 288325195312500000,
         288325195312500000: 288325195312500000,
         288325195312500000 + 1: 288555831593533440,
         # power of 3    3**83
         3990838394187339929534246675572349035227 - 1:
             3990838394187339929534246675572349035227,
         3990838394187339929534246675572349035227:
             3990838394187339929534246675572349035227,
         # power of 2     2**135
         43556142965880123323311949751266331066368 - 1:
             43556142965880123323311949751266331066368,
         43556142965880123323311949751266331066368:
             43556142965880123323311949751266331066368,
         # power of 5      5**57
         6938893903907228377647697925567626953125 - 1:
             6938893903907228377647697925567626953125,
         6938893903907228377647697925567626953125:
             6938893903907228377647697925567626953125,
         # http://www.drdobbs.com/228700538
         # 2**96 * 3**1 * 5**13
         290142196707511001929482240000000000000 - 1:
             290142196707511001929482240000000000000,
         290142196707511001929482240000000000000:
             290142196707511001929482240000000000000,
         290142196707511001929482240000000000000 + 1:
             290237644800000000000000000000000000000,
         # 2**36 * 3**69 * 5**7
         4479571262811807241115438439905203543080960000000 - 1:
             4479571262811807241115438439905203543080960000000,
         4479571262811807241115438439905203543080960000000:
             4479571262811807241115438439905203543080960000000,
         4479571262811807241115438439905203543080960000000 + 1:
             4480327901140333639941336854183943340032000000000,
         # 2**37 * 3**44 * 5**42
         30774090693237851027531250000000000000000000000000000000000000 - 1:
             30774090693237851027531250000000000000000000000000000000000000,
         30774090693237851027531250000000000000000000000000000000000000:
             30774090693237851027531250000000000000000000000000000000000000,
         30774090693237851027531250000000000000000000000000000000000000 + 1:
             30778180617309082445871527002041377406962596539492679680000000,
     }
     for x, y in hams.items():
         assert_equal(next_fast_len(x), y)
Example #15
def mwcs(ccCurrent, ccReference, fmin, fmax, sampRate, tmin, windL, step,
         plot=False):
    """...

    :type ccCurrent: :class:`numpy.ndarray`
    :param ccCurrent: The "Current" timeseries
    :type ccReference: :class:`numpy.ndarray`
    :param ccReference: The "Reference" timeseries
    :type fmin: float
    :param fmin: The lower frequency bound to compute the dephasing
    :type fmax: float
    :param fmax: The higher frequency bound to compute the dephasing
    :type sampRate: float
    :param sampRate: The sample rate of the input timeseries
    :type tmin: float
    :param tmin: The leftmost time lag (used to compute the "time lags array")
    :type windL: float
    :param windL: The moving window length
    :type step: float
    :param step: The step to jump for the moving window
    :type plot: bool
    :param plot: If True, plots the MWCS result for each window. Defaults to
        False

    :rtype: :class:`numpy.ndarray`
    :returns: [Taxis,deltaT,deltaErr,deltaMcoh]. Taxis contains the central
        times of the windows. The three other columns contain dt, error and
        mean coherence for each window.
    """

    windL = int(windL * sampRate)
    step = int(step * sampRate)
    count = 0
    deltaT = []
    deltaErr = []
    deltaMcoh = []
    Taxis = []
    padd = next_fast_len(windL)

    # Tentatively check whether enough points are available to compute the FFT
    freqVec = scipy.fftpack.fftfreq(int(padd), 1. / sampRate)[:int(padd) // 2]
    indRange = np.argwhere(np.logical_and(freqVec >= fmin,
                                          freqVec <= fmax))
    if len(indRange) < 2:
        padd = 2 ** (nextpow2(windL) + 3)

    tp = cosine_taper(windL, .85)

    timeaxis = (np.arange(len(ccCurrent)) / float(sampRate)) + tmin
    minind = 0
    maxind = windL
    while maxind <= len(ccCurrent):
        ind = minind

        cci = ccCurrent[ind:(ind + windL)].copy()
        cci = scipy.signal.detrend(cci, type='linear')
        cci -= cci.min()
        cci /= cci.max()
        cci -= np.mean(cci)
        cci *= tp

        cri = ccReference[ind:(ind + windL)].copy()
        cri = scipy.signal.detrend(cri, type='linear')
        cri -= cri.min()
        cri /= cri.max()
        cri -= np.mean(cri)
        cri *= tp

        Fcur = scipy.fftpack.fft(cci, n=int(padd))[:int(padd) // 2]
        Fref = scipy.fftpack.fft(cri, n=int(padd))[:int(padd) // 2]

        Fcur2 = np.real(Fcur) ** 2 + np.imag(Fcur) ** 2
        Fref2 = np.real(Fref) ** 2 + np.imag(Fref) ** 2

        smoother = 5

        dcur = np.sqrt(smooth(Fcur2, window='hanning', half_win=smoother))
        dref = np.sqrt(smooth(Fref2, window='hanning', half_win=smoother))

        # Calculate the cross-spectrum
        X = Fref * (Fcur.conj())
        X = smooth(X, window='hanning', half_win=smoother)
        dcs = np.abs(X)

        # Find the values the frequency range of interest
        freqVec = scipy.fftpack.fftfreq(len(X) * 2, 1. / sampRate)[
                  :int(padd) // 2]
        indRange = np.argwhere(np.logical_and(freqVec >= fmin,
                                              freqVec <= fmax))

        # Get Coherence and its mean value
        coh = getCoherence(dcs, dref, dcur)
        mcoh = np.mean(coh[indRange])

        # Get Weights
        w = 1.0 / (1.0 / (coh[indRange] ** 2) - 1.0)
        w[coh[indRange] >= 0.99] = 1.0 / (1.0 / 0.9801 - 1.0)
        w = np.sqrt(w * np.sqrt(dcs[indRange]))
        # w /= (np.sum(w)/len(w)) #normalize
        w = np.real(w)

        # Frequency array:
        v = np.real(freqVec[indRange]) * 2 * np.pi
        vo = np.real(freqVec) * 2 * np.pi

        # Phase:
        phi = np.angle(X)
        phi[0] = 0.
        phi = np.unwrap(phi)
        phio = phi.copy()  # keep the full unwrapped phase for the plot below
        phi = phi[indRange]

        # Calculate the slope with a weighted least square linear regression
        # forced through the origin
        # weights for the WLS must be the variance !
        res = sm.regression.linear_model.WLS(phi, v, w ** 2).fit()

        # print "forced", np.real(res.params[0])
        # print "!forced", np.real(res2.params[0])

        m = np.real(res.params[0])
        deltaT.append(m)

        # print phi.shape, v.shape, w.shape
        e = np.sum((phi - m * v) ** 2) / (np.size(v) - 1)
        s2x2 = np.sum(v ** 2 * w ** 2)
        sx2 = np.sum(w * v ** 2)
        e = np.sqrt(e * s2x2 / sx2 ** 2)
        # print w.shape
        if plot:
            plt.figure()
            plt.suptitle('%.1fs' % (timeaxis[ind + windL // 2]))
            plt.subplot(311)
            plt.plot(cci)
            plt.plot(cri)
            ax = plt.subplot(312)
            plt.plot(vo / (2 * np.pi), phio)
            plt.scatter(v / (2 * np.pi), phi, c=w, edgecolor='none',
                        vmin=0.6, vmax=1)
            plt.subplot(313, sharex=ax)
            plt.plot(v / (2 * np.pi), coh[indRange])
            plt.axhline(mcoh, c='r')
            plt.axhline(1.0, c='k', ls='--')
            plt.xlim(-0.1, 1.5)
            plt.ylim(0, 1.5)
            plt.show()

        deltaErr.append(e)
        deltaMcoh.append(np.real(mcoh))
        Taxis.append(timeaxis[ind + windL // 2])
        count += 1

        minind += step
        maxind += step
        del Fcur, Fref
        del X
        del freqVec
        del indRange
        del w, v, e, s2x2
        del res

    if maxind > len(ccCurrent) + step:
        logging.warning("The last window was too small, but was computed")

    return np.array([Taxis, deltaT, deltaErr, deltaMcoh]).T
Example #16
def main():
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s [%(levelname)s] %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    logging.info('*** Starting: Compute CC ***')

    # Connection to the DB
    db = connect()

    if len(get_filters(db, all=False)) == 0:
        logging.info("NO FILTERS DEFINED, exiting")
        sys.exit()

    # Get Configuration
    params = Params()
    params.goal_sampling_rate = float(get_config(db, "cc_sampling_rate"))
    params.goal_duration = float(get_config(db, "analysis_duration"))
    params.overlap = float(get_config(db, "overlap"))
    params.maxlag = float(get_config(db, "maxlag"))
    params.min30 = float(get_config(db, "corr_duration")) * params.goal_sampling_rate
    params.windsorizing = float(get_config(db, "windsorizing"))
    params.resampling_method = get_config(db, "resampling_method")
    params.decimation_factor = int(get_config(db, "decimation_factor"))
    params.preprocess_lowpass = float(get_config(db, "preprocess_lowpass"))
    params.preprocess_highpass = float(get_config(db, "preprocess_highpass"))
    params.keep_all = get_config(db, 'keep_all', isbool=True)
    params.keep_days = get_config(db, 'keep_days', isbool=True)
    params.components_to_compute = get_components_to_compute(db)

    params.stack_method = get_config(db, 'stack_method')
    params.pws_timegate = float(get_config(db, 'pws_timegate'))
    params.pws_power = float(get_config(db, 'pws_power'))

    logging.info("Will compute %s" % " ".join(params.components_to_compute))

    while is_next_job(db, jobtype='CC'):
        jobs = get_next_job(db, jobtype='CC')
        stations = []
        pairs = []
        refs = []

        for job in jobs:
            refs.append(job.ref)
            pairs.append(job.pair)
            netsta1, netsta2 = job.pair.split(':')
            stations.append(netsta1)
            stations.append(netsta2)
            goal_day = job.day

        stations = np.unique(stations)

        logging.info("New CC Job: %s (%i pairs with %i stations)" %
                     (goal_day, len(pairs), len(stations)))
        jt = time.time()

        xlen = int(params.goal_duration * params.goal_sampling_rate)

        if ''.join(params.components_to_compute).count('R') > 0 or \
                ''.join(params.components_to_compute).count('T') > 0:
            comps = ['Z', 'E', 'N']
            tramef_Z = np.zeros((len(stations), xlen))
            tramef_E = np.zeros((len(stations), xlen))
            tramef_N = np.zeros((len(stations), xlen))
            basetime, tramef_Z, tramef_E, tramef_N = preprocess(db, stations, comps, goal_day, params, tramef_Z, tramef_E, tramef_N)

        else:
            comps = ['Z']
            tramef_Z = np.zeros((len(stations), xlen))
            basetime, tramef_Z = preprocess(db, stations, comps, goal_day, params, tramef_Z)


        # print '##### STREAMS ARE ALL PREPARED AT goal Hz #####'
        dt = 1. / params.goal_sampling_rate

        begins = []
        ends = []
        i = 0
        while i <= (params.goal_duration - params.min30/params.goal_sampling_rate):
            begins.append(int(i * params.goal_sampling_rate))
            ends.append(int(i * params.goal_sampling_rate + params.min30))
            i += int(params.min30/params.goal_sampling_rate * (1.0-params.overlap))

        # ITERATING OVER PAIRS #####
        for pair in pairs:
            orig_pair = pair

            logging.info('Processing pair: %s' % pair.replace(':', ' vs '))
            tt = time.time()
            station1, station2 = pair.split(':')
            pair = (np.where(stations == station1)
                    [0][0], np.where(stations == station2)[0][0])

            s1 = get_station(db, station1.split('.')[0], station1.split('.')[1])
            s2 = get_station(db, station2.split('.')[0], station2.split('.')[1])

            if s1.X:
                X0 = s1.X
                Y0 = s1.Y
                c0 = s1.coordinates

                X1 = s2.X
                Y1 = s2.Y
                c1 = s2.coordinates

                if c0 == c1:
                    coordinates = c0
                else:
                    coordinates = 'MIX'

                cplAz = np.deg2rad(azimuth(coordinates, X0, Y0, X1, Y1))
                logging.debug("Azimuth=%.1f"%np.rad2deg(cplAz))
            else:
                # logging.debug('No Coordinates found! Skipping azimuth calculation!')
                cplAz = 0.

            for components in params.components_to_compute:

                if components == "ZZ":
                    t1 = tramef_Z[pair[0]]
                    t2 = tramef_Z[pair[1]]
                elif components[0] == "Z":
                    t1 = tramef_Z[pair[0]]
                    t2 = tramef_E[pair[1]]
                elif components[1] == "Z":
                    t1 = tramef_E[pair[0]]
                    t2 = tramef_Z[pair[1]]
                else:
                    t1 = tramef_E[pair[0]]
                    t2 = tramef_E[pair[1]]
                if np.all(t1 == 0) or np.all(t2 == 0):
                    logging.debug("%s contains empty trace(s), skipping"%components)
                    continue
                del t1, t2

                if components[0] == "Z":
                    t1 = tramef_Z[pair[0]]
                elif components[0] == "R":
                    if cplAz != 0:
                        t1 = tramef_N[pair[0]] * np.cos(cplAz) +\
                             tramef_E[pair[0]] * np.sin(cplAz)
                    else:
                        t1 = tramef_E[pair[0]]

                elif components[0] == "T":
                    if cplAz != 0:
                        t1 = tramef_N[pair[0]] * np.sin(cplAz) -\
                             tramef_E[pair[0]] * np.cos(cplAz)
                    else:
                        t1 = tramef_N[pair[0]]

                if components[1] == "Z":
                    t2 = tramef_Z[pair[1]]
                elif components[1] == "R":
                    if cplAz != 0:
                        t2 = tramef_N[pair[1]] * np.cos(cplAz) +\
                             tramef_E[pair[1]] * np.sin(cplAz)
                    else:
                        t2 = tramef_E[pair[1]]
                elif components[1] == "T":
                    if cplAz != 0:
                        t2 = tramef_N[pair[1]] * np.sin(cplAz) -\
                             tramef_E[pair[1]] * np.cos(cplAz)
                    else:
                        t2 = tramef_N[pair[1]]

                trames = np.vstack((t1, t2))
                del t1, t2

                daycorr = {}
                ndaycorr = {}
                allcorr = {}
                for filterdb in get_filters(db, all=False):
                    filterid = filterdb.ref
                    daycorr[filterid] = np.zeros(get_maxlag_samples(db,))
                    ndaycorr[filterid] = 0

                for islice, (begin, end) in enumerate(zip(begins, ends)):
                    trame2h = trames[:, begin:end]
                    nfft = next_fast_len(int(trame2h.shape[1]))
                    rmsmat = np.std(trame2h, axis=1)
                    for filterdb in get_filters(db, all=False):
                        filterid = filterdb.ref
                        low = float(filterdb.low)
                        high = float(filterdb.high)
                        rms_threshold = filterdb.rms_threshold

                        # Nfft = int(params.min30)
                        # if params.min30 / 2 % 2 != 0:
                        #     Nfft = params.min30 + 2

                        trames2hWb = np.zeros((2, int(nfft)), dtype=np.complex128)
                        skip = False
                        for i, station in enumerate(pair):
                            if rmsmat[i] > rms_threshold:
                                cp = cosine_taper(len(trame2h[i]),0.04)
                                trame2h[i] -= trame2h[i].mean()


                                trames2hWb[i] = whiten(
                                    trame2h[i]*cp, nfft, dt, low, high, plot=False)


                                if params.windsorizing == -1:
                                    trames2hWb[i] = np.sign(trames2hWb[i])
                                elif params.windsorizing != 0:
                                    indexes = np.where(
                                        np.abs(trames2hWb[i]) > (params.windsorizing * rmsmat[i]))[0]
                                    # clipping at windsorizing*rms
                                    trames2hWb[i][indexes] = (trames2hWb[i][indexes] / np.abs(
                                        trames2hWb[i][indexes])) * params.windsorizing * rmsmat[i]


                            else:
                                trames2hWb[i] = np.zeros(int(nfft))
                                skip = True
                                logging.debug('Slice RMS is smaller (%e) than rms_threshold (%e)!'
                                              % (rmsmat[i], rms_threshold))
                        if not skip:
                            corr = myCorr(trames2hWb, np.ceil(params.maxlag / dt), plot=False, nfft=nfft)
                            tmptime = time.gmtime(basetime + begin /
                                                  params.goal_sampling_rate)
                            thisdate = time.strftime("%Y-%m-%d", tmptime)
                            thistime = time.strftime("%Y-%m-%d %H:%M:%S",
                                                     tmptime)
                            if params.keep_all or params.keep_days:
                                ccfid = "%s_%s_%s_%s_%s" % (station1, station2,
                                                         filterid, components,
                                                         thisdate)
                                if ccfid not in allcorr:
                                    allcorr[ccfid] = {}
                                allcorr[ccfid][thistime] = corr

                            if params.keep_days:
                                if not np.any(np.isnan(corr)) and \
                                        not np.any(np.isinf(corr)):
                                    daycorr[filterid] += corr
                                    ndaycorr[filterid] += 1

                            del corr, thistime, trames2hWb

                if params.keep_all:
                    for ccfid in allcorr.keys():
                        export_allcorr(db, ccfid, allcorr[ccfid])

                if params.keep_days:
                    for ccfid in allcorr.keys():
                        station1, station2, filterid, components, date = ccfid.split('_')

                        corrs = np.asarray(list(allcorr[ccfid].values()))
                        corr = stack(db, corrs)

                        thisdate = time.strftime(
                                    "%Y-%m-%d", time.gmtime(basetime))
                        thistime = time.strftime(
                                    "%H_%M", time.gmtime(basetime))
                        add_corr(
                                db, station1.replace('.', '_'),
                                station2.replace('.', '_'), int(filterid),
                                thisdate, thistime,  params.min30 /
                                params.goal_sampling_rate,
                                components, corr,
                                params.goal_sampling_rate, day=True,
                                ncorr=corrs.shape[0])
                del trames, daycorr, ndaycorr
            logging.debug("Updating Job")
            update_job(db, goal_day, orig_pair, 'CC', 'D')

            logging.info("Finished processing this pair. It took %.2f seconds" % (time.time() - tt))
        logging.info("Job Finished. It took %.2f seconds" % (time.time() - jt))
    logging.info('*** Finished: Compute CC ***')
Example #17
def gen_filt_scale_rate(scale_ctr, rate_ctr, scale_params, rate_params, filt_dir,filt_out_domain='sr'):
    """
    This function generates a 2D-impulse response in the time-frequency
    domain with dilation factors S and R:
    The impulse response is denoted by h(omega, tau; S, R), where
    omega: frequency, tau: time,
    S: filter center on the scale axis, R: filter center on the rate axis

    Inputs:
    scale_ctr: filter center along the scale axis
    rate_ctr: filter center along the rate axis
    scale_params: dictionary containing the parameters of the spectral filter, including
                  scale_filt_len: length of the spectral filter impulse response
                  samprate_spec: sample rate along the freq. axis (cyc/oct)
                  type: string argument indicating the filter type
                        ('bandpass','lowpass','highpass')

    Example: scale_params = {'scale_filt_len':100,'samprate_spec':12,'type':'bandpass'}

    rate_params: dictionary containing the parameters of the temporal filter, including
                 time_const: exponent coefficient of the exponential term
                 rate_filt_len: length of the temporal filter impulse response
                 samprate_temp: sample rate along the time axis (cyc/sec)
                 type: string argument indicating the type of the filter
                      ('bandpass','lowpass','highpass')

    Example: rate_params = {'time_const':1, 'rate_filt_len':200, 'samprate_temp':20, 'type':'lowpass'}

    filt_dir: string, determines the moving direction of the filter
              'none' (full s-r domain)
              'up' (upward analytic, nonzero over upper left (2nd) and lower right (4th) quadrants)
              'down' (downward analytic, nonzero over upper right (1st) and lower left (3rd) quadrants)

    filt_out_domain: string indicating the representation domain of the filter
                     'sr' (default): filter representation in the scale-rate domain
                     'tf': filter representation in the time-frequency domain, involves an extra 2D-IFT computation
                     'all': return filter representations in both domains

    Output:
    filt_out: numpy array containing the 2D filter represented in the scale-rate or time-frequency domain
                 or list of length 2 containing filter representations in both domains

    Author: Fatemeh Pishdadian ([email protected])
    """

    ### extract filter parameters

    # scale filter
    scale_filt_len = scale_params['scale_filt_len']
    samprate_spec = scale_params['samprate_spec']
    scale_filt_type = scale_params['type']

    # rate filter
    beta = rate_params['time_const']
    rate_filt_len = rate_params['rate_filt_len']
    samprate_temp = rate_params['samprate_temp']
    rate_filt_type = rate_params['type']

    ### frequency and time vectors

    # zero-pad filters to the next 5-smooth number
    scale_filt_len = helper.next_fast_len(scale_filt_len) # scale_filt_len + np.mod(scale_filt_len, 2)
    rate_filt_len = helper.next_fast_len(rate_filt_len) # rate_filt_len + np.mod(rate_filt_len, 2)

    # generate frequency and time vectors
    freq_vec = np.arange(scale_filt_len,dtype='float64')/samprate_spec
    time_vec = np.arange(rate_filt_len,dtype='float64')/samprate_temp


    ### impulse response of the original scale filter: Gaussian
    scale_filt = scale_ctr * (1 - 2 * (scale_ctr * np.pi * freq_vec)**2) * np.exp(-((scale_ctr * freq_vec * np.pi)**2))
    # make it even so the transform is real
    scale_filt = np.append(scale_filt[0:int(scale_filt_len/2)+1],scale_filt[int(scale_filt_len/2)-1:0:-1])

    ### impulse response of the original rate filter
    rate_filt = rate_ctr * (rate_ctr*time_vec)**2 * np.exp(-time_vec * beta * rate_ctr) * np.sin(2 * np.pi * rate_ctr * time_vec)
    # remove the DC element
    rate_filt = rate_filt - np.mean(rate_filt)
    # if the magnitude of dc element is set to zero by subtracting the mean of hr, make sure the phase is
    # also set to zero to avoid any computational error
    if np.abs(np.mean(rate_filt)) < 1e-16:
        correct_rate_phase = 1
    else:
        correct_rate_phase = 0

    ### scale response (Fourier transform of the scale impulse response)

    # bandpass scale filter
    scale_filt_fft = np.abs(np.fft.fft(scale_filt,n=scale_filt_len)).astype('complex128') # discard negligible imaginary parts

    # low/high-pass scale filter
    if scale_filt_type != 'bandpass':
        scale_filt_fft_1 = scale_filt_fft[0:int(scale_filt_len/2)+1]
        scale_filt_fft_1 /= np.max(scale_filt_fft_1)
        max_idx_1 = np.squeeze(np.argwhere(scale_filt_fft_1 == np.max(scale_filt_fft_1)))

        scale_filt_fft_2 = scale_filt_fft[int(scale_filt_len/2)+1::]
        scale_filt_fft_2 /= np.max(scale_filt_fft_2)
        max_idx_2 = np.squeeze(np.argwhere(scale_filt_fft_2 == np.max(scale_filt_fft_2)))


        if scale_filt_type == 'lowpass':
            scale_filt_fft_1[0:max_idx_1] = 1
            scale_filt_fft_2[max_idx_2+1::] = 1

        elif scale_filt_type == 'highpass':
            scale_filt_fft_1[max_idx_1+1::] = 1
            scale_filt_fft_2[0:max_idx_2] = 1

        # form the full magnitude spectrum
        scale_filt_fft = np.append(scale_filt_fft_1, scale_filt_fft_2)



    ### rate response (Fourier transform of the rate impulse response)

    # band-pass rate filter
    rate_filt_fft = np.fft.fft(rate_filt, n=rate_filt_len) # rate response is complex

    # low/high-pass rate filter
    if rate_filt_type != 'bandpass':
        rate_filt_phase = np.unwrap(np.angle(rate_filt_fft))
        if correct_rate_phase:
            rate_filt_phase[0] = 0
        rate_filt_mag = np.abs(rate_filt_fft)

        rate_filt_mag_1 = rate_filt_mag[0:int(rate_filt_len/2)+1]
        rate_filt_mag_1 /= np.max(rate_filt_mag_1)
        max_idx_1 = np.squeeze(np.argwhere(rate_filt_mag_1 == np.max(rate_filt_mag_1)))

        rate_filt_mag_2 = rate_filt_mag[int(rate_filt_len/2)+1::]
        rate_filt_mag_2 /= np.max(rate_filt_mag_2)
        max_idx_2 = np.squeeze(np.argwhere(rate_filt_mag_2 == np.max(rate_filt_mag_2)))

        if rate_filt_type == 'lowpass':
            rate_filt_mag_1[0:max_idx_1] = 1
            rate_filt_mag_2[max_idx_2+1::] = 1

        elif rate_filt_type == 'highpass':
            rate_filt_mag_1[max_idx_1+1::] = 1
            rate_filt_mag_2[0:max_idx_2+1] = 1

        # form the full magnitude spectrum
        rate_filt_mag = np.append(rate_filt_mag_1,rate_filt_mag_2)
        # form the full Fourier transform
        rate_filt_fft = rate_filt_mag * np.exp(1j * rate_filt_phase)


    ### full scale-rate impulse and transform responses

    # filt_sr_full is quadrant separable
    scale_filt_fft = np.expand_dims(scale_filt_fft,axis=1)
    rate_filt_fft = np.expand_dims(rate_filt_fft,axis=0)

    filt_sr_full = np.matmul(scale_filt_fft, rate_filt_fft)


    # normalize the filter magnitude
    filt_sr_full_mag = np.abs(filt_sr_full)
    filt_sr_full_mag /= np.max(filt_sr_full_mag)

    filt_sr_full_phase = np.angle(filt_sr_full)
    filt_sr_full = filt_sr_full_mag * np.exp(1j * filt_sr_full_phase)


    # upward or downward direction
    if filt_dir == 'up':
        # compute the upward version of the scale-rate response
        filt_sr_up = filt_sr_full
        filt_sr_up[1:int(scale_filt_len/2)+1, 1:int(rate_filt_len/2)+1] = 0
        filt_sr_up[int(scale_filt_len/2)+1::,int(rate_filt_len/2)+1::] = 0
        filt_sr_domain = filt_sr_up

    elif filt_dir == 'down':
        # compute the downward version of the scale-rate response
        filt_sr_down = filt_sr_full
        filt_sr_down[1:int(scale_filt_len/2)+1,int(rate_filt_len/2)+1::] = 0
        filt_sr_down[int(scale_filt_len/2)+1::,1:int(rate_filt_len/2)+1] = 0
        filt_sr_domain = filt_sr_down

    else:
        filt_sr_domain = filt_sr_full


    if filt_out_domain == 'sr':
        filt_out = filt_sr_domain
    else:
        filt_tf_domain = np.fft.ifft2(filt_sr_domain)

        if np.max(np.imag(filt_tf_domain)) < 1e-8:
            filt_tf_domain = np.real(filt_tf_domain)

        filt_out = filt_tf_domain

        if filt_out_domain == 'all':
            filt_out = [filt_out, filt_sr_domain]

    return filt_out
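A hypothetical call matching the parameter examples in the docstring (the filter centers 2.0 and 4.0 are illustrative; assumes numpy as np and the helper module are imported as in the snippet):

scale_params = {'scale_filt_len': 100, 'samprate_spec': 12,
                'type': 'bandpass'}
rate_params = {'time_const': 1, 'rate_filt_len': 200,
               'samprate_temp': 20, 'type': 'bandpass'}
# upward-moving filter, returned in the scale-rate domain
filt_sr = gen_filt_scale_rate(2.0, 4.0, scale_params, rate_params,
                              'up', filt_out_domain='sr')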
Example #18
def fftw_multi_normxcorr(template_array, stream_array, pad_array, seed_ids,
                         cores_inner, cores_outer):
    """
    Use a C loop rather than a Python loop - in some cases this will be faster.

    :type template_array: dict
    :param template_array:
    :type stream_array: dict
    :param stream_array:
    :type pad_array: dict
    :param pad_array:
    :type seed_ids: list
    :param seed_ids:

    :rtype: np.ndarray, list
    :return: 3D Array of cross-correlations and list of used channels.
    """
    utilslib = _load_cdll('libutils')

    utilslib.multi_normxcorr_fftw.argtypes = [
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long, ctypes.c_long, ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')), ctypes.c_int,
        ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.intc, flags=native_str('C_CONTIGUOUS'))
    ]
    utilslib.multi_normxcorr_fftw.restype = ctypes.c_int
    '''
    Arguments are:
        templates (stacked [ch_1-t_1, ch_1-t_2, ..., ch_2-t_1, ch_2-t_2, ...])
        number of templates
        template length
        number of channels
        image (stacked [ch_1, ch_2, ..., ch_n])
        image length
        cross-correlations (stacked as per image)
        fft-length
        used channels (stacked as per templates)
        pad array (stacked as per templates)
    '''

    # pre processing
    used_chans = []
    template_len = template_array[seed_ids[0]].shape[1]
    for seed_id in seed_ids:
        used_chans.append(~np.isnan(template_array[seed_id]).any(axis=1))
        template_array[seed_id] = (
            (template_array[seed_id] -
             template_array[seed_id].mean(axis=-1, keepdims=True)) /
            (template_array[seed_id].std(axis=-1, keepdims=True) *
             template_len))
        template_array[seed_id] = np.nan_to_num(template_array[seed_id])
    n_channels = len(seed_ids)
    n_templates = template_array[seed_ids[0]].shape[0]
    image_len = stream_array[seed_ids[0]].shape[0]
    fft_len = next_fast_len(template_len + image_len - 1)
    template_array = np.ascontiguousarray(
        [template_array[x] for x in seed_ids], dtype=np.float32)
    stream_array = np.ascontiguousarray([stream_array[x] for x in seed_ids],
                                        dtype=np.float32)
    cccs = np.zeros((n_templates, image_len - template_len + 1), np.float32)
    used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
    pad_array_np = np.ascontiguousarray(
        [pad_array[seed_id] for seed_id in seed_ids], dtype=np.intc)
    variance_warnings = np.ascontiguousarray(np.zeros(n_channels),
                                             dtype=np.intc)

    # call C function
    ret = utilslib.multi_normxcorr_fftw(template_array, n_templates,
                                        template_len, n_channels, stream_array,
                                        image_len, cccs, fft_len,
                                        used_chans_np, pad_array_np,
                                        cores_outer, cores_inner,
                                        variance_warnings)
    if ret < 0:
        raise MemoryError("Memory allocation failed in correlation C-code")
    elif ret not in [0, 999]:
        print('Error in C code (possible normalisation error)')
        print('Maximum cccs %f at %s' %
              (cccs.max(), np.unravel_index(cccs.argmax(), cccs.shape)))
        print('Minimum cccs %f at %s' %
              (cccs.min(), np.unravel_index(cccs.argmin(), cccs.shape)))
        raise CorrelationError("Internal correlation error")
    elif ret == 999:
        warnings.warn("Some correlations not computed, are there "
                      "zeros in data? If not, consider increasing gain.")
    for i, variance_warning in enumerate(variance_warnings):
        if variance_warning and variance_warning > template_len:
            warnings.warn("Low variance found in {0} places for {1},"
                          " check result.".format(variance_warning,
                                                  seed_ids[i]))

    return cccs, used_chans
Example #19
def fftw_multi_normxcorr(template_array,
                         stream_array,
                         pad_array,
                         seed_ids,
                         cores_inner,
                         stack=True,
                         *args,
                         **kwargs):
    """
    Use a C loop rather than a Python loop - in some cases this will be faster.

    :type template_array: dict
    :param template_array:
    :type stream_array: dict
    :param stream_array:
    :type pad_array: dict
    :param pad_array:
    :type seed_ids: list
    :param seed_ids:

    :rtype: np.ndarray, list
    :return: 3D Array of cross-correlations and list of used channels.
    """
    utilslib = _load_cdll('libutils')

    utilslib.multi_normxcorr_fftw.argtypes = [
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long, ctypes.c_long, ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')), ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')), ctypes.c_int
    ]
    utilslib.multi_normxcorr_fftw.restype = ctypes.c_int
    '''
    Arguments are:
        templates (stacked [ch_1-t_1, ch_1-t_2, ..., ch_2-t_1, ch_2-t_2, ...])
        number of templates
        template length
        number of channels
        image (stacked [ch_1, ch_2, ..., ch_n])
        image length
        cross-correlations (stacked as per image)
        fft-length
        used channels (stacked as per templates)
        pad array (stacked as per templates)
        num thread inner
        variance warnings
        missed correlation warnings (usually due to gaps)
        stack option
    '''

    # pre processing
    used_chans = []
    template_len = template_array[seed_ids[0]].shape[1]
    for seed_id in seed_ids:
        used_chans.append(~np.isnan(template_array[seed_id]).any(axis=1))
        template_array[seed_id] = (
            (template_array[seed_id] -
             template_array[seed_id].mean(axis=-1, keepdims=True)) /
            (template_array[seed_id].std(axis=-1, keepdims=True) *
             template_len))
        template_array[seed_id] = np.nan_to_num(template_array[seed_id])
    n_channels = len(seed_ids)
    n_templates = template_array[seed_ids[0]].shape[0]
    image_len = stream_array[seed_ids[0]].shape[0]
    fft_len = kwargs.get("fft_len")
    if fft_len is None:
        # In testing, 2**13 consistently comes out fastest - setting to
        # default. https://github.com/eqcorrscan/EQcorrscan/pull/285
        fft_len = min(2**13, next_fast_len(template_len + image_len - 1))
    if fft_len < template_len:
        Logger.warning(
            "FFT length of {0} is shorter than the template, setting to "
            "{1}".format(fft_len, next_fast_len(template_len + image_len - 1)))
        fft_len = next_fast_len(template_len + image_len - 1)
    template_array = np.ascontiguousarray(
        [template_array[x] for x in seed_ids], dtype=np.float32)
    multipliers = {}
    for x in seed_ids:
        # Check that stream is non-zero and above variance threshold
        if not np.all(stream_array[x] == 0) and np.var(stream_array[x]) < 1e-8:
            # Apply gain
            stream_array[x] *= MULTIPLIER
            Logger.warning("Low variance found for {0}, applying gain "
                           "to stabilise correlations".format(x))
            multipliers.update({x: MULTIPLIER})
        else:
            multipliers.update({x: 1})
    stream_array = np.ascontiguousarray([stream_array[x] for x in seed_ids],
                                        dtype=np.float32)
    ccc_length = image_len - template_len + 1
    assert ccc_length > 0, "Template must be shorter than stream"
    if stack:
        cccs = np.zeros((n_templates, ccc_length), np.float32)
    else:
        cccs = np.zeros((n_templates, n_channels, ccc_length),
                        dtype=np.float32)
    used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
    pad_array_np = np.ascontiguousarray(
        [pad_array[seed_id] for seed_id in seed_ids], dtype=np.intc)
    variance_warnings = np.ascontiguousarray(np.zeros(n_channels),
                                             dtype=np.intc)
    missed_correlations = np.ascontiguousarray(np.zeros(n_channels),
                                               dtype=np.intc)

    # call C function
    ret = utilslib.multi_normxcorr_fftw(template_array, n_templates,
                                        template_len, n_channels, stream_array,
                                        image_len, cccs, fft_len,
                                        used_chans_np, pad_array_np,
                                        cores_inner, variance_warnings,
                                        missed_correlations, int(stack))
    if ret < 0:
        raise MemoryError("Memory allocation failed in correlation C-code")
    elif ret > 0:
        Logger.critical('Error in C code (possible normalisation error)')
        Logger.critical(
            'Maximum cccs %f at %s' %
            (cccs.max(), np.unravel_index(cccs.argmax(), cccs.shape)))
        Logger.critical(
            'Minimum cccs %f at %s' %
            (cccs.min(), np.unravel_index(cccs.argmin(), cccs.shape)))
        Logger.critical('Recommend checking your data for spikes, clipping '
                        'or artefacts')
        raise CorrelationError("Internal correlation error")
    for i, missed_corr in enumerate(missed_correlations):
        if missed_corr:
            Logger.debug(
                "{0} correlations not computed on {1}, are there gaps in the "
                "data? If not, consider increasing gain".format(
                    missed_corr, seed_ids[i]))
    for i, variance_warning in enumerate(variance_warnings):
        if variance_warning and variance_warning > template_len:
            Logger.warning("Low variance found in {0} places for {1}, check "
                           "result.".format(variance_warning, seed_ids[i]))
    # Remove gain
    for i, x in enumerate(seed_ids):
        stream_array[i] /= multipliers[x]
    return cccs, used_chans
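
# A minimal usage sketch for the function above, assuming EQcorrscan's
# compiled 'libutils' C library is importable; the seed ids, lengths and
# core count below are made-up illustration values.
import numpy as np

seed_ids = ["NZ.FOZ.10.HHZ", "NZ.JCZ.10.HHZ"]
n_templates, template_len, image_len = 4, 200, 36000
template_array = {sid: np.random.randn(n_templates, template_len)
                  for sid in seed_ids}
stream_array = {sid: np.random.randn(image_len).astype(np.float32)
                for sid in seed_ids}
pad_array = {sid: [0] * n_templates for sid in seed_ids}

cccs, used_chans = fftw_multi_normxcorr(
    template_array, stream_array, pad_array, seed_ids,
    cores_inner=1, stack=True)
print(cccs.shape)  # (n_templates, image_len - template_len + 1)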
Example #22
def fftw_multi_normxcorr(template_array, stream_array, pad_array, seed_ids):
    """
    Use a C loop rather than a Python loop - in some cases this will be faster.

    :type template_array: dict
    :param template_array:
    :type stream_array: dict
    :param stream_array:
    :type pad_array: dict
    :param pad_array:
    :type seed_ids: list
    :param seed_ids:

    :rtype: np.ndarray, list
    :return: 3D Array of cross-correlations and list of used channels.
    """
    utilslib = _load_cdll('libutils')

    utilslib.multi_normxcorr_fftw.argtypes = [
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int, ctypes.c_int, ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS'))]
    utilslib.multi_normxcorr_fftw.restype = ctypes.c_int
    '''
    Arguments are:
        templates (stacked [ch_1-t_1, ch_1-t_2, ..., ch_2-t_1, ch_2-t_2, ...])
        number of templates
        template length
        number of channels
        image (stacked [ch_1, ch_2, ..., ch_n])
        image length
        cross-correlations (stacked as per image)
        fft-length
        used channels (stacked as per templates)
        pad array (stacked as per templates)
    '''
    # pre processing
    used_chans = []
    template_len = template_array[seed_ids[0]].shape[1]
    for seed_id in seed_ids:
        used_chans.append(~np.isnan(template_array[seed_id]).any(axis=1))
        template_array[seed_id] = (
            (template_array[seed_id] -
             template_array[seed_id].mean(axis=-1, keepdims=True)) / (
                template_array[seed_id].std(axis=-1, keepdims=True) *
                template_len))
        template_array[seed_id] = np.nan_to_num(template_array[seed_id])
    n_channels = len(seed_ids)
    n_templates = template_array[seed_ids[0]].shape[0]
    image_len = stream_array[seed_ids[0]].shape[0]
    fft_len = next_fast_len(template_len + image_len - 1)
    template_array = np.ascontiguousarray([template_array[x]
                                           for x in seed_ids],
                                          dtype=np.float32)
    stream_array = np.ascontiguousarray([stream_array[x] for x in seed_ids],
                                        dtype=np.float32)
    cccs = np.zeros((n_templates, image_len - template_len + 1),
                    np.float32)
    used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
    pad_array_np = np.ascontiguousarray([pad_array[seed_id]
                                         for seed_id in seed_ids],
                                        dtype=np.intc)

    # call C function
    ret = utilslib.multi_normxcorr_fftw(
        template_array, n_templates, template_len, n_channels, stream_array,
        image_len, cccs, fft_len, used_chans_np, pad_array_np)
    if ret < 0:
        raise MemoryError()
    elif ret > 0:
        print('Error in C code (possible normalisation error)')
        print(cccs.max())
        print(cccs.min())
        raise MemoryError()

    return cccs, used_chans
Example #23
def fftw_normxcorr(templates, stream, pads, threaded=False, *args, **kwargs):
    """
    Normalised cross-correlation using the fftw library.

    Internally this function uses double precision numbers, which are definitely
    required for seismic data. Cross-correlations are computed as the
    inverse fft of the dot product of the ffts of the stream and the reversed,
    normalised, templates.  The cross-correlation is then normalised using the
    running mean and standard deviation (not using the N-1 correction) of the
    stream and the sums of the normalised templates.

    This python function wraps the C-library written by C. Chamberlain for this
    purpose.

    :param templates: 2D Array of templates
    :type templates: np.ndarray
    :param stream: 1D array of continuous data
    :type stream: np.ndarray
    :param pads: List of ints of pad lengths in the same order as templates
    :type pads: list
    :param threaded:
        Whether to use the threaded routine or not - note openMP and python
        multiprocessing don't seem to play nice for this.
    :type threaded: bool

    :return: np.ndarray of cross-correlations
    :return: np.ndarray channels used
    """
    utilslib = _load_cdll('libutils')

    argtypes = [
        np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int, ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS'))]
    restype = ctypes.c_int

    if threaded:
        func = utilslib.normxcorr_fftw_threaded
    else:
        func = utilslib.normxcorr_fftw

    func.argtypes = argtypes
    func.restype = restype

    # Generate a template mask
    used_chans = ~np.isnan(templates).any(axis=1)
    template_length = templates.shape[1]
    stream_length = len(stream)
    n_templates = templates.shape[0]
    fftshape = next_fast_len(template_length + stream_length - 1)

    # Normalize and flip the templates
    norm = ((templates - templates.mean(axis=-1, keepdims=True)) / (
        templates.std(axis=-1, keepdims=True) * template_length))

    norm = np.nan_to_num(norm)
    ccc = np.zeros((n_templates, stream_length - template_length + 1),
                   np.float32)
    used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
    pads_np = np.ascontiguousarray(pads, dtype=np.intc)

    ret = func(
        np.ascontiguousarray(norm.flatten(order='C'), np.float32),
        template_length, n_templates,
        np.ascontiguousarray(stream, np.float32), stream_length,
        np.ascontiguousarray(ccc, np.float32), fftshape,
        used_chans_np, pads_np)
    if ret != 0:
        print(ret)
        raise MemoryError()

    return ccc, used_chans
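
# A minimal sketch of calling the wrapper above, again assuming the
# compiled 'libutils' library is available; shapes are illustrative.
import numpy as np

n_templates, template_len, stream_len = 3, 100, 36000
templates = np.random.randn(n_templates, template_len).astype(np.float32)
stream = np.random.randn(stream_len).astype(np.float32)
pads = [0] * n_templates

ccc, used_chans = fftw_normxcorr(templates, stream, pads, threaded=False)
print(ccc.shape)  # (n_templates, stream_len - template_len + 1)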
Example #24
 def test_next_opt_len_strict(self):
     hams = {
         1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15,
         16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000,
         510183360: 510183360, 510183360 + 1: 512000000,
         511000000: 512000000,
         854296875: 854296875, 854296875 + 1: 859963392,
         196608000000: 196608000000, 196608000000 + 1: 196830000000,
         8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208,
         206391214080000: 206391214080000,
         206391214080000 + 1: 206624260800000,
         470184984576000: 470184984576000,
         470184984576000 + 1: 470715894135000,
         7222041363087360: 7222041363087360,
         7222041363087360 + 1: 7230196133913600,
         # power of 5    5**23
         11920928955078125: 11920928955078125,
         11920928955078125 - 1: 11920928955078125,
         # power of 3    3**34
         16677181699666569: 16677181699666569,
         16677181699666569 - 1: 16677181699666569,
         # power of 2   2**54
         18014398509481984: 18014398509481984,
         18014398509481984 - 1: 18014398509481984,
         # above this, int(ceil(n)) == int(ceil(n+1))
         19200000000000000: 19200000000000000,
         19200000000000000 + 1: 19221679687500000,
         288230376151711744: 288230376151711744,
         288230376151711744 + 1: 288325195312500000,
         288325195312500000 - 1: 288325195312500000,
         288325195312500000: 288325195312500000,
         288325195312500000 + 1: 288555831593533440,
         # power of 3    3**83
         3990838394187339929534246675572349035227 - 1:
             3990838394187339929534246675572349035227,
         3990838394187339929534246675572349035227:
             3990838394187339929534246675572349035227,
         # power of 2     2**135
         43556142965880123323311949751266331066368 - 1:
             43556142965880123323311949751266331066368,
         43556142965880123323311949751266331066368:
             43556142965880123323311949751266331066368,
         # power of 5      5**57
         6938893903907228377647697925567626953125 - 1:
             6938893903907228377647697925567626953125,
         6938893903907228377647697925567626953125:
             6938893903907228377647697925567626953125,
         # http://www.drdobbs.com/228700538
         # 2**96 * 3**1 * 5**13
         290142196707511001929482240000000000000 - 1:
             290142196707511001929482240000000000000,
         290142196707511001929482240000000000000:
             290142196707511001929482240000000000000,
         290142196707511001929482240000000000000 + 1:
             290237644800000000000000000000000000000,
         # 2**36 * 3**69 * 5**7
         4479571262811807241115438439905203543080960000000 - 1:
             4479571262811807241115438439905203543080960000000,
         4479571262811807241115438439905203543080960000000:
             4479571262811807241115438439905203543080960000000,
         4479571262811807241115438439905203543080960000000 + 1:
             4480327901140333639941336854183943340032000000000,
         # 2**37 * 3**44 * 5**42
         30774090693237851027531250000000000000000000000000000000000000 - 1:
             30774090693237851027531250000000000000000000000000000000000000,
         30774090693237851027531250000000000000000000000000000000000000:
             30774090693237851027531250000000000000000000000000000000000000,
         30774090693237851027531250000000000000000000000000000000000000 + 1:
             30778180617309082445871527002041377406962596539492679680000000,
     }
     for x, y in hams.items():
         assert_equal(next_fast_len(x), y)
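
# The mapping asserted above follows from next_fast_len(n) returning the
# smallest 5-smooth number (of the form 2**a * 3**b * 5**c) that is >= n.
# A brute-force reference, practical only for small n:
def naive_next_fast_len(n):
    def is_5_smooth(m):
        for p in (2, 3, 5):
            while m % p == 0:
                m //= p
        return m == 1

    while not is_5_smooth(n):
        n += 1
    return n

assert naive_next_fast_len(7) == 8
assert naive_next_fast_len(1021) == 1024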
Example #25
def istft(y, win, nfft=None, ifftw=None, return_plan=False):
    '''
    From y, a short-time Fourier transform as a 2-D array with time-frame
    indices along the first axis and frequency-bin indices along the
    second, synthesize the corresponding signal x.

    The STFT window must be a 1-D Numpy array or compatible sequence and
    should match the forward-transform window. The hop length is always
    unity, so adjacent time frames overlap by len(win) - 1 samples.

    If nfft, the length of the DFT, is None, a default value that is the
    smallest regular number at least as large as len(win) will be used.
    Otherwise, the value of nfft will be used as specified; a ValueError
    will be raised when nfft is not at least as large as len(win).

    If y.shape[1] is equal to nfft, a C2C FFT is assumed; if y.shape[1] is
    equal to (nfft // 2) + 1, an R2C FFT is assumed; otherwise, a
    ValueError will be raised.

    If ifftw is not None and the PyFFTW module is available, it should be a
    pyfftw.FFTW object that will be used to perform an inverse transform of
    shape (len(x) - len(win) + 1, nfft). If ifftw is compatible (which
    means that the input shapes are compatible and the transform occurs
    along axis 1), the DFT is computed by calling ifftw(y). Note that this
    will overwrite the input of the FFTW object.

    If ifftw is not compatible with the input, ifftw is None or the PyFFTW
    module cannot be imported, a new inverse FFT will be planned (if pyfftw
    is available) or numpy.fft will be used instead.

    If return_plan is True, the return value will be the signal and the
    PyFFTW plan used for the inverse Fourier transform (this is useful for
    repeated calls with different inputs), or None if PyFFTW was not used.
    If return_plan is False, the return value is the synthesized signal.
    '''
    y = np.asarray(y).squeeze()
    win = np.asarray(win).squeeze()

    if y.ndim != 2 or win.ndim != 1:
        raise ValueError(
            'Input y must be 2-D, win must be 1-D (or compatible)')

    lwin = len(win)

    if nfft is None:
        try:
            from scipy.fftpack.helper import next_fast_len
        except ImportError:
            nfft = lwin
        else:
            nfft = next_fast_len(lwin)

    if nfft < lwin:
        raise ValueError('Value of nfft must be no smaller than len(win)')

    if y.shape[1] == nfft: r2c = False
    elif y.shape[1] == nfft // 2 + 1: r2c = True
    else:
        raise ValueError('Last axis of y incompatible with nfft specification')

    # Find the input data type
    ifftname = 'irfft' if r2c else 'ifft'

    try:
        import pyfftw
    except ImportError:
        use_pyfftw = False
    else:
        use_pyfftw = True

    if not use_pyfftw:
        # Use Numpy for FFTs
        ifftfunc = getattr(np.fft, ifftname)
        out = ifftfunc(y, nfft, 1)
        ifftw = None
        if return_plan: return out, None
        else: return out
    else:
        # Check validity of existing plan
        try:
            if ifftw is None or ifftw.axes != (1, ):
                raise ValueError
            if r2c == np.issubdtype(ifftw.output_dtype, np.complexfloating):
                raise ValueError
            out = ifftw(y)
        except ValueError:
            builder = getattr(pyfftw.builders, ifftname)
            ifftw = builder(y, nfft, axis=1)
            out = ifftw()

    # Apply STFT window to decomposed output
    out = out[:, :lwin] * win[np.newaxis, :]

    # Synthesize and normalize the output
    x = np.zeros((out.shape[0] + out.shape[1] - 1, ), dtype=out.dtype)
    x[:out.shape[0]] = out[:, 0]
    for i, row in enumerate(out.T[1:], 1):
        x[i:i + out.shape[0]] += row
    x /= np.sum(np.abs(win)**2)

    if return_plan: return x, ifftw
    else: return x
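
# A round-trip sketch for the istft above. The forward STFT (hop length
# 1) is built directly with numpy (sliding_window_view needs numpy >=
# 1.20); window and lengths are made up. Reconstruction is exact only
# where windows fully overlap, so the edges are excluded.
import numpy as np

x = np.random.randn(256)
win = np.hanning(32)
frames = np.lib.stride_tricks.sliding_window_view(x, len(win)) * win
y = np.fft.rfft(frames, n=64, axis=1)  # R2C: y.shape[1] == 64 // 2 + 1
xr = istft(y, win, nfft=64)
assert np.allclose(xr[32:-32], x[32:-32])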
Example #26
        sta_list = sorted(glob.glob(os.path.join(tdir[ick], '*' + input_fmt)))
    if (len(sta_list) == 0):
        print('continue! no data in %s' % tdir[ick])
        continue

    # crude estimation on memory needs (assume float32)
    nsec_chunk = inc_hours / 24 * 86400
    nseg_chunk = int(np.floor((nsec_chunk - cc_len) / step))
    npts_chunk = int(nseg_chunk * cc_len * samp_freq)
    memory_size = nsta * npts_chunk * 4 / 1024**3
    if memory_size > MAX_MEM:
        raise ValueError(
            'Require %5.3fG memory but only %5.3fG provided! Reduce inc_hours to avoid this issue!'
            % (memory_size, MAX_MEM))

    nnfft = int(next_fast_len(int(cc_len * samp_freq)))
    # open array to store fft data/info in memory
    fft_array = np.zeros((nsta, nseg_chunk * (nnfft // 2)), dtype=np.complex64)
    fft_std = np.zeros((nsta, nseg_chunk), dtype=np.float32)
    fft_flag = np.zeros(nsta, dtype=np.int16)
    fft_time = np.zeros((nsta, nseg_chunk), dtype=np.float64)
    # station information (for every channel)
    station = []
    network = []
    channel = []
    clon = []
    clat = []
    location = []
    elevation = []

    # loop through all stations
Example #27
def stft(x, win, nfft=None, fftw=None, return_plan=False):
    '''
    Compute D, a short-time Fourier transform of the 1-D signal x such that
    D[n,k] is the value of the DFT for frequency bin k in time frame n. The
    method pycwp.stats.rolling_window is used to efficiently subdivide the
    signal into overlapping windows.

    The STFT window size is given by the length of win, which should also
    be a 1-D Numpy array or compatible sequence. The hop length is always
    unity, so adjacent time frames overlap by len(win) - 1 samples.

    If nfft, the length of the DFT, is None, a default value that is the
    smallest regular number at least as large as len(win) will be used.
    Otherwise, the value of nfft will be used as specified; a ValueError
    will be raised when nfft is not at least as large as len(win).

    If neither x nor win are complex, an R2C DFT will be used, which means
    the number of frequency bins in the output will be (nfft // 2) + 1;
    otherwise, a fully complex DFT will be used.

    If fftw is not None and the PyFFTW module is available, it should be a
    pyfftw.FFTW object that will be used to perform a forward FFT of shape
    (len(x) - len(win) + 1, nfft). If fftw is compatible (which means that
    fftw.input_dtype is complex iff x is complex or win is complex, the
    input shapes are compatible and the transform occurs along axis 1), the
    DFT is computed by calling fftw(input), where input is the rolling-
    window representation of x multiplied by the window function. Note
    that this will overwrite the input of the FFTW object.

    If fftw is not compatible with the input, fftw is None or the PyFFTW
    module cannot be imported, a new FFT will be planned (if pyfftw is
    available) or numpy.fft will be used instead.

    If return_plan is True, the return value will be the STFT and the
    PyFFTW plan used for the Fourier transform (this is useful for repeated
    calls with different inputs), or None if PyFFTW was not used. If
    return_plan is False, the return value is just the STFT array.
    '''
    x = np.asarray(x).squeeze()
    win = np.asarray(win).squeeze()

    if x.ndim != 1 or win.ndim != 1:
        raise ValueError('Inputs x and win must be 1-D or compatible')

    lwin = len(win)

    if nfft is None:
        try:
            from scipy.fftpack.helper import next_fast_len
        except ImportError:
            nfft = lwin
        else:
            nfft = next_fast_len(lwin)

    if nfft < lwin:
        raise ValueError('Value of nfft must be no smaller than len(win)')

    # Find the input data type
    xw = rolling_window(x, lwin) * win[np.newaxis, :]
    r2c = not np.issubdtype(xw.dtype, np.complexfloating)
    fftname = 'rfft' if r2c else 'fft'

    try:
        import pyfftw
    except ImportError:
        use_pyfftw = False
    else:
        use_pyfftw = True

    if not use_pyfftw:
        # Use Numpy for FFTs
        fftfunc = getattr(np.fft, fftname)
        out = fftfunc(xw, nfft, 1)
        if return_plan: return out, None
        else: return out

    # Check validity of existing plan
    try:
        if fftw is None or fftw.axes != (1, ):
            raise ValueError
        if r2c == np.issubdtype(fftw.input_dtype, np.complexfloating):
            raise ValueError
        out = fftw(xw)
    except ValueError:
        builder = getattr(pyfftw.builders, fftname)
        fftw = builder(xw, nfft, axis=1)
        out = fftw()

    out = out.copy()
    if return_plan: return out, fftw
    else: return out
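
# A sketch of reusing the PyFFTW plan across repeated calls to the stft
# above; it assumes pyfftw is installed (otherwise the returned plan
# stays None) and that the rolling_window helper used by stft is
# importable in this module.
import numpy as np

win = np.hanning(64)
plan = None
for _ in range(3):
    x = np.random.randn(1024)
    D, plan = stft(x, win, nfft=128, fftw=plan, return_plan=True)
print(D.shape)  # (1024 - 64 + 1, 128 // 2 + 1) for the R2C case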
Example #28
 def test_np_integers(self):
     ITYPES = [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64]
     for ityp in ITYPES:
         x = ityp(12345)
         testN = next_fast_len(x)
         assert_equal(testN, next_fast_len(int(x)))
Example #29
def gen_fbank_scale_rate(scale_ctrs, rate_ctrs, nfft_scale, nfft_rate,
                         filt_params, comp_specgram=None,
                         fbank_out_domain='sr'):
    """
    This function generates a scale-rate domain bank of up-/down-ward filters.
    The filterbank will be tuned to the passband of a target signal if specified.

    Inputs:
    scale_ctrs: numpy array containing filter centers along the scale axis
    rate_ctrs: numpy array containing filter centers along the rate axis
    nfft_scale: number of scale fft points
    nfft_rate: number of rate fft points
    filt_params: dictionary containing filter parameters including:
                 samprate_spec: sample rate along the freq. axis (cyc/oct)
                 samprate_temp: sample rate along the time axis (cyc/sec)
                 time_const: exponent coefficient of the exponential term

    comp_specgram: numpy array of size [num_freq_bin, num_time_frame] containing a complex spectrogram.
                    If provided, the function will return a filterbank that is modulated with the
                    phase of the spectrogram. Otherwise, the function will return the original set
                    of filters.

    fbank_out_domain: string indicating the representation domain of the filter bank
                     'sr' (default): filter representation in the scale-rate domain
                     'tf': filter representation in the time-frequency domain, involves an extra 2D IFT computation
                     'all': return filter representations in both domains

    Output:
    fbank_2d_out: numpy array containing the 2D filter bank represented in the scale-rate or time-frequency domain
                 or list of length 2 containing filter representations in both domains

    Note: nfft_scale >= num_freq_bin and nfft_rate >= num_time_frame, where
          [num_freq_bin,num_time_frame] = np.shape(spectrogram)
    Note: the first and last filters in the scale and rate ranges are assumed
          to be lowpass and highpass respectively

    Author: Fatemeh Pishdadian ([email protected])
    """

    # check the 'comp_specgram' parameter
    if comp_specgram is None:
        mod_filter = 0
    else:
        mod_filter = 1

    ### Parameters and dimensions

    # set nfft to the next 5-smooth number
    nfft_scale = helper.next_fast_len(nfft_scale) #nfft_scale + np.mod(nfft_scale, 2)
    nfft_rate = helper.next_fast_len(nfft_rate) # nfft_rate + np.mod(nfft_rate, 2)

    num_scale_ctrs = len(scale_ctrs)
    num_rate_ctrs = len(rate_ctrs)

    beta = filt_params['time_const']
    samprate_spec = filt_params['samprate_spec']
    samprate_temp = filt_params['samprate_temp']

    scale_params = {'scale_filt_len': nfft_scale, 'samprate_spec': samprate_spec}
    rate_params = {'time_const': beta, 'rate_filt_len': nfft_rate, 'samprate_temp': samprate_temp}

    scale_filt_types = ['lowpass'] + (num_scale_ctrs-2) * ['bandpass'] + ['highpass']
    rate_filt_types = ['lowpass'] + (num_rate_ctrs-2) * ['bandpass'] + ['highpass']

    ### Generate the filterbank

    # filter modulation factor (pre-filtering stage)
    if mod_filter:
        # adjust the dimensions of the complex spectrogram
        comp_specgram = np.fft.ifft2(np.fft.fft2(comp_specgram, [nfft_scale, nfft_rate]))
        spec_phase = np.angle(comp_specgram)
        # uncomment this line to compare to the matlab code
        # otherwise fft phase difference results in large error values
        #spec_phase = np.angle(comp_specgram) * (np.abs(comp_specgram)>1e-10)
        filt_mod_factor = np.exp(1j * spec_phase)


    ### non-modulated filterbank:
    if not mod_filter:

        # return a single output domain
        if fbank_out_domain != 'all':  # 'tf' or 'sr'

            fbank_2d_out = np.zeros((num_scale_ctrs, 2 * num_rate_ctrs, nfft_scale, nfft_rate),dtype='complex128')

            for i in range(num_scale_ctrs): # iterate over scale filter centers
                scale_params['type'] = scale_filt_types[i]

                for j in range(num_rate_ctrs): # iterate over rate filter center
                    rate_params['type'] = rate_filt_types[j]

                    filt_up = gen_filt_scale_rate(scale_ctrs[i], rate_ctrs[j], scale_params, rate_params, 'up'
                                                     , filt_out_domain=fbank_out_domain)
                    filt_down = gen_filt_scale_rate(scale_ctrs[i], rate_ctrs[j], scale_params, rate_params, 'down'
                                                       , filt_out_domain=fbank_out_domain)

                    fbank_2d_out[i, num_rate_ctrs - j - 1, :, :] = filt_up
                    fbank_2d_out[i, num_rate_ctrs + j, :, :] = filt_down

        # return both output domains
        elif fbank_out_domain == 'all':

            fbank_tf_domain = np.zeros((num_scale_ctrs, 2 * num_rate_ctrs, nfft_scale, nfft_rate),dtype='complex128')
            fbank_sr_domain = np.zeros((num_scale_ctrs, 2 * num_rate_ctrs, nfft_scale, nfft_rate),dtype='complex128')

            for i in range(num_scale_ctrs): # iterate over scale filter centers
                scale_params['type'] = scale_filt_types[i]

                for j in range(num_rate_ctrs): # iterate over rate filter center
                    rate_params['type'] = rate_filt_types[j]

                    filt_up = gen_filt_scale_rate(scale_ctrs[i], rate_ctrs[j], scale_params, rate_params, 'up'
                                                  , filt_out_domain=fbank_out_domain)
                    filt_down = gen_filt_scale_rate(scale_ctrs[i], rate_ctrs[j], scale_params, rate_params, 'down'
                                                    , filt_out_domain=fbank_out_domain)

                    fbank_tf_domain[i, num_rate_ctrs - j - 1, :, :] = filt_up[0]
                    fbank_tf_domain[i, num_rate_ctrs + j, :, :] = filt_down[0]

                    fbank_sr_domain[i, num_rate_ctrs - j - 1, :, :] = filt_up[1]
                    fbank_sr_domain[i, num_rate_ctrs + j, :, :] = filt_down[1]

            fbank_2d_out = [fbank_tf_domain, fbank_sr_domain]



    ### modulated filterbank:
    if mod_filter:

        fbank_tf_domain = np.zeros((num_scale_ctrs, 2 * num_rate_ctrs, nfft_scale, nfft_rate), dtype='complex128')

        for i in range(num_scale_ctrs): # iterate over scale filter centers
            scale_params['type'] = scale_filt_types[i]

            for j in range(num_rate_ctrs): # iterate over rate filter center
                rate_params['type'] = rate_filt_types[j]

                # upward
                filt_tf_up = gen_filt_scale_rate(scale_ctrs[i], rate_ctrs[j], scale_params, rate_params,'up',
                                                 filt_out_domain='tf')
                filt_tf_up_mod = filt_tf_up * filt_mod_factor
                fbank_tf_domain[i, num_rate_ctrs - j - 1, :, :] = filt_tf_up_mod

                # downward
                filt_tf_down = gen_filt_scale_rate(scale_ctrs[i], rate_ctrs[j], scale_params, rate_params,'down',
                                                 filt_out_domain = 'tf')
                filt_tf_down_mod = filt_tf_down * filt_mod_factor
                fbank_tf_domain[i, num_rate_ctrs + j, :, :] = filt_tf_down_mod

        if fbank_out_domain == 'tf':
            fbank_2d_out = fbank_tf_domain

        elif fbank_out_domain == 'sr':
            fbank_2d_out = np.fft.fftn(fbank_tf_domain,axes=[2,3])

        elif fbank_out_domain == 'all':
            fbank_sr_domain = np.fft.fftn(fbank_tf_domain, axes=[2, 3])
            fbank_2d_out = [fbank_tf_domain, fbank_sr_domain]

    return fbank_2d_out
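
# An illustrative call of the filterbank generator, assuming
# gen_filt_scale_rate from the same module is importable; all parameter
# values below are made up.
import numpy as np

scale_ctrs = np.array([0.5, 1.0, 2.0, 4.0])
rate_ctrs = np.array([1.0, 2.0, 4.0, 8.0])
filt_params = {'samprate_spec': 24.0, 'samprate_temp': 200.0,
               'time_const': 1.0}
fbank = gen_fbank_scale_rate(scale_ctrs, rate_ctrs, 128, 256, filt_params,
                             fbank_out_domain='sr')
print(fbank.shape)  # (4, 2 * 4, 128, 256); 128 and 256 are already 5-smooth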
Example #30
def myCorr(data, maxlag, plot=False, nfft=None):
    """This function takes ndimensional *data* array, computes the cross-correlation in the frequency domain
    and returns the cross-correlation function between [-*maxlag*:*maxlag*].

    :type data: :class:`numpy.ndarray`
    :param data: This array contains the fft of each timeseries to be cross-correlated.
    :type maxlag: int
    :param maxlag: This number defines the number of samples (N=2*maxlag + 1) of the CCF that will be returned.

    :rtype: :class:`numpy.ndarray`
    :returns: The cross-correlation function between [-maxlag:maxlag]
    """
    if nfft is None:
        s1 = np.array(data[0].shape)
        shape = s1 - 1
        # Speed up FFT by padding to optimal size for FFTPACK
        fshape = [next_fast_len(int(d)) for d in shape]
        nfft = fshape[0]

    normalized = True
    allCpl = False

    maxlag = np.round(maxlag)
    # ~ print "np.shape(data)",np.shape(data)
    if data.shape[0] == 2:
        # ~ print "2 matrix to correlate"
        if allCpl:
            # Skipped this unused part
            pass
        else:
            K = data.shape[0]
            # couples de stations
            couples = np.concatenate((np.arange(0, K), K + np.arange(0, K)))

    Nt = data.shape[1]
    Nc = 2 * Nt - 1

    # corr = scipy.fftpack.fft(data,int(Nfft),axis=1)
    corr = data

    if plot:
        import matplotlib.pyplot as plt
        plt.subplot(211)
        plt.plot(np.arange(len(corr[0])) * 0.05, np.abs(corr[0]))
        plt.subplot(212)
        plt.plot(np.arange(len(corr[1])) * 0.05, np.abs(corr[1]))

    corr = np.conj(corr[couples[0]]) * corr[couples[1]]
    corr = np.real(scipy.fftpack.ifft(corr, nfft)) / (Nt)
    corr = np.concatenate((corr[-Nt + 1:], corr[:Nt + 1]))

    if plot:
        plt.figure()
        plt.plot(corr)

    if normalized:
        E = np.real(np.sqrt(
            np.mean(scipy.fftpack.ifft(data, n=nfft, axis=1) ** 2, axis=1)))
        normFact = E[0] * E[1]
        corr /= np.real(normFact)

    if maxlag != Nt:
        tcorr = np.arange(-Nt + 1, Nt)
        dN = np.where(np.abs(tcorr) <= maxlag)[0]
        corr = corr[dN]

    del data
    return corr
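
# A minimal sketch of calling myCorr: it expects the FFTs of the two
# traces stacked row-wise, not the raw time series. It assumes the
# module-level imports the snippet relies on (scipy.fftpack and
# next_fast_len); the lengths and maxlag are illustrative.
import numpy as np
import scipy.fftpack

Nt = 1024
tr1, tr2 = np.random.randn(Nt), np.random.randn(Nt)
data = np.vstack([scipy.fftpack.fft(tr1, Nt),
                  scipy.fftpack.fft(tr2, Nt)])
ccf = myCorr(data, maxlag=100)
print(len(ccf))  # 2 * maxlag + 1 samples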
Example #31
hfile = '/Users/chengxin/Documents/Harvard/Kanto_basin/code/KANTO/CCF_C3/2010_01_11.h5'
with pyasdf.ASDFDataSet(hfile, mode='r') as ds:
    data_types = ds.auxiliary_data.list()

    #-----select one data_type and path-----
    data_type = data_types[2]
    paths = ds.auxiliary_data[data_type].list()
    path = paths[2]

    data = ds.auxiliary_data[data_type][path].data[:]
    npts = len(data)

    #----do fft----
    nfft2 = _npts2nfft(npts)
    nfft1 = int(next_fast_len(npts))
    nfft3 = int(next_fast_len(npts * 2 + 1))
    print('nfft1, nfft2 and nfft3: %d %d %d' % (nfft1, nfft2, nfft3))

    t0 = time.time()
    spec1 = scipy.fftpack.fft(data, nfft1)
    wave1 = scipy.fftpack.ifft(spec1, nfft1)
    t1 = time.time()
    spec2 = np.fft.rfft(data, nfft2)
    wave2 = np.fft.irfft(spec2, nfft2)
    t2 = time.time()
    spec3 = scipy.fftpack.fft(data, nfft3)
    wave3 = scipy.fftpack.ifft(spec3, nfft3)
    t3 = time.time()
    print('fft and rfft takes %f s, %f s and %f s' %
          (t1 - t0, t2 - t1, t3 - t2))
Example #32
def whiten(data, delta, freqmin, freqmax, to_whiten=True, Nfft=None):
    """This function takes 1-dimensional *data* timeseries array,
    goes to frequency domain using fft, whitens the amplitude of the spectrum
    in frequency domain between *freqmin* and *freqmax*
    and returns the whitened fft.

    :type data: :class:`numpy.ndarray`
    :param data: Contains the 1D time series to whiten
    :type Nfft: int
    :param Nfft: The number of points to compute the FFT
    :type delta: float
    :param delta: The sample spacing (in seconds) of the `data`
    :type freqmin: float
    :param freqmin: The lower frequency bound
    :type freqmax: float
    :param freqmax: The upper frequency bound

    :rtype: :class:`numpy.ndarray`
    :returns: The FFT of the input trace, whitened between the frequency bounds
    """

    # Speed up FFT by padding to optimal size for FFTPACK
    if data.ndim == 1:
        axis = 0
    elif data.ndim == 2:
        axis = 1

    if Nfft is None:
        Nfft = next_fast_len(int(data.shape[axis]))

    pad = 100
    Nfft = int(Nfft)
    freqVec = scipy.fftpack.fftfreq(Nfft, d=delta)[:Nfft // 2]

    J = np.where((freqVec >= freqmin) & (freqVec <= freqmax))[0]
    low = J[0] - pad
    if low <= 0:
        low = 1

    left = J[0]
    right = J[-1]
    high = J[-1] + pad
    if high > Nfft / 2:
        high = int(Nfft // 2)

    FFTRawSign = scipy.fftpack.fft(data, Nfft, axis=axis)

    if to_whiten:

        # Left tapering:
        if axis == 1:
            FFTRawSign[:,0:low] *= 0
            FFTRawSign[:,low:left] = np.cos(
                np.linspace(np.pi / 2., np.pi, left - low)) ** 2 * np.exp(
                1j * np.angle(FFTRawSign[:,low:left]))
            # Pass band:
            FFTRawSign[:,left:right] = np.exp(1j * np.angle(FFTRawSign[:,left:right]))
            # Right tapering:
            FFTRawSign[:,right:high] = np.cos(
                np.linspace(0., np.pi / 2., high - right)) ** 2 * np.exp(
                1j * np.angle(FFTRawSign[:,right:high]))
            FFTRawSign[:,high:Nfft + 1] *= 0

            # Hermitian symmetry (because the input is real)
            FFTRawSign[:,-(Nfft // 2) + 1:] = FFTRawSign[:,1:(Nfft // 2)].conjugate()[:, ::-1]
        else:
            FFTRawSign[0:low] *= 0
            FFTRawSign[low:left] = np.cos(
                np.linspace(np.pi / 2., np.pi, left - low)) ** 2 * np.exp(
                1j * np.angle(FFTRawSign[low:left]))
            # Pass band:
            FFTRawSign[left:right] = np.exp(1j * np.angle(FFTRawSign[left:right]))
            # Right tapering:
            FFTRawSign[right:high] = np.cos(
                np.linspace(0., np.pi / 2., high - right)) ** 2 * np.exp(
                1j * np.angle(FFTRawSign[right:high]))
            FFTRawSign[high:Nfft + 1] *= 0

            # Hermitian symmetry (because the input is real)
            FFTRawSign[-(Nfft // 2) + 1:] = FFTRawSign[1:(Nfft // 2)].conjugate()[::-1]

    return FFTRawSign
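
# A short sketch: whiten a 20 Hz trace between 0.1 and 1 Hz and return
# to the time domain; delta is the sample spacing in seconds. Assumes
# the snippet's module-level imports (numpy, scipy.fftpack,
# next_fast_len).
import numpy as np
import scipy.fftpack
from scipy.fftpack import next_fast_len

delta = 0.05
trace = np.random.randn(12000)
Nfft = next_fast_len(len(trace))
white_spec = whiten(trace, delta, freqmin=0.1, freqmax=1.0, Nfft=Nfft)
white_trace = np.real(scipy.fftpack.ifft(white_spec, Nfft))[:len(trace)]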
Example #33
def fftw_normxcorr(templates, stream, pads, threaded=False, *args, **kwargs):
    """
    Normalised cross-correlation using the fftw library.

    Internally this function uses double precision numbers, which are definitely
    required for seismic data. Cross-correlations are computed as the
    inverse fft of the dot product of the ffts of the stream and the reversed,
    normalised, templates.  The cross-correlation is then normalised using the
    running mean and standard deviation (not using the N-1 correction) of the
    stream and the sums of the normalised templates.

    This python function wraps the C-library written by C. Chamberlain for this
    purpose.

    :param templates: 2D Array of templates
    :type templates: np.ndarray
    :param stream: 1D array of continuous data
    :type stream: np.ndarray
    :param pads: List of ints of pad lengths in the same order as templates
    :type pads: list
    :param threaded:
        Whether to use the threaded routine or not - note openMP and python
        multiprocessing don't seem to play nice for this.
    :type threaded: bool

    :return: np.ndarray of cross-correlations
    :return: np.ndarray channels used
    """
    utilslib = _load_cdll('libutils')

    argtypes = [
        np.ctypeslib.ndpointer(dtype=np.float32,
                               ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long, ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc, flags=native_str('C_CONTIGUOUS'))
    ]
    restype = ctypes.c_int

    if threaded:
        func = utilslib.normxcorr_fftw_threaded
    else:
        func = utilslib.normxcorr_fftw

    func.argtypes = argtypes
    func.restype = restype

    # Generate a template mask
    used_chans = ~np.isnan(templates).any(axis=1)
    template_length = templates.shape[1]
    stream_length = len(stream)
    n_templates = templates.shape[0]
    fftshape = kwargs.get("fft_len")
    if fftshape is None:
        # In testing, 2**13 consistently comes out fastest - setting to
        # default. https://github.com/eqcorrscan/EQcorrscan/pull/285
        fftshape = min(2**13,
                       next_fast_len(template_length + stream_length - 1))
    if fftshape < template_length:
        Logger.warning(
            "FFT length of {0} is shorter than the template, setting to "
            "{1}".format(fftshape,
                         next_fast_len(template_length + stream_length - 1)))
        fftshape = next_fast_len(template_length + stream_length - 1)
    # Normalize and flip the templates
    norm = ((templates - templates.mean(axis=-1, keepdims=True)) /
            (templates.std(axis=-1, keepdims=True) * template_length))

    norm = np.nan_to_num(norm)
    ccc_length = stream_length - template_length + 1
    assert ccc_length > 0, "Template must be shorter than stream"
    ccc = np.zeros((n_templates, ccc_length), np.float32)
    used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
    pads_np = np.ascontiguousarray(pads, dtype=np.intc)
    variance_warning = np.ascontiguousarray([0], dtype=np.intc)
    missed_corr = np.ascontiguousarray([0], dtype=np.intc)

    # Check that stream is non-zero and above variance threshold
    if not np.all(stream == 0) and np.var(stream) < 1e-8:
        # Apply gain
        stream *= MULTIPLIER
        Logger.warning("Low variance found for, applying gain "
                       "to stabilise correlations")
        multiplier = MULTIPLIER
    else:
        multiplier = 1
    ret = func(np.ascontiguousarray(norm.flatten(order='C'),
                                    np.float32), template_length, n_templates,
               np.ascontiguousarray(stream, np.float32), stream_length,
               np.ascontiguousarray(ccc, np.float32), fftshape, used_chans_np,
               pads_np, variance_warning, missed_corr)
    if ret < 0:
        raise MemoryError()
    elif ret > 0:
        Logger.critical('Error in C code (possible normalisation error)')
        Logger.critical('Maximum ccc %f at %i' % (ccc.max(), ccc.argmax()))
        Logger.critical('Minimum ccc %f at %i' % (ccc.min(), ccc.argmin()))
        Logger.critical('Recommend checking your data for spikes, clipping '
                        'or artefacts')
        raise CorrelationError("Internal correlation error")
    if missed_corr[0]:
        Logger.warning("{0} correlations not computed, are there gaps in the "
                       "data? If not, consider increasing gain".format(
                           missed_corr[0]))
    if variance_warning[0] and variance_warning[0] > template_length:
        Logger.warning(
            "Low variance found in {0} positions, check result.".format(
                variance_warning[0]))
    # Remove variance correction
    stream /= multiplier
    return ccc, used_chans
Example #34
def main():
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s [%(levelname)s] %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    logging.info('*** Starting: Compute AC ***')

    # Connection to the DB
    db = connect()

    if len(get_filters(db, all=False)) == 0:
        logging.info("NO FILTERS DEFINED, exiting")
        sys.exit()

    # Get Configuration
    params = Params()
    params.goal_sampling_rate = float(get_config(db, "cc_sampling_rate"))
    params.goal_duration = float(get_config(db, "analysis_duration"))
    params.overlap = float(get_config(db, "overlap"))
    params.maxlag = float(get_config(db, "maxlag"))
    params.min30 = float(get_config(db, "corr_duration")) * params.goal_sampling_rate
    params.windsorizing = float(get_config(db, "windsorizing"))
    params.resampling_method = get_config(db, "resampling_method")
    params.decimation_factor = int(get_config(db, "decimation_factor"))
    params.preprocess_lowpass = float(get_config(db, "preprocess_lowpass"))
    params.preprocess_highpass = float(get_config(db, "preprocess_highpass"))
    params.keep_all = get_config(db, 'keep_all', isbool=True)
    params.keep_days = get_config(db, 'keep_days', isbool=True)
#Raph    params.components_to_compute = get_components_to_compute(db)

    params.stack_method = get_config(db, 'stack_method')
    params.pws_timegate = float(get_config(db, 'pws_timegate'))
    params.pws_power = float(get_config(db, 'pws_power'))

    params.components_to_compute = ['Z', 'E', 'N']
    ##################################

    logging.info("Will compute %s" % " ".join(params.components_to_compute))
    ##################################
    stations_to_analyse = ["%s.%s" % (sta.net, sta.sta) for sta in get_stations(db, all=True)]#extract all stations

    pairs = []#pair of components
    ##################################
    #modified part to make pair list with comps
    ##################################
    i = 0
    for comp in params.components_to_compute:
        for newcomp in params.components_to_compute:
            if comp == newcomp:
                if i == 0:
                    pairs = np.array(':'.join([comp, newcomp]))
                    i+=1
                else:
                    pairs = np.vstack((pairs,':'.join([comp, newcomp])))
    pairs = np.hstack(pairs)
    print('Pairs = ', len(pairs))
    while is_next_job(db, jobtype='AC'):
        jobs = get_next_job(db, jobtype='AC')
        stations = []
        #Raph pairs = []
        refs = []

#Raph changed to fit AC
#        for job in jobs:
#            refs.append(job.ref)
#            pairs.append(job.pair)
#            netsta1, netsta2 = job.pair.split(':')
#            stations.append(netsta1)
#            stations.append(netsta2)
#            goal_day = job.day

        #go through job to make job array with stations
        for job in jobs:
            refs.append(job.ref)
            #pairs.append(job.pair)#find a way to pair comps   /!\
            netsta = job.pair  #just 1 station in the cell?
            stations.append(netsta)
            #stations.append(netstacomp2)
            goal_day = job.day



        stations = np.unique(stations)

        logging.info("New AC Job: %s (%i stations with %i pairs each)" %
                     (goal_day , len(stations), len(pairs)))
        jt = time.time()

        xlen = int(params.goal_duration * params.goal_sampling_rate)

#Raph changed here the trame selection

        comps = ['Z', 'E', 'N']
        tramef_Z = np.zeros((len(stations), xlen))
        tramef_E = np.zeros((len(stations), xlen))
        tramef_N = np.zeros((len(stations), xlen))
        basetime, tramef_Z, tramef_E, tramef_N = preprocess(db, stations, comps, goal_day, params, tramef_Z, tramef_E, tramef_N)# preprocessing


        # print '##### STREAMS ARE ALL PREPARED AT goal Hz #####'
        dt = 1. / params.goal_sampling_rate

        begins = []
        ends = []
        i = 0
        while i <=  (params.goal_duration - params.min30/params.goal_sampling_rate):
            begins.append(int(i * params.goal_sampling_rate))
            ends.append(int(i * params.goal_sampling_rate + params.min30))
            i += int(params.min30/params.goal_sampling_rate * (1.0-params.overlap))
        # ##########################################################################################################

        for station in stations:
            orig_pair = station
            for pair in pairs:
        # ITERATING OVER PAIRS #####
        #for pair in pairs:
                #orig_pair = pair
    
                #logging.info('Processing pair: %s' % pair.replace(':', ' vs '))
                logging.info("Processing pair %s for station %s" %(pair, station))
                tt = time.time()
                comp1,comp2=pair.split(':')
                components=comp1+comp2
                ### load trames
                #assign trames according to pair

                station_to_analyse=np.where(stations==station)

                if pair.split(':')[0]=='Z':
                    tr1=tramef_Z[station_to_analyse]
                elif pair.split(':')[0]=='E':
                    tr1=tramef_E[station_to_analyse]
                elif pair.split(':')[0]=='N':
                    tr1=tramef_N[station_to_analyse]
                if pair.split(':')[1]=='Z':
                    tr2=tramef_Z[station_to_analyse]
                elif pair.split(':')[1]=='E':
                    tr2=tramef_E[station_to_analyse]
                elif pair.split(':')[1]=='N':
                    tr2=tramef_N[station_to_analyse]

                trames=np.vstack((tr1,tr2))

                del tr1,tr2    
                daycorr = {}
                ndaycorr = {}
                allcorr = {}
                for filterdb in get_filters(db, all=False):
                    filterid = filterdb.ref
                    daycorr[filterid] = np.zeros(get_maxlag_samples(db,))
                    ndaycorr[filterid] = 0

                for islice, (begin, end) in enumerate(zip(begins, ends)):
                    trame2h = trames[:, begin:end]
                    nfft = next_fast_len(int(trame2h.shape[1]))
                    rmsmat = np.std(trame2h, axis=1)
                    for filterdb in get_filters(db, all=False):
                        filterid = filterdb.ref
                        low = float(filterdb.low)
                        high = float(filterdb.high)
                        rms_threshold = filterdb.rms_threshold

                        # Nfft = int(params.min30)
                        # if params.min30 / 2 % 2 != 0:
                        #     Nfft = params.min30 + 2

#Raph                        trames2hWb = np.zeros((2, int(nfft)), dtype=np.complex)
                        print('Shapes of trame2h ', len(trame2h[0]), len(trame2h[1]), len(trame2h))
                        trames2ht = np.zeros((2, int(len(trame2h[0]))), dtype=complex)
                        skip = False
                        for i, component in enumerate(pair.split(':')):#Raph changed to split pair, why is STATION still here?
                            if rmsmat[i] > rms_threshold:
                                cp = cosine_taper(len(trame2h[i]),0.04)
                                trame2h[i] -= trame2h[i].mean()

                                if params.windsorizing == -1:
                                    trame2h[i] = np.sign(trame2h[i])
                                elif params.windsorizing != 0:
                                    indexes = np.where(
                                        np.abs(trame2h[i]) > (params.windsorizing * rmsmat[i]))[0]
                                    # clipping at windsorizing*rms
                                    trame2h[i][indexes] = (trame2h[i][indexes] / np.abs(
                                        trame2h[i][indexes])) * params.windsorizing * rmsmat[i]

#Raph                                trames2hWb[i] = whiten(
#                                    trame2h[i]*cp, nfft, dt, low, high, plot=False)
                                print('Testing shapes', trame2h.shape, len(trame2h[0]))                                
                                trames2ht[i] = nowhiten(trame2h[i]*cp, nfft, dt, low, high, plot=False)
                            else:
#Raph                                trames2hWb[i] = np.zeros(int(nfft))
                                trames2ht[i] = np.zeros(int(len(trame2h[i])))
                                skip = True
                                logging.debug('Slice RMS is smaller (%e) than rms_threshold (%e)!'
                                              % (rmsmat[i], rms_threshold))
                        if not skip:
                            corr = myPCorr(trames2ht, np.ceil(params.maxlag / dt), plot=False)#Raph included trames2hFT
                            tmptime = time.gmtime(basetime + begin /
                                                  params.goal_sampling_rate)
                            thisdate = time.strftime("%Y-%m-%d", tmptime)
                            thistime = time.strftime("%Y-%m-%d %H:%M:%S",
                                                     tmptime)
                            print('the shape of the corr is ', corr.shape)
                            if params.keep_all or params.keep_days:#Raph
                                ccfid = "%s_%s_%s_%s" % (station,
                                                         filterid, components,
                                                         thisdate)
                                if ccfid not in allcorr:
                                    allcorr[ccfid] = {}
                                allcorr[ccfid][thistime] = corr

                            if params.keep_days:
                                if not np.any(np.isnan(corr)) and \
                                        not np.any(np.isinf(corr)):
                                    daycorr[filterid] += corr
                                    ndaycorr[filterid] += 1

                            del corr, thistime, trames2ht

                if params.keep_all:
                    for ccfid in allcorr.keys():
                        export_allcorr(db, ccfid, allcorr[ccfid])

                if params.keep_days:
                    for ccfid in allcorr.keys():
                        station, filterid, components, date = ccfid.split('_')#Raph

                        corrs = np.asarray(list(allcorr[ccfid].values()))
                        corr = stack(db, corrs)

                        thisdate = time.strftime(
                                    "%Y-%m-%d", time.gmtime(basetime))
                        thistime = time.strftime(
                                    "%H_%M", time.gmtime(basetime))
                        add_corr(
                                db, station.replace('.', '_'), station.replace('.', '_'), int(filterid),#Raph
                                thisdate, thistime,  params.min30 /
                                params.goal_sampling_rate,
                                components, corr,
                                params.goal_sampling_rate, day=True,
                                ncorr=corrs.shape[0])
                del trames, daycorr, ndaycorr
            logging.debug("Updating Job")
            update_job(db, goal_day, orig_pair, 'AC', 'D')

            logging.info("Finished processing this pair. It took %.2f seconds" % (time.time() - tt))
        logging.info("Job Finished. It took %.2f seconds" % (time.time() - jt))
    logging.info('*** Finished: Compute AC ***')
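
# The begins/ends bookkeeping from the script above in isolation: start
# and end sample indices of corr_duration-long windows with fractional
# overlap. The duration, sampling rate and overlap are assumed values.
goal_duration, sps, overlap = 86400.0, 20.0, 0.5
min30 = 1800 * sps  # corr_duration expressed in samples
begins, ends, i = [], [], 0
while i <= goal_duration - min30 / sps:
    begins.append(int(i * sps))
    ends.append(int(i * sps + min30))
    i += int(min30 / sps * (1.0 - overlap))
print(len(begins), (begins[0], ends[0]), (begins[1], ends[1]))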
Example #35
def fftw_normxcorr(templates, stream, pads, threaded=False, *args, **kwargs):
    """
    Normalised cross-correlation using the fftw library.

    Internally this function uses double precision numbers, which are definitely
    required for seismic data. Cross-correlations are computed as the
    inverse fft of the dot product of the ffts of the stream and the reversed,
    normalised, templates.  The cross-correlation is then normalised using the
    running mean and standard deviation (not using the N-1 correction) of the
    stream and the sums of the normalised templates.

    This python function wraps the C-library written by C. Chamberlain for this
    purpose.

    :param templates: 2D Array of templates
    :type templates: np.ndarray
    :param stream: 1D array of continuous data
    :type stream: np.ndarray
    :param pads: List of ints of pad lengths in the same order as templates
    :type pads: list
    :param threaded:
        Whether to use the threaded routine or not - note openMP and python
        multiprocessing don't seem to play nice for this.
    :type threaded: bool

    :return: np.ndarray of cross-correlations
    :return: np.ndarray channels used
    """
    utilslib = _load_cdll('libutils')

    argtypes = [
        np.ctypeslib.ndpointer(dtype=np.float32,
                               ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long, ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_long,
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc, flags=native_str('C_CONTIGUOUS'))
    ]
    restype = ctypes.c_int

    if threaded:
        func = utilslib.normxcorr_fftw_threaded
    else:
        func = utilslib.normxcorr_fftw

    func.argtypes = argtypes
    func.restype = restype

    # Generate a template mask
    used_chans = ~np.isnan(templates).any(axis=1)
    template_length = templates.shape[1]
    stream_length = len(stream)
    n_templates = templates.shape[0]
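    # Pad the FFT to the full linear-correlation length, rounded up to the
    # next fast composite size so FFTW never lands on a slow prime length.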
    fftshape = next_fast_len(template_length + stream_length - 1)

    # Normalise the templates; the reversal happens inside the C routine
    norm = ((templates - templates.mean(axis=-1, keepdims=True)) /
            (templates.std(axis=-1, keepdims=True) * template_length))

    norm = np.nan_to_num(norm)
    ccc = np.zeros((n_templates, stream_length - template_length + 1),
                   np.float32)
    used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
    pads_np = np.ascontiguousarray(pads, dtype=np.intc)
    variance_warning = np.ascontiguousarray([0], dtype=np.intc)

    ret = func(np.ascontiguousarray(norm.flatten(order='C'),
                                    np.float32), template_length, n_templates,
               np.ascontiguousarray(stream, np.float32), stream_length,
               np.ascontiguousarray(ccc, np.float32), fftshape, used_chans_np,
               pads_np, variance_warning)
    if ret < 0:
        raise MemoryError()
    elif ret not in [0, 999]:
        print('Error in C code (possible normalisation error)')
        print('Maximum ccc %f at %i' % (ccc.max(), ccc.argmax()))
        print('Minimum ccc %f at %i' % (ccc.min(), ccc.argmin()))
        raise CorrelationError("Internal correlation error")
    elif ret == 999:
        warnings.warn("Some correlations not computed; are there "
                      "zeros in the data? If not, consider increasing the gain.")
    if variance_warning[0] and variance_warning[0] > template_length:
        warnings.warn(
            "Low variance found in {0} positions, check result.".format(
                variance_warning[0]))

    return ccc, used_chans
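# A slow, pure-NumPy reference for the quantity fftw_normxcorr returns for a
# single template, assuming zero-padding to a fast FFT length; toy-scale only,
# the C routine exists precisely because this approach is memory-hungry.
import numpy as np
from scipy.fft import next_fast_len, rfft, irfft

def normxcorr_reference(template, stream):
    n, m = len(template), len(stream)
    fftlen = next_fast_len(n + m - 1)  # pad to a fast composite length
    # z-normalise the template (population std, as in the docstring above)
    tnorm = (template - template.mean()) / (template.std() * n)
    # dot product of the template with every stream window, via one FFT pair
    raw = irfft(rfft(tnorm[::-1], fftlen) * rfft(stream, fftlen),
                fftlen)[n - 1:m]
    # running mean/std of the stream over template-length windows
    c1 = np.cumsum(np.concatenate(([0.0], stream)))
    c2 = np.cumsum(np.concatenate(([0.0], stream ** 2)))
    mean = (c1[n:] - c1[:-n]) / n
    var = np.maximum((c2[n:] - c2[:-n]) / n - mean ** 2, 1e-12)
    return raw / np.sqrt(var)

rng = np.random.default_rng(0)
stream = rng.standard_normal(2000)
template = stream[500:700].copy()
print(normxcorr_reference(template, stream).argmax())  # -> 500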
Example #36
0
def gen_fbank_scale_rate(scale_ctrs,
                         rate_ctrs,
                         nfft_scale,
                         nfft_rate,
                         filt_params,
                         comp_specgram=None):
    """
    This function generates a scale-rate-domain bank of up-/down-ward filters.
    The filterbank will be tuned to the passband of a target signal if specified.

    Inputs:
    scale_ctrs: numpy array containing filter centers along the scale axis
    rate_ctrs: numpy array containing filter centers along the rate axis
    nfft_scale: number of scale fft points
    nfft_rate: number of rate fft points
    filt_params: dictionary containing filter parameters including:
                 samprate_spec: sample rate along the freq. axis (cyc/oct)
                 samprate_temp: sample rate along the time axis (cyc/sec)
                 time_const: exponent coefficient of the exponential term

    comp_specgram: numpy array of size [num_freq_bin, num_time_frame] containing a complex spectrogram.
                    If provided, the function will return a filterbank that is modulated with the
                    phase of the spectrogram. Otherwise, the function will return the original set
                    of filters.

    Output:
    fbank_tf_domain: numpy array of size [num_scale_ctrs, 2*num_rate_ctrs, nfft_scale, nfft_rate] containing
                     the time-frequency-domain filterbank
    fbank_sr_domain: numpy array of size [num_scale_ctrs, 2*num_rate_ctrs, nfft_scale, nfft_rate] containing
                     the scale-rate-domain filterbank

    Note: nfft_scale >= num_freq_bin and nfft_rate >= num_time_frame, where
          [num_freq_bin,num_time_frame] = np.shape(spectrogram)
    Note: the first and last filters in the scale and rate ranges are assumed
          to be lowpass and highpass respectively

    Author: Fatemeh Pishdadian ([email protected])
    """

    # modulate the filterbank with the spectrogram phase only if one is given
    mod_filter = comp_specgram is not None

    ### Parameters and dimensions

    # pad nfft up to the next 5-smooth (fast FFT) length
    nfft_scale = helper.next_fast_len(nfft_scale)
    nfft_rate = helper.next_fast_len(nfft_rate)

    num_scale_ctrs = len(scale_ctrs)
    num_rate_ctrs = len(rate_ctrs)

    beta = filt_params['time_const']
    samprate_spec = filt_params['samprate_spec']
    samprate_temp = filt_params['samprate_temp']

    scale_params = {
        'scale_filt_len': nfft_scale,
        'samprate_spec': samprate_spec
    }
    rate_params = {
        'time_const': beta,
        'rate_filt_len': nfft_rate,
        'samprate_temp': samprate_temp
    }

    ### Generate the filterbank

    # filter modulation factor (pre-filtering stage)
    if mod_filter:
        # adjust the dimensions of the complex spectrogram
        comp_specgram = ifft2(fft2(comp_specgram, [nfft_scale, nfft_rate]))
        spec_phase = np.angle(comp_specgram)
        # uncomment this line to compare to the matlab code
        # otherwise fft phase difference results in large error values
        #spec_phase = np.angle(comp_specgram) * (np.abs(comp_specgram)>1e-10)
        filt_mod_factor = np.exp(1j * spec_phase)
    else:
        filt_mod_factor = 1

    fbank_tf_domain = np.zeros(
        (num_scale_ctrs, 2 * num_rate_ctrs, nfft_scale, nfft_rate),
        dtype='complex128')
    fbank_sr_domain = np.zeros(
        (num_scale_ctrs, 2 * num_rate_ctrs, nfft_scale, nfft_rate),
        dtype='complex128')

    for i in range(num_scale_ctrs):  # iterate over scale filter centers

        if i == 0:
            scale_params['type'] = 'lowpass'
        elif i == num_scale_ctrs - 1:
            scale_params['type'] = 'highpass'
        else:
            scale_params['type'] = 'bandpass'

        for j in range(num_rate_ctrs):  # iterate over rate filter centers

            if j == 0:
                rate_params['type'] = 'lowpass'
            elif j == num_rate_ctrs - 1:
                rate_params['type'] = 'highpass'
            else:
                rate_params['type'] = 'bandpass'

            # generate two analytic filters (one upward and one downward)
            # for the current (scale_ctr, rate_ctr)

            # upward
            filt_tf_up, _ = gen_filt_scale_rate(scale_ctrs[i], rate_ctrs[j],
                                                scale_params, rate_params,
                                                'up')
            filt_tf_up_mod = filt_tf_up * filt_mod_factor
            fbank_tf_domain[i, num_rate_ctrs - j - 1, :, :] = filt_tf_up_mod
            filt_sr_up = fft2(filt_tf_up_mod)
            fbank_sr_domain[i, num_rate_ctrs - j - 1, :, :] = filt_sr_up

            # downward
            filt_tf_down, _ = gen_filt_scale_rate(scale_ctrs[i], rate_ctrs[j],
                                                  scale_params, rate_params,
                                                  'down')
            filt_tf_down_mod = filt_tf_down * filt_mod_factor
            fbank_tf_domain[i, num_rate_ctrs + j, :, :] = filt_tf_down_mod
            filt_sr_down = fft2(filt_tf_down_mod)
            fbank_sr_domain[i, num_rate_ctrs + j, :, :] = filt_sr_down

    return fbank_tf_domain, fbank_sr_domain
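# Quick check of the 5-smooth padding used above, with scipy.fftpack's
# next_fast_len standing in for helper.next_fast_len (assumed to follow the
# same 2*3*5-composite rule):
from scipy.fftpack import next_fast_len

for n in (121, 257, 1000):
    print(n, '->', next_fast_len(n))  # 121 -> 125, 257 -> 270, 1000 -> 1000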
Example #37
0
    def __call__(self, img1, img2=None, normalization=None):
        ''' Run the cross correlation on an image/curve or against two
                images/curves
            Parameters
            ----------
            img1 : 1D or 2D np.ndarray
                The image (or curve) to run the cross correlation on
            img2 : 1D or 2D np.ndarray
                If not set to None, run cross correlation of this image (or
                curve) against img1. Default is None.
            normalization : string or list of strings
                normalization types. If not set, use internally saved
                normalization parameters
            Returns
            -------
            ccorrs : 1d or 2d np.ndarray
                An image of the correlation. The zero correlation is
                located at shape//2 where shape is the 1 or 2-tuple
                shape of the array
        '''
        if normalization is None:
            normalization = self.normalization

        self_correlation = img2 is None

        ccorrs = list()

        pos = self.pos
        #loop over individual regions
        for reg in tqdm(range(self.nids)):
            ii = self.pii[pos[reg]:pos[reg + 1]]
            jj = self.pjj[pos[reg]:pos[reg + 1]]
            i = ii.copy() - self.offsets[reg, 0]
            j = jj.copy() - self.offsets[reg, 1]
            # set up size for fft with padding
            shape = 2 * self.sizes[reg, :] - 1
            fshape = [next_fast_len(int(d)) for d in shape]

            submask = np.zeros(self.sizes[reg, :])
            submask[i, j] = 1
            mma1 = np.fft.rfftn(submask, fshape)  # FFT of the mask
            # do the correlations via FFTs
            maskcor = np.fft.irfftn(mma1 * mma1.conj(), fshape)
            # crop the mask autocorrelation back to the region size
            maskcor = _centered(maskcor, self.sizes[reg, :])
            # choose some small value to threshold
            maskcor *= maskcor > .5
            tmpimg = np.zeros(self.sizes[reg, :])
            tmpimg[i, j] = img1[ii, jj]
            im1 = np.fft.rfftn(tmpimg, fshape)  #image 1
            if self_correlation:
                ccorr = np.fft.irfftn(im1 * im1.conj(), fshape)
                ccorr = _centered(ccorr, self.sizes[reg, :])
            else:
                tmpimg2 = np.zeros_like(tmpimg)
                tmpimg2[i, j] = img2[ii, jj]
                im2 = np.fft.rfftn(tmpimg2, fshape)  #image 2
                ccorr = np.fft.irfftn(im1 * im2.conj(), fshape)
                ccorr = _centered(ccorr, self.sizes[reg, :])
            # now handle the normalizations
            if 'symavg' in normalization:
                mim1 = np.fft.rfftn(tmpimg * submask, fshape)
                Icorr = np.fft.irfftn(mim1 * mma1.conj(), fshape)
                Icorr = _centered(Icorr, self.sizes[reg, :])
                # do symmetric averaging
                if self_correlation:
                    Icorr2 = np.fft.irfftn(mma1 * mim1.conj(), fshape)
                    Icorr2 = _centered(Icorr2, self.sizes[reg, :])
                else:
                    mim2 = np.fft.rfftn(tmpimg2 * submask, fshape)
                    Icorr2 = np.fft.irfftn(mma1 * mim2.conj(), fshape)
                    Icorr2 = _centered(Icorr2, self.sizes[reg, :])
                # there is an extra condition that Icorr*Icorr2 != 0
                w = np.where(np.abs(Icorr * Icorr2) > 0)
                ccorr[w] *= maskcor[w] / Icorr[w] / Icorr2[w]

            if 'regular' in normalization:
                # only run on overlapping regions for correlation
                w = np.where(maskcor > .5)

                if self_correlation:
                    ccorr[w] /= maskcor[w] * np.average(tmpimg[w])**2
                else:
                    ccorr[w] /= maskcor[w] * np.average(
                        tmpimg[w]) * np.average(tmpimg2[w])
            ccorrs.append(ccorr)

        if len(ccorrs) == 1:
            ccorrs = ccorrs[0]

        return ccorrs
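# A 1-D sketch of the mask bookkeeping above: the autocorrelation of the
# region mask counts, per lag, how many pixel pairs actually overlap, which is
# what the 'regular' normalisation divides by. centered() is a hypothetical
# stand-in for _centered(); here the lag axis is recentred with fftshift first.
import numpy as np
from scipy.fftpack import next_fast_len

def centered(arr, newlen):
    start = (len(arr) - newlen) // 2
    return arr[start:start + newlen]

mask = np.array([1., 1., 1., 0., 1.])    # region mask with a gap
size = len(mask)
fshape = next_fast_len(2 * size - 1)     # padded, fast FFT length
fmask = np.fft.rfft(mask, fshape)
maskcor = np.fft.irfft(fmask * fmask.conj(), fshape)
maskcor = centered(np.fft.fftshift(maskcor), size)
maskcor *= maskcor > .5                  # kill FFT round-off
print(np.round(maskcor))                 # -> [2. 2. 4. 2. 2.]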
Example #38
0
def main():
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s [%(levelname)s] %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    logging.info('*** Starting: Compute CC ***')

    # Connection to the DB
    db = connect()

    if len(get_filters(db, all=False)) == 0:
        logging.info("NO FILTERS DEFINED, exiting")
        sys.exit()

    # Get Configuration
    params = Params()
    params.goal_sampling_rate = float(get_config(db, "cc_sampling_rate"))
    params.goal_duration = float(get_config(db, "analysis_duration"))
    params.overlap = float(get_config(db, "overlap"))
    params.maxlag = float(get_config(db, "maxlag"))
    params.min30 = float(get_config(db, "corr_duration")) * params.goal_sampling_rate
    params.windsorizing = float(get_config(db, "windsorizing"))
    params.resampling_method = get_config(db, "resampling_method")
    params.decimation_factor = int(get_config(db, "decimation_factor"))
    params.preprocess_lowpass = float(get_config(db, "preprocess_lowpass"))
    params.preprocess_highpass = float(get_config(db, "preprocess_highpass"))
    params.keep_all = get_config(db, 'keep_all', isbool=True)
    params.keep_days = get_config(db, 'keep_days', isbool=True)
    params.components_to_compute = get_components_to_compute(db)

    params.stack_method = get_config(db, 'stack_method')
    params.pws_timegate = float(get_config(db, 'pws_timegate'))
    params.pws_power = float(get_config(db, 'pws_power'))

    logging.info("Will compute %s" % " ".join(params.components_to_compute))

    while is_next_job(db, jobtype='CC'):
        jobs = get_next_job(db, jobtype='CC')
        stations = []
        pairs = []
        refs = []

        for job in jobs:
            refs.append(job.ref)
            pairs.append(job.pair)
            netsta1, netsta2 = job.pair.split(':')
            stations.append(netsta1)
            stations.append(netsta2)
            goal_day = job.day

        stations = np.unique(stations)

        logging.info("New CC Job: %s (%i pairs with %i stations)" %
                     (goal_day, len(pairs), len(stations)))
        jt = time.time()

        xlen = int(params.goal_duration * params.goal_sampling_rate)

        if ("R" in "".join(params.components_to_compute) or
                "T" in "".join(params.components_to_compute)):
            comps = ['Z', 'E', 'N']
            tramef_Z = np.zeros((len(stations), xlen))
            tramef_E = np.zeros((len(stations), xlen))
            tramef_N = np.zeros((len(stations), xlen))
            basetime, tramef_Z, tramef_E, tramef_N = preprocess(db, stations, comps, goal_day, params, tramef_Z, tramef_E, tramef_N)

        else:
            comps = ['Z']
            tramef_Z = np.zeros((len(stations), xlen))
            basetime, tramef_Z = preprocess(db, stations, comps, goal_day, params, tramef_Z)


        dt = 1. / params.goal_sampling_rate

        begins = []
        ends = []
        i = 0
        while i <= (params.goal_duration - params.min30 / params.goal_sampling_rate):
            begins.append(int(i * params.goal_sampling_rate))
            ends.append(int(i * params.goal_sampling_rate + params.min30))
            i += int(params.min30 / params.goal_sampling_rate * (1.0 - params.overlap))
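        # corr_duration-long windows stepped by corr_duration * (1 - overlap)
        # seconds, e.g. 1800 s windows every 900 s when overlap = 0.5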

        # ITERATING OVER PAIRS #####
        for pair in pairs:
            orig_pair = pair

            logging.info('Processing pair: %s' % pair.replace(':', ' vs '))
            tt = time.time()
            station1, station2 = pair.split(':')
            pair = (np.where(stations == station1)
                    [0][0], np.where(stations == station2)[0][0])

            s1 = get_station(db, station1.split('.')[0], station1.split('.')[1])
            s2 = get_station(db, station2.split('.')[0], station2.split('.')[1])

            if s1.X:
                X0 = s1.X
                Y0 = s1.Y
                c0 = s1.coordinates

                X1 = s2.X
                Y1 = s2.Y
                c1 = s2.coordinates

                if c0 == c1:
                    coordinates = c0
                else:
                    coordinates = 'MIX'

                cplAz = np.deg2rad(azimuth(coordinates, X0, Y0, X1, Y1))
                logging.debug("Azimuth=%.1f"%np.rad2deg(cplAz))
            else:
                # logging.debug('No Coordinates found! Skipping azimuth calculation!')
                cplAz = 0.

            for components in params.components_to_compute:

                if components == "ZZ":
                    t1 = tramef_Z[pair[0]]
                    t2 = tramef_Z[pair[1]]
                elif components[0] == "Z":
                    t1 = tramef_Z[pair[0]]
                    t2 = tramef_E[pair[1]]
                elif components[1] == "Z":
                    t1 = tramef_E[pair[0]]
                    t2 = tramef_Z[pair[1]]
                else:
                    t1 = tramef_E[pair[0]]
                    t2 = tramef_E[pair[1]]
                if np.all(t1 == 0) or np.all(t2 == 0):
                    logging.debug("%s contains empty trace(s), skipping"%components)
                    continue
                del t1, t2

                if components[0] == "Z":
                    t1 = tramef_Z[pair[0]]
                elif components[0] == "R":
                    if cplAz != 0:
                        t1 = tramef_N[pair[0]] * np.cos(cplAz) +\
                             tramef_E[pair[0]] * np.sin(cplAz)
                    else:
                        t1 = tramef_E[pair[0]]

                elif components[0] == "T":
                    if cplAz != 0:
                        t1 = tramef_N[pair[0]] * np.sin(cplAz) -\
                             tramef_E[pair[0]] * np.cos(cplAz)
                    else:
                        t1 = tramef_N[pair[0]]

                if components[1] == "Z":
                    t2 = tramef_Z[pair[1]]
                elif components[1] == "R":
                    if cplAz != 0:
                        t2 = tramef_N[pair[1]] * np.cos(cplAz) +\
                             tramef_E[pair[1]] * np.sin(cplAz)
                    else:
                        t2 = tramef_E[pair[1]]
                elif components[1] == "T":
                    if cplAz != 0:
                        t2 = tramef_N[pair[1]] * np.sin(cplAz) -\
                             tramef_E[pair[1]] * np.cos(cplAz)
                    else:
                        t2 = tramef_N[pair[1]]

                trames = np.vstack((t1, t2))
                del t1, t2

                daycorr = {}
                ndaycorr = {}
                allcorr = {}
                for filterdb in get_filters(db, all=False):
                    filterid = filterdb.ref
                    daycorr[filterid] = np.zeros(get_maxlag_samples(db,))
                    ndaycorr[filterid] = 0

                for islice, (begin, end) in enumerate(zip(begins, ends)):
                    trame2h = trames[:, begin:end]
                    nfft = next_fast_len(int(trame2h.shape[1]))
                    rmsmat = np.std(trame2h, axis=1)
                    for filterdb in get_filters(db, all=False):
                        filterid = filterdb.ref
                        low = float(filterdb.low)
                        high = float(filterdb.high)
                        rms_threshold = filterdb.rms_threshold

                        trames2hWb = np.zeros((2, int(nfft)), dtype=complex)
                        skip = False
                        for i, station in enumerate(pair):
                            if rmsmat[i] > rms_threshold:
                                cp = cosine_taper(len(trame2h[i]), 0.04)
                                trame2h[i] -= trame2h[i].mean()

                                if params.windsorizing == -1:
                                    trame2h[i] = np.sign(trame2h[i])
                                elif params.windsorizing != 0:
                                    indexes = np.where(
                                        np.abs(trame2h[i]) > (params.windsorizing * rmsmat[i]))[0]
                                    # clipping at windsorizing*rms
                                    trame2h[i][indexes] = (trame2h[i][indexes] / np.abs(
                                        trame2h[i][indexes])) * params.windsorizing * rmsmat[i]

                                trames2hWb[i] = whiten(
                                    trame2h[i]*cp, nfft, dt, low, high, plot=False)
                            else:
                                trames2hWb[i] = np.zeros(int(nfft))
                                skip = True
                                logging.debug('Slice RMS is smaller (%e) than rms_threshold (%e)!'
                                              % (rmsmat[i], rms_threshold))
                        if not skip:
                            corr = myCorr(trames2hWb, np.ceil(params.maxlag / dt), plot=False, nfft=nfft)
                            tmptime = time.gmtime(basetime + begin /
                                                  params.goal_sampling_rate)
                            thisdate = time.strftime("%Y-%m-%d", tmptime)
                            thistime = time.strftime("%Y-%m-%d %H:%M:%S",
                                                     tmptime)
                            if params.keep_all or params.keep_days:
                                ccfid = "%s_%s_%s_%s_%s" % (station1, station2,
                                                         filterid, components,
                                                         thisdate)
                                if ccfid not in allcorr:
                                    allcorr[ccfid] = {}
                                allcorr[ccfid][thistime] = corr

                            if params.keep_days:
                                if not np.any(np.isnan(corr)) and \
                                        not np.any(np.isinf(corr)):
                                    daycorr[filterid] += corr
                                    ndaycorr[filterid] += 1

                            del corr, thistime, trames2hWb

                if params.keep_all:
                    for ccfid in allcorr.keys():
                        export_allcorr(db, ccfid, allcorr[ccfid])

                if params.keep_days:
                    for ccfid in allcorr.keys():
                        station1, station2, filterid, components, date = ccfid.split('_')

                        corrs = np.asarray(list(allcorr[ccfid].values()))
                        corr = stack(db, corrs)

                        thisdate = time.strftime(
                                    "%Y-%m-%d", time.gmtime(basetime))
                        thistime = time.strftime(
                                    "%H_%M", time.gmtime(basetime))
                        add_corr(
                                db, station1.replace('.', '_'),
                                station2.replace('.', '_'), int(filterid),
                                thisdate, thistime,  params.min30 /
                                params.goal_sampling_rate,
                                components, corr,
                                params.goal_sampling_rate, day=True,
                                ncorr=corrs.shape[0])
                del trames, daycorr, ndaycorr
            logging.debug("Updating Job")
            update_job(db, goal_day, orig_pair, 'CC', 'D')

            logging.info("Finished processing this pair. It took %.2f seconds" % (time.time() - tt))
        logging.info("Job Finished. It took %.2f seconds" % (time.time() - jt))
    logging.info('*** Finished: Compute CC ***')
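# The windsorizing branch above clamps every sample whose amplitude exceeds
# windsorizing * RMS back to that bound, keeping the sign, and degenerates to
# pure one-bit normalisation when set to -1. A minimal sketch of the same
# clamp with np.clip; the data below are made up for illustration.
import numpy as np

def windsorize(trace, k):
    if k == -1:
        return np.sign(trace)            # one-bit normalisation
    rms = trace.std()
    return np.clip(trace, -k * rms, k * rms)

rng = np.random.default_rng(1)
tr = rng.standard_normal(10000)
tr[1234] = 50.0                          # a glitch far above the noise
print(float(windsorize(tr, 3.0).max()))  # ~3 * rms, well below 50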