Example #1
def box_filter(data, smoothat):
    '''
    Smooth a one-dimensional signal by convolving it with a box
    (moving-average) kernel of length smoothat. Provide smoothat in
    units of frames, i.e. samples (not ms or seconds).

    Parameters
    ----------
    data : np.array
        One-dimensional numpy array of the signal to be filtered
    smoothat : positive int
        Filtering window length in samples

    Returns
    -------
    np.array :
        One-dimensional filtered signal, same length as the input
    '''
    N = len(data)
    assert len(data.shape) == 1
    padded = np.zeros(2 * N, dtype=data.dtype)
    padded[N // 2:N // 2 + N] = data
    padded[:N // 2] = data[N // 2:0:-1]
    padded[N // 2 + N:] = data[-1:N // 2 - 1:-1]
    smoothed = fftconvolve(padded, np.ones(smoothat) / float(smoothat), 'same')
    return smoothed[N // 2:N // 2 + N]
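A minimal usage sketch for the filter above; the signal here is synthetic, and the imports are the ones the snippet itself assumes:

import numpy as np
from scipy.signal import fftconvolve

noisy = np.sin(np.linspace(0, 4 * np.pi, 500)) + 0.3 * np.random.randn(500)
smooth = box_filter(noisy, smoothat=25)   # 25-sample moving average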
Example #2
def calcRFI(background, amplitude, fraction, deltat, deltaf, exponent, enhance,
            nf, nt):
    """
    Get time-frequency plane of RFI.
     
    :param background: background level of data per channel
    :param amplitude: maximal amplitude of RFI per channel
    :param fraction: fraction of RFI dominated pixels per channel
    :param deltat: time scale of RFI decay (in units of pixels)
    :param deltaf: frequency scale of RFI decay (in units of pixels)
    :param exponent: exponent of the RFI model (either 1 or 2)
    :param enhance: enhancement factor relative to fraction
    :param nf: number of frequency channels
    :param nt: number of time steps
    :returns: RFI, the time-frequency plane of RFI
    """
    lgb = np.log(background)
    lgA = np.log(amplitude)
    d = lgA - lgb
    # choose size of kernel such that the rfi is roughly an order of magnitude
    # below the background even for the strongest RFI
    Nk = int(ceil(np.amax(d))) + 3
    t = np.arange(nt)
    if exponent == 1:
        n = d * d * (2. * deltaf * deltat / 3.0)
    elif exponent == 2:
        n = d * (deltaf * deltat * pi * .5)
    else:
        raise ValueError('Exponent must be 1 or 2, not %d' % exponent)
    neff = fraction * enhance * nt / n
    N = np.minimum(random.poisson(neff, nf), nt)
    RFI = np.zeros((nf, nt))
    dt = int(ceil(.5 * deltat))
    # the negative indices really are a hack right now
    neginds = []
    for i in range(nf):
        #         trfi = choice(t, N[i], replace = False)
        trfi = random.permutation(t)[:N[i]]
        #         trfi = randint(0,nt,N[i])
        r = random.rand(N[i])
        tA = np.exp(r * d[i] + lgb[i])
        r = np.where(random.rand(N[i]) > .5, 1, -1)
        sinds = []
        for j in range(dt):
            fac = (-1)**j * (j + 1) * dt
            sinds.append(((trfi + fac * r) % nt))
        neginds.append(concatenate(sinds))
        RFI[i, trfi] = tA
    k = kernel(deltaf, deltat, nf, nt, Nk, exponent)
    RFI = fftconvolve(RFI, k, mode='same')
    #     neginds = np.unique(concatenate(neginds))
    #     RFI[:,neginds] *= -1
    df = int(ceil(deltaf))
    for i, idxs in enumerate(neginds):
        mif = np.maximum(0, i - df)
        maf = np.minimum(nf, i + df)
        RFI[mif:maf, idxs] *= -1
    return RFI
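A hedged usage sketch. The function depends on a kernel() helper defined elsewhere in the same module, and its unqualified names suggest imports along these lines (an assumption; the original imports are not shown):

import numpy as np
from numpy import random, concatenate, pi
from math import ceil
from scipy.signal import fftconvolve

nf, nt = 64, 1024
rfi = calcRFI(background=np.full(nf, 1.0), amplitude=np.full(nf, 50.0),
              fraction=np.full(nf, 0.01), deltat=2.0, deltaf=1.5,
              exponent=2, enhance=1.0, nf=nf, nt=nt)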
Example #3
    def flag_glitches(self, signal, flags):

        flags_out = np.zeros_like(flags)

        # Search for outliers in raw samples
        good = flags == 0
        for _ in range(10):
            med = np.median(signal[good])
            dist = np.median((signal[good] - med)**2)**.5
            bad = np.abs(signal - med) > 10 * dist
            bad[np.logical_not(good)] = False
            nbad = np.sum(bad)
            if nbad == 0:
                break
            good[bad] = False
        bad = np.logical_and(flags == 0, np.logical_not(good))
        nbad = np.sum(bad)
        if nbad != 0:
            # extend the flags by convolving with a 1000-sample box; compare
            # against 0.5 rather than 1 to be robust to FFT round-off
            bad = fftconvolve(bad, np.ones(1000), mode='same') > 0.5
            flags_out[bad] = True

        # Search for outliers in a median-smoothed signal
        good = flags == 0
        wkernel = 1000
        # 'med' carries over from the raw-sample pass above on the first pass
        for _ in range(10):
            fsignal = flagged_running_average(
                signal - med, np.logical_not(good), wkernel) + med
            med = np.median(fsignal[good])
            dist = np.median((fsignal[good] - med)**2)**.5
            bad = np.abs(fsignal - med) > 10 * dist
            bad[np.logical_not(good)] = False
            nbad = np.sum(bad)
            if nbad == 0:
                break
            good[bad] = False
        bad = np.logical_and(flags == 0, np.logical_not(good))
        nbad = np.sum(bad)
        if nbad != 0:
            # extend the flags by the smoothing kernel width; the 0.5
            # threshold guards against FFT round-off
            bad = fftconvolve(bad, np.ones(wkernel), mode='same') > 0.5
            flags_out[bad] = True
        return flags_out
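flag_glitches is a method (note self) and calls a flagged_running_average helper that is not shown; below is a minimal sketch of that helper, under the assumption that it computes a running mean which ignores flagged samples (a stand-in, not the original code):

import numpy as np
from scipy.signal import fftconvolve

def flagged_running_average(signal, flags, wkernel):
    # Hypothetical stand-in: box-average the unflagged samples and
    # renormalize by the local count of good samples.
    good = np.logical_not(flags).astype(float)
    kernel = np.ones(wkernel)
    smoothed = fftconvolve(signal * good, kernel, mode='same')
    norm = fftconvolve(good, kernel, mode='same')
    return smoothed / np.maximum(norm, 1e-30)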
Example #4
def cross_covar(data1, data2, max_lag_samples, normalize_traces):

    # Remove the mean and normalize; this should have no effect on the
    # energy-normalized correlation result, but may avoid precision issues
    # if trace values are very small.
    if normalize_traces:
        scale1 = 1. / np.max(np.abs(data1))
        scale2 = 1. / np.max(np.abs(data2))
        data1 *= scale1
        data2 *= scale2

    data1 -= np.mean(data1)
    data2 -= np.mean(data2)

    # Make the data more convenient for C function np.correlate
    data1 = np.ascontiguousarray(data1, np.float32)
    data2 = np.ascontiguousarray(data2, np.float32)

    # Obtain the correlation; scipy's fftconvolve with one trace reversed is
    # much faster than np.correlate(data1, data2, mode='same')
    ccv = fftconvolve(data1, data2[::-1], mode='same')
    if normalize_traces:
        ccv /= (scale1 * scale2)

    # Get the signal energy; most people normalize by the square root of that
    ren1 = np.correlate(data1, data1, mode='valid')[0]
    ren2 = np.correlate(data2, data2, mode='valid')[0]

    # Get the window rms
    rms1 = sqrt(ren1 / len(data1))
    rms2 = sqrt(ren2 / len(data2))

    # A further parameter to 'see' impulsive events: the range of standard
    # deviations across four quarters of the window
    nsmp = int(len(data1) / 4)
    std1 = np.zeros(4)
    std2 = np.zeros(4)
    for i in range(4):
        std1[i] = np.std(data1[i * nsmp:(i + 1) * nsmp])
        std2[i] = np.std(data2[i * nsmp:(i + 1) * nsmp])

    rng1 = np.max(std1) / np.min(std1)
    rng2 = np.max(std2) / np.min(std2)
    params = (rms1, rms2, ren1, ren2, rng1, rng2)

    # Crop the correlation to the requested maximum lag
    return my_centered(ccv, 2 * max_lag_samples + 1), params
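my_centered and the bare sqrt are not defined in the snippet; here is a plausible import block and a minimal sketch of the cropping helper (assumptions, not the module's actual code):

import numpy as np
from math import sqrt
from scipy.signal import fftconvolve

def my_centered(arr, newsize):
    # Hypothetical helper: return the central newsize samples of arr,
    # matching how 'same'-mode convolutions are cropped.
    start = (len(arr) - newsize) // 2
    return arr[start:start + newsize]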
Example #5
    def calc_pot(self):
        """Calculate the potential (self.pot) given the convolution kernel
        The potential is just : G * Mass_density * convolution_kernel
        Using Fourier transform.

        """
        if self.verbose:
            print("Calculating the potential ... \n")

        # Convolution of the kernel with the weight (mass density)
        self.convol = fftconvolve(
            self.kernel, self.discF, mode='same'
        )

        # Save the scaled (negative) convolution into the disc model
        self.pot = -Ggrav.value * self.convol
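The same computation outside the class, as a minimal sketch; the density and the softened 1/r kernel below are invented for illustration, and Ggrav is assumed to be astropy's gravitational constant:

import numpy as np
from astropy.constants import G as Ggrav
from scipy.signal import fftconvolve

density = np.random.rand(128, 128)               # stand-in mass density
y, x = np.mgrid[-64:64, -64:64]
kernel = 1.0 / np.maximum(np.hypot(x, y), 1.0)   # softened 1/r kernel
pot = -Ggrav.value * fftconvolve(density, kernel, mode='same')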
Example #6
def ImageDerivatives2D(i, sigma, type):
    "Gaussian image derivatives of image i at scale sigma"
    (x, y) = np.mgrid[floor(-3 * sigma):ceil(3 * sigma) + 1,
                      floor(-3 * sigma):ceil(3 * sigma) + 1]

    # Analytic derivatives of a 2-D Gaussian, sampled on a +/- 3 sigma grid.
    # Note: string comparison must use ==, not 'is' (identity).
    if type == 'x':
        kernel = -(x / (2 * pi * sigma**4)) * np.exp(-(x**2 + y**2) / (2 * sigma**2))
    elif type == 'y':
        kernel = -(y / (2 * pi * sigma**4)) * np.exp(-(x**2 + y**2) / (2 * sigma**2))
    elif type == 'xx':
        kernel = 1 / (2 * pi * sigma**4) * (x**2 / sigma**2 - 1) * np.exp(-(x**2 + y**2) / (2 * sigma**2))
    elif type == 'yy':
        kernel = 1 / (2 * pi * sigma**4) * (y**2 / sigma**2 - 1) * np.exp(-(x**2 + y**2) / (2 * sigma**2))
    elif type in ['xy', 'yx']:
        kernel = 1 / (2 * pi * sigma**6) * (x * y) * np.exp(-(x**2 + y**2) / (2 * sigma**2))
    else:
        raise ValueError("type must be one of 'x', 'y', 'xx', 'yy', 'xy', 'yx'")

    return fftconvolve(i, kernel, 'same')
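A quick usage sketch; floor, ceil and pi are assumed to be numpy's (the snippet does not show its imports), and the image is synthetic:

import numpy as np
from numpy import floor, ceil, pi
from scipy.signal import fftconvolve

img = np.random.rand(64, 64)
gx = ImageDerivatives2D(img, 2.0, 'x')     # first Gaussian derivative in x
gxy = ImageDerivatives2D(img, 2.0, 'xy')   # mixed second derivative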
Example #7
def autocorrelation(x, lags=None):
    '''
    Computes the normalized autocorrelation over the specified
    time-lags using convolution. Autocorrelation is normalized
    such that the zero-lag autocorrelation is 1.

    TODO, fix: for long lags this uses the FFT, which has a slightly
    different normalization from the time-domain implementation used for
    short lags. In practice this will not matter.
    
    Parameters
    ----------
    x : 1d array
        Data for which to compute autocorrelation function
    lags : int
        Number of time-lags over which to compute the ACF. Default
        is min(200,len(x))

    Returns
    -------
    ndarray
        Autocorrelation function, length 2*lags + 1
    '''
    x = np.asarray(x, dtype=float)  # ensure float data before mean removal
    x = x - np.mean(x)
    N = len(x)
    if lags is None:
        lags = min(200, N)
    # TODO: TUNE THIS CONSTANT
    if lags > 0.5 * np.log2(N):
        # Use FFT for long lags
        result = np.float32(fftconvolve(x, x[::-1], 'same'))
        M = len(result) // 2
        result *= 1. / result[M]
        return result[M - lags:M + lags + 1]
    else:
        # Use time domain for short lags
        result = np.zeros(lags * 2 + 1, 'float')
        zerolag = np.var(x)
        result[lags] = zerolag
        # run through lag == lags so the edge bins of the result are filled
        for i in range(1, lags + 1):
            result[i + lags] = result[lags - i] = np.mean(x[i:] * x[:-i])
        result *= 1. / zerolag
        return result
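A small sanity check on synthetic data; lags=100 is large enough here that the FFT branch is taken:

import numpy as np
from scipy.signal import fftconvolve

sig = np.sin(np.linspace(0, 20 * np.pi, 2000)) + 0.1 * np.random.randn(2000)
acf = autocorrelation(sig, lags=100)
assert np.isclose(acf[100], 1.0)   # zero-lag value is 1 by construction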
Example #8
def cross_covar(data1, data2, max_lag_samples, normalize, params=False):

    #ToDo: deal with params

    # remove mean and normalize; this should have no effect on the energy-normalized
    #correlation result, but may avoid precision issues if trace values are very small
    #if normalize:
    #    scale1 = 1./np.max(np.abs(data1))
    #    scale2 = 1./np.max(np.abs(data2))
    #    data1*=scale1
    #    data2*=scale2

    if len(data1) == 0 or len(data2) == 0:
        return ([], [])

    data1 -= np.mean(data1)
    data2 -= np.mean(data2)

    # Make the data more convenient for C function np.correlate

    data1 = np.ascontiguousarray(data1, np.float32)
    data2 = np.ascontiguousarray(data2, np.float32)

    if params:
        params = get_correlation_params(data1, data2)
        ren1, ren2 = params[2:4]
    else:
        ren1 = np.correlate(data1, data1, mode='valid')[0]
        ren2 = np.correlate(data2, data2, mode='valid')[0]

    # parentheses matter here: bail out when either trace has zero energy
    # and normalization was requested
    if (ren1 == 0.0 or ren2 == 0.0) and normalize:
        return ([], [])

    # scipy.fftconvolve is way faster than np.correlate, and zeropads for non-circular convolution
    ccv = fftconvolve(data1[::-1], data2, mode='same')

    #if normalize:
    #    ccv /= (scale1*scale2)

    if normalize:
        ccv /= (sqrt(ren1) * sqrt(ren2))

    return my_centered(ccv, 2 * max_lag_samples + 1), params
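A usage sketch for the default path (params=False); my_centered is assumed to behave like the sketch under Example #4, and sqrt to come from math:

import numpy as np
from math import sqrt
from scipy.signal import fftconvolve

a = np.random.randn(4096)
b = np.roll(a, 50) + 0.05 * np.random.randn(4096)
ccv, _ = cross_covar(a, b, max_lag_samples=200, normalize=True)
# the peak of ccv sits near the 50-sample shift between the two traces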
Example #9
def filter_array(a, window, cyclic = False):
	"""
	Filter an array using the window function.  The transformation is
	done in place.  The data are assumed to be 0 outside of their
	domain of definition.  The window function must have an odd number
	of samples in each dimension;  this is done so that it is always
	clear which sample is at the window's centre, which helps prevent
	phase errors.  If the window function's size exceeds that of the
	data in one or more dimensions, the largest allowed central portion
	of the window function in the affected dimensions will be used.
	This is done silently;  to determine if window function truncation
	will occur, check for yourself that your window function is smaller
	than your data in all dimensions.
	"""
	assert not cyclic	# no longer supported, maybe in future
	# check that the window and the data have the same number of
	# dimensions
	dims = len(a.shape)
	if dims != len(window.shape):
		raise ValueError("array and window dimensions mismatch")
	# check that all of the window's dimensions have an odd size
	if any(n % 2 == 0 for n in window.shape):
		raise ValueError("window size is not an odd integer in at least 1 dimension")
	# determine how much of the window function can be used
	window_slices = []
	for d in range(dims):
		if window.shape[d] > a.shape[d]:
			# largest odd integer <= size of a
			n = ((a.shape[d] + 1) // 2) * 2 - 1
			first = (window.shape[d] - n) // 2
			window_slices.append(slice(first, first + n))
		else:
			window_slices.append(slice(0, window.shape[d]))
	# FIXME:  in numpy >= 1.7.0 there is copyto().  is that better?
	# note: current numpy requires a tuple (not a list) of slices here
	a.flat = signaltools.fftconvolve(a, window[tuple(window_slices)], mode = "same").flat
	return a
	return a
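A usage sketch; modern SciPy folded signaltools into scipy.signal, so the alias below stands in for the snippet's original import (an assumption):

import numpy as np
from scipy import signal as signaltools  # signaltools.fftconvolve == scipy.signal.fftconvolve

a = np.random.rand(32, 32)
window = np.ones((5, 5)) / 25.0   # odd-sized smoothing window
filter_array(a, window)           # a is filtered in place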
Example #10
def fftcorrelatend(template, A):
    """
    Perform a 2D fft correlation using fftconvolve.
    """
    return fftconvolve(rot90(template, 2), A)
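A sanity check that flipping the template by 180 degrees turns fftconvolve into a correlation; the data is synthetic and rot90 is numpy's:

import numpy as np
from numpy import rot90
from scipy.signal import fftconvolve

A = np.random.randn(100, 100)
template = A[30:50, 30:50].copy()
corr = fftcorrelatend(template, A)
peak = np.unravel_index(np.argmax(corr), corr.shape)
# peak should land near the template's location in A (offset by the
# template size, since fftconvolve defaults to 'full' mode)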