Example #1
    def time_step_fftpack(self, dt, Nsteps=1):
        """
        Perform a series of time-steps via the time-dependent Schrodinger
        Equation.

        Parameters
        ----------
        dt : float
            The small time interval over which to integrate
        Nsteps : int, optional
            The number of intervals to compute.  The total change in time at
            the end of this method will be dt * Nsteps (default = 1)
        """
        assert Nsteps >= 0
        self.dt = dt
        if Nsteps > 0:
            self.psi_mod_x *= self.x_evolve_half
            for num_iter in range(Nsteps - 1):
                self.psi_mod_k = fftpack.fft(self.psi_mod_x)
                self.psi_mod_k *= self.k_evolve
                self.psi_mod_x = fftpack.ifft(self.psi_mod_k)
                self.psi_mod_x *= self.x_evolve
            self.psi_mod_k = fftpack.fft(self.psi_mod_x)
            self.psi_mod_k *= self.k_evolve
            self.psi_mod_x = fftpack.ifft(self.psi_mod_k)
            self.psi_mod_x *= self.x_evolve_half
            self.psi_mod_k = fftpack.fft(self.psi_mod_x)
            self.psi_mod_x /= self.norm
            self.psi_mod_k = fftpack.fft(self.psi_mod_x)
            self.t += dt * Nsteps
        return None
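The method above is one sweep of the split-step Fourier scheme: a half step of the potential phase in x, a full kinetic step in k, and another half step in x, with fftpack.fft/ifft moving between the two representations. Below is a minimal standalone sketch of that scheme for a free Gaussian wave packet (hbar = m = 1 and V = 0 are assumed; the names are illustrative, not part of the class above).

import numpy as np
from scipy import fftpack

# Grid and initial Gaussian wave packet.
N, dx, dt = 1024, 0.1, 0.005
x = dx * (np.arange(N) - N // 2)
k = 2 * np.pi * fftpack.fftfreq(N, d=dx)
psi = np.exp(-0.5 * (x / 1.0) ** 2 + 1j * 5.0 * x)
psi /= np.sqrt(np.sum(np.abs(psi) ** 2) * dx)

V = np.zeros_like(x)                      # free particle (assumption)
x_evolve_half = np.exp(-0.5j * V * dt)    # half-step potential phase
k_evolve = np.exp(-0.5j * k ** 2 * dt)    # full-step kinetic phase

for _ in range(100):
    psi *= x_evolve_half
    psi = fftpack.ifft(fftpack.fft(psi) * k_evolve)
    psi *= x_evolve_half

# The evolution is unitary, so the norm is preserved.
print(np.sum(np.abs(psi) ** 2) * dx)      # ~1.0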
Example #2
def faststcorrelate(
    input1, input2, windowtype="hann", nperseg=32, weighting="None", displayplots=False
):
    """Perform correlation between short-time Fourier transformed arrays."""
    nfft = nperseg
    noverlap = nperseg - 1
    onesided = False
    boundary = "even"
    freqs, times, thestft1 = signal.stft(
        input1,
        fs=1.0,
        window=windowtype,
        nperseg=nperseg,
        noverlap=noverlap,
        nfft=nfft,
        detrend="linear",
        return_onesided=onesided,
        boundary=boundary,
        padded=True,
        axis=-1,
    )

    freqs, times, thestft2 = signal.stft(
        input2,
        fs=1.0,
        window=windowtype,
        nperseg=nperseg,
        noverlap=noverlap,
        nfft=nfft,
        detrend="linear",
        return_onesided=onesided,
        boundary=boundary,
        padded=True,
        axis=-1,
    )

    acorrfft1 = thestft1 * np.conj(thestft1)
    acorrfft2 = thestft2 * np.conj(thestft2)
    acorr1 = np.roll(fftpack.ifft(acorrfft1, axis=0).real, nperseg // 2, axis=0)[nperseg // 2, :]
    acorr2 = np.roll(fftpack.ifft(acorrfft2, axis=0).real, nperseg // 2, axis=0)[nperseg // 2, :]
    normfacs = np.sqrt(acorr1 * acorr2)
    product = thestft1 * np.conj(thestft2)
    stcorr = np.roll(fftpack.ifft(product, axis=0).real, nperseg // 2, axis=0)
    for i in range(len(normfacs)):
        stcorr[:, i] /= normfacs[i]

    timestep = times[1] - times[0]
    corrtimes = np.linspace(
        -timestep * (nperseg // 2), timestep * (nperseg // 2), num=nperseg, endpoint=False,
    )

    return corrtimes, times, stcorr
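faststcorrelate leans on the correlation theorem: the inverse FFT of one spectrum times the conjugate of another is the circular cross-correlation, which np.roll then re-centres. A small self-contained check of that identity (illustrative only, independent of the function above):

import numpy as np
from scipy import fftpack

rng = np.random.default_rng(0)
x = rng.standard_normal(64)
y = rng.standard_normal(64)
N = len(x)

# Circular cross-correlation via the FFT ...
r_fft = np.real(fftpack.ifft(fftpack.fft(x) * np.conj(fftpack.fft(y))))

# ... and the same quantity computed directly: r[k] = sum_m x[(m+k) % N] * y[m].
r_direct = np.array([np.sum(np.roll(x, -k) * y) for k in range(N)])

print(np.allclose(r_fft, r_direct))  # True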
Example #3
def _remove_stripe_based_filtering_sinogram(sinogram, sigma, size):
    """
    Algorithm 2 in the paper. Remove stripes using the filtering technique.
    Angular direction is along the axis 0
    ---------
    Parameters: - sinogram: 2D array.
                - sigma: sigma of the Gaussian window which is used to separate
                        the low-pass and high-pass components of the intensity
                        profiles of each column.
                - size: window size of the median filter.
    ---------
    Return:     - stripe-removed sinogram.
    """
    pad = 150  # To reduce artifacts caused by FFT
    sinogram = np.transpose(sinogram)
    sinogram2 = np.pad(sinogram, ((0, 0), (pad, pad)), mode='reflect')
    (_, ncol) = sinogram2.shape
    window = signal.gaussian(ncol, std=sigma)
    listsign = np.power(-1.0, np.arange(ncol))
    sinosmooth = np.zeros_like(sinogram)
    for i, sinolist in enumerate(sinogram2):
        # sinosmooth[i] = np.real(ifft(fft(sinolist*listsign)*window)*listsign)[pad:ncol-pad]
        sinosmooth[i] = np.real(
            fft_vo.ifft(fft_vo.fft(sinolist * listsign) * window) *
            listsign)[pad:ncol - pad]
    sinosharp = sinogram - sinosmooth
    sinosmooth_cor = median_filter(sinosmooth, (size, 1))
    return np.transpose(sinosmooth_cor + sinosharp)
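The listsign multiplication above is the usual trick for shifting a spectrum by half its length: for even N, fft(x * (-1)**n) equals fftshift(fft(x)), so a Gaussian window centred mid-array acts as a low-pass filter. A short standalone check of that equivalence (names are illustrative):

import numpy as np
from scipy.signal.windows import gaussian

rng = np.random.default_rng(1)
n = 256                                    # even length assumed
x = rng.standard_normal(n)
sign = (-1.0) ** np.arange(n)

# Multiplying by (-1)**n in the signal domain shifts the spectrum by n/2.
print(np.allclose(np.fft.fft(x * sign), np.fft.fftshift(np.fft.fft(x))))  # True

# Low-pass filtering with a centred Gaussian window, written both ways.
win = gaussian(n, std=5)
smooth_sign = np.real(np.fft.ifft(np.fft.fft(x * sign) * win) * sign)
smooth_shift = np.real(np.fft.ifft(np.fft.ifftshift(
    np.fft.fftshift(np.fft.fft(x)) * win)))
print(np.allclose(smooth_sign, smooth_shift))  # True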
Example #4
def IFT_1D(phi, F, axis=-1):
    """
    Inverse Fourier transform the Faraday dispersion function F(phi)
    to obtain the complex linear polarization spectrum P(lambda^2).
    The function uses the FFT to approximate the inverse continuous
    Fourier transform of a discretely sampled function.
       
    Function returns ls and P, which approximate P(ls).
    
    Parameters
    ----------
    phi : array_like
        regularly sampled array of Faraday depth phi.
        phi is assumed to be regularly spaced, i.e.
        phi = phi0 + Dphi * np.arange(N)
    F   : array_like
        Complex Faraday dispersion function
    axis : int
        axis along which to perform fourier transform.

    Returns
    -------
    ls  : ndarray
        lambda squared of the calculated linear polarization 
        spectrum.
    P   : ndarray
        Complex linear polarization spectrum.
    """

    assert phi.ndim == 1
    assert F.shape[axis] == phi.shape[0]

    N = len(phi)
    if N % 2 != 0:
        raise ValueError("Number of samples must be even")

    phi0 = phi[0]
    Dphi = phi[1] - phi[0]

    ls0 = -0.5 / Dphi
    Dls = 1. / (N * Dphi)
    ls = ls0 + Dls * np.arange(N)

    shape = np.ones(F.ndim, dtype=int)
    shape[axis] = N

    ls_calc = ls.reshape(shape)
    phi_calc = phi.reshape(shape)

    F_prime = F * np.exp(2j * np.pi * ls0 * phi_calc)
    P_prime = fft.ifft(F_prime, axis=axis)
    P = N * Dphi * np.exp(2j * np.pi * phi0 *
                          (ls_calc - ls0)) * P_prime  #/ np.pi
    ls = ls * np.pi

    return ls, P
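IFT_1D approximates the continuous transform P(l) = int F(phi) * exp(2j*pi*l*phi) dphi with an ifft plus the phase factors required by the non-zero starting values phi0 and ls0. Below is a standalone sanity check of that recipe on a Gaussian, whose continuous transform is known in closed form (this is only a sketch, not the routine above; the routine's final ls * np.pi rescaling is omitted):

import numpy as np

# F(phi) = exp(-pi*phi^2) transforms to P(l) = exp(-pi*l^2) under
# P(l) = int F(phi) * exp(2j*pi*l*phi) dphi.
N, Dphi = 1024, 20.0 / 1024
phi = -10.0 + Dphi * np.arange(N)
F = np.exp(-np.pi * phi ** 2)

ls0, Dls = -0.5 / Dphi, 1.0 / (N * Dphi)
ls = ls0 + Dls * np.arange(N)

P = (N * Dphi * np.exp(2j * np.pi * phi[0] * (ls - ls0)) *
     np.fft.ifft(F * np.exp(2j * np.pi * ls0 * phi)))

print(np.allclose(P, np.exp(-np.pi * ls ** 2), atol=1e-8))  # True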
Example #5
def periodogram_mean(func, fe, n_data, f_zero=None):
    """
    Function calculating the theoretical mean of the periodogram (defined as the
    squared modulus of the FFT divided by fe*n_data), given the theoretical PSD
    (func), the sampling frequency fe and the number of points n_data.


    @param func: function of one parameter giving the PSD as a function of frequency
    @type func : function
    @param fe: sampling frequency
    @type fe : scalar (float)
    @param n_data: number of points of the periodogram
    @type n_data : scalar (integer)

    @return:
        P_mean : Periodogram expectation (n_data-vector)
        R : autocovariance function (n_data-vector)
    """

    # 1. Calculation of the autocovariance function Rn
    power = int(np.log(n_data) / np.log(2.)) + 4
    # Number of points for the integration
    N_points = 2**power
    # N_points = 3*n_data

    k_points = np.arange(0, N_points)
    frequencies = fe * (k_points / float(N_points) - 0.5)

    if f_zero is None:
        f_zero = fe / (N_points * 10.)
    i = np.where(frequencies == 0)
    frequencies[i] = f_zero
    Z = func(frequencies)
    n = np.arange(0, n_data)
    Z_ifft = ifft(Z)
    R = fe / float(N_points) * (
        Z[0] * 0.5 * (np.exp(1j * np.pi * n) - np.exp(-1j * np.pi * n)) +
        N_points * Z_ifft[0:n_data] * np.exp(-1j * np.pi * n))
    # 2. Calculation of the of the periodogram mean vector
    X = R[0:n_data] * (1. - np.abs(n) / float(n_data))

    return 1. / fe * (fft(X) + n_data * ifft(X) - R[0]), R[0:n_data]
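periodogram_mean predicts the expectation of the periodogram |fft(x)|**2 / (fe * n_data) from a theoretical PSD. The simplest consistency check is white noise, where that expectation is just the flat two-sided level sigma**2 / fe; a standalone Monte Carlo sketch of that check (illustrative, independent of the function above):

import numpy as np

fe, n_data, sigma, n_trials = 2.0, 256, 1.5, 2000
rng = np.random.default_rng(42)

pgrams = np.empty((n_trials, n_data))
for i in range(n_trials):
    x = sigma * rng.standard_normal(n_data)
    pgrams[i] = np.abs(np.fft.fft(x)) ** 2 / (fe * n_data)

# For white noise the periodogram is unbiased: its mean is sigma**2 / fe.
print(pgrams.mean(), sigma ** 2 / fe)  # ~1.125 vs 1.125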
Example #6
 def process_frames(self, data):
     sinogram = np.copy(data[0])
     ratio = self.parameters['ratio']
     pad = 100
     ncolpad = self.width1 + 2 * pad
     centerc = np.int16(np.ceil((ncolpad - 1) * 0.5))
     ulist = 1.0 * (np.arange(0, ncolpad) - centerc) / ncolpad
     listfactor = 1.0 + ratio * ulist**2
     sinopad = np.pad(sinogram, ((0, 0), (pad, pad)), mode='edge')
     sinophase = np.zeros((self.height1, ncolpad), dtype=np.float32)
     for i in range(0, self.height1):
         sinophase[i] = np.real(fft.ifft(np.fft.ifftshift(
             np.fft.fftshift(fft.fft(sinopad[i])) / listfactor)))
     return sinophase[:, pad:ncolpad - pad]
Example #7
 def process_frames(self, data):
     sinogram = np.transpose(np.copy(data[0]))
     sinogram2 = np.pad(sinogram, ((0, 0), (self.pad, self.pad)),
                        mode='reflect')
     size = np.clip(np.int16(self.parameters['size']), 1, self.width1 - 1)
     sinosmooth = np.zeros_like(sinogram)
     for i, sinolist in enumerate(sinogram2):
         sinosmooth[i] = np.real(
             fft.ifft(fft.fft(sinolist * self.listsign) * self.window) *
             self.listsign)[self.pad:self.height1 - self.pad]
     sinosharp = sinogram - sinosmooth
     sinosmooth_cor = np.transpose(
         self.remove_stripe_based_sorting(self.matindex,
                                          np.transpose(sinosmooth), size))
     return np.transpose(sinosmooth_cor + sinosharp)
Example #8
 def process_frames(self, data):
     sinogram = np.copy(data[0])
     ratio = self.parameters['ratio']
     pad = 100
     ncolpad = self.width1 + 2 * pad
     centerc = np.int16(np.ceil((ncolpad - 1) * 0.5))
     ulist = 1.0 * (np.arange(0, ncolpad) - centerc) / ncolpad
     listfactor = 1.0 + ratio * ulist**2
     sinopad = np.pad(sinogram, ((0, 0), (pad, pad)), mode='edge')
     sinophase = np.zeros((self.height1, ncolpad), dtype=np.float32)
     for i in range(0, self.height1):
         sinophase[i] = np.real(
             fft.ifft(
                 np.fft.ifftshift(
                     np.fft.fftshift(fft.fft(sinopad[i])) / listfactor)))
     return sinophase[:, pad:ncolpad - pad]
Example #9
def fir_window_bp(delta, fl, fh):
    """
    Finite impulse response, bandpass.
    This filter doesn't match the MATLAB version exactly due to small Fourier-transform
    imprecisions. Consider replacing the transform calls with the FFTW versions.
    """
    b = firwin(delta.shape[0]+1, (fl*2, fh*2), pass_zero=False)[:-1]
    m = delta.shape[1]
    batches = 20
    batch_size = int(m / batches) + 1
    temp = fft(ifftshift(b))
    out = zeros(delta.shape, dtype=delta.dtype)
    for i in range(batches):
        indexes = (batch_size*i, min((batch_size*(i+1), m)))
        freq = fft(delta[:, indexes[0]:indexes[1]], axis=0) * \
            tile(temp, (delta.shape[2], indexes[1] - indexes[0], 1)).swapaxes(0, 2)
        out[:, indexes[0]:indexes[1]] = real(ifft(freq, axis=0))
    return out
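fir_window_bp filters each column by multiplying its spectrum with the FFT of the (ifftshifted) kernel, i.e. a circular convolution. A small standalone check that frequency-domain multiplication matches the direct circular-convolution sum (illustrative only):

import numpy as np
from numpy.fft import fft, ifft

rng = np.random.default_rng(3)
x = rng.standard_normal(32)
h = rng.standard_normal(32)
N = len(x)

# Circular convolution via the FFT ...
y_fft = np.real(ifft(fft(x) * fft(h)))

# ... versus the definition y[n] = sum_k h[k] * x[(n - k) mod N].
y_direct = np.array([np.sum(h * x[(n - np.arange(N)) % N]) for n in range(N)])

print(np.allclose(y_fft, y_direct))  # True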
Example #10
    def apply_IRS(self, data, srate, nbits):
        """ Apply telephone handset BW [300, 3200] Hz """
        raise NotImplementedError('Under construction!')
        from pyfftw.interfaces import scipy_fftpack as fftw
        n = data.shape[0]
        # find next pow of 2 which is greater or eq to n
        pow_of_2 = int(2**np.ceil(np.log2(n)))

        align_filter_dB = np.array([[0, -200], [50, -40], [100,
                                                           -20], [125, -12],
                                    [160, -6], [200, 0], [250, 4], [300, 6],
                                    [350, 8], [400, 10], [500, 11], [600, 12],
                                    [700, 12], [800, 12], [1000, 12],
                                    [1300, 12], [1600, 12], [2000, 12],
                                    [2500, 12], [3000, 12], [3250, 12],
                                    [3500, 4], [4000, -200], [5000, -200],
                                    [6300, -200], [8000, -200]])
        print('align filter dB shape: ', align_filter_dB.shape)
        num_of_points, trivial = align_filter_dB.shape
        overallGainFilter = interp1d(align_filter_dB[:, 0],
                                     align_filter_dB[:, 1])(1000)

        x = np.zeros((pow_of_2))
        x[:data.shape[0]] = data

        x_fft = fftw.fft(x, pow_of_2)

        freq_resolution = srate / pow_of_2

        freqs = np.arange(pow_of_2 // 2 + 1) * freq_resolution
        factorDb = interp1d(align_filter_dB[:, 0],
                            align_filter_dB[:, 1])(freqs) - overallGainFilter
        factor = 10**(factorDb / 20)

        factor = np.concatenate((factor, factor[1:pow_of_2 // 2][::-1]))
        x_fft = x_fft * factor

        y = fftw.ifft(x_fft, pow_of_2)

        data_filtered = y[:n]
        return data_filtered
Example #11
def remove_stripe_based_filtering(sinogram, sigma, size, dim=1):
    """
    Remove stripe artifacts in a sinogram using the filtering technique,
    algorithm 2 in Ref. [1]. Angular direction is along the axis 0.

    Parameters
    ----------
    sinogram : array_like
        2D array. Sinogram image
    sigma : int
        Sigma of the Gaussian window used to separate the low-pass and
        high-pass components of the intensity profile of each column.
    size : int
        Window size of the median filter.
    dim : {1, 2}, optional
        Dimension of the window.

    Returns
    -------
    array_like
         2D array. Stripe-removed sinogram.

    References
    ----------
    .. [1] https://doi.org/10.1364/OE.26.028396
    """
    pad = min(150, int(0.1 * sinogram.shape[0]))
    sinogram = np.transpose(sinogram)
    sino_pad = np.pad(sinogram, ((0, 0), (pad, pad)), mode='reflect')
    (_, ncol) = sino_pad.shape
    window = gaussian(ncol, std=sigma)
    list_sign = np.power(-1.0, np.arange(ncol))
    sino_smooth = np.copy(sinogram)
    for i, sino_1d in enumerate(sino_pad):
        sino_smooth[i] = np.real(
            fft.ifft(fft.fft(sino_1d * list_sign) * window) *
            list_sign)[pad:ncol - pad]
    sino_sharp = sinogram - sino_smooth
    if dim == 2:
        sino_smooth_cor = median_filter(sino_smooth, (size, size))
    else:
        sino_smooth_cor = median_filter(sino_smooth, (size, 1))
    return np.transpose(sino_smooth_cor + sino_sharp)
Example #12
def spectral_whitening(tr,
                       smooth=None,
                       filter=None,
                       waterlevel=1e-8,
                       mask_again=True):
    """
    Apply spectral whitening to data

    Data is divided by its smoothed (Default: None) amplitude spectrum.

    :param tr: trace to manipulate
    :param smooth: length of smoothing window in Hz
        (default None -> no smoothing)
    :param filter: filter spectrum with bandpass after whitening
        (tuple with min and max frequency)
    :param waterlevel: waterlevel relative to mean of spectrum
    :param mask_again: whether to mask the array again after this operation and
        set the corresponding data to 0

    :return: whitened data
    """
    sr = tr.stats.sampling_rate
    data = tr.data
    data = _fill_array(data, fill_value=0)
    mask = np.ma.getmask(data)
    nfft = next_fast_len(len(data))
    spec = fft(data, nfft)
    spec_ampl = np.abs(spec)
    spec_ampl /= np.max(spec_ampl)
    if smooth:
        smooth = int(smooth * nfft / sr)
        spec_ampl = ifftshift(smooth_func(fftshift(spec_ampl), smooth))
    # save guard against division by 0
    spec_ampl[spec_ampl < waterlevel] = waterlevel
    spec /= spec_ampl
    if filter is not None:
        spec *= _filter_resp(*filter, sr=sr, N=len(spec), whole=True)[1]
    ret = np.real(ifft(spec, nfft)[:len(data)])
    if mask_again:
        ret = _fill_array(ret, mask=mask, fill_value=0)
    tr.data = ret
    return tr
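spectral_whitening divides the spectrum by its (optionally smoothed) amplitude, protected by a waterlevel, so the result has an essentially flat amplitude spectrum. A minimal standalone sketch of the unsmoothed, unfiltered case (the helpers _fill_array, smooth_func and _filter_resp are omitted here; names are illustrative):

import numpy as np
from numpy.fft import fft, ifft

rng = np.random.default_rng(7)
data = rng.standard_normal(1024)

spec = fft(data)
spec_ampl = np.abs(spec)
spec_ampl /= spec_ampl.max()
waterlevel = 1e-8
spec_ampl[spec_ampl < waterlevel] = waterlevel   # guard against division by ~0

white = np.real(ifft(spec / spec_ampl))

# Every bin of the whitened spectrum now has (nearly) the same amplitude.
print(np.allclose(np.abs(fft(white)), np.abs(spec).max()))  # True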
Example #13
def fourier_operator(f, mode, N, idx=None):

    if (mode == 1):  # Forward operator
        x = f1_linear(fftw.fft(f) / np.sqrt(N))
        if idx is not None:
            out = x[idx]
        else:
            out = x

    else:

        if idx is not None:
            x = np.zeros(N, dtype=np.complex128)
            x[idx] = f
        else:
            x = f

        out = fftw.ifft(f1_linear_inv(x)) * np.sqrt(N)

    return out
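fourier_operator scales the forward FFT by 1/sqrt(N) and the inverse by sqrt(N), so the pair is unitary (norm preserving), which is what sparse-recovery solvers usually expect of a measurement operator. A quick standalone check of that scaling, ignoring the external f1_linear reordering:

import numpy as np
from numpy.fft import fft, ifft

rng = np.random.default_rng(5)
N = 128
f = rng.standard_normal(N) + 1j * rng.standard_normal(N)

x = fft(f) / np.sqrt(N)          # forward, unitary scaling
f_back = ifft(x) * np.sqrt(N)    # inverse with the matching scaling

print(np.allclose(f_back, f))                             # exact round trip
print(np.isclose(np.linalg.norm(x), np.linalg.norm(f)))   # norms agree (Parseval)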
Example #14
def remove_stripe_based_filtering_sorting(sinogram, sigma, size, dim=1):
    """
    Combination of algorithm 2 and algorithm 3 in [1].
    Removing stripes using the filtering and sorting technique.
    Angular direction is along the axis 0.

    Parameters
    ----------
    sinogram : float
        2D array.
    sigma : int
        Sigma of the Gaussian window used to separate the low-pass and
        high-pass components of the intensity profile of each column.
    size : int
        Window size of the median filter.
    dim : {1, 2}, optional
        Dimension of the window.

    Returns
    -------
    float
        2D array. Stripe-removed sinogram.
    """
    pad = 150  # To reduce artifacts caused by FFT
    sinogram = np.transpose(sinogram)
    sinopad = np.pad(sinogram, ((0, 0), (pad, pad)), mode='reflect')
    (_, ncol) = sinopad.shape
    window = gaussian(ncol, std=sigma)
    listsign = np.power(-1.0, np.arange(ncol))
    sinosmooth = np.copy(sinogram)
    for i, sinolist in enumerate(sinopad):
        # sinosmooth[i] = np.real(ifft(fft(
        #     sinolist * listsign) * window) * listsign)[pad:ncol-pad]
        sinosmooth[i] = np.real(
            fft.ifft(fft.fft(sinolist * listsign) * window) *
            listsign)[pad:ncol - pad]
    sinosharp = sinogram - sinosmooth
    sinosmooth_cor = np.transpose(
        remove_stripe_based_sorting(np.transpose(sinosmooth), size, dim))
    return np.transpose(sinosmooth_cor + sinosharp)
Example #15
 def apply_filter(self, mat, window, pattern, pad_width):
     (nrow, ncol) = mat.shape
     if pattern == "PROJECTION":
         top_drop = 10  # To remove the time stamp at some data
         mat_pad = np.pad(mat[top_drop:],
                          ((pad_width + top_drop, pad_width),
                           (pad_width, pad_width)),
                          mode="edge")
         win_pad = np.pad(window, pad_width, mode="edge")
         mat_dec = fft.ifft2(
             fft.fft2(-np.log(mat_pad)) / fft.ifftshift(win_pad))
         mat_dec = np.abs(mat_dec[pad_width:pad_width + nrow,
                                  pad_width:pad_width + ncol])
     else:
         mat_pad = np.pad(-np.log(mat), ((0, 0), (pad_width, pad_width)),
                          mode='edge')
         win_pad = np.pad(window, ((0, 0), (pad_width, pad_width)),
                          mode="edge")
         mat_fft = np.fft.fftshift(fft.fft(mat_pad), axes=1) / win_pad
         mat_dec = fft.ifft(np.fft.ifftshift(mat_fft, axes=1))
         mat_dec = np.abs(mat_dec[:, pad_width:pad_width + ncol])
     return np.float32(np.exp(-mat_dec))
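apply_filter works in the 2-D frequency domain: the fft2 of the (negative-log) projection is divided by an ifftshifted window, i.e. a deconvolution. A standalone round-trip sketch showing that dividing by the same strictly positive transfer function undoes a frequency-domain multiplication (purely illustrative, not the plugin above):

import numpy as np
from numpy.fft import fft2, ifft2, fftfreq

rng = np.random.default_rng(11)
img = rng.standard_normal((64, 64))

# A smooth, strictly positive transfer function (no zeros to divide by).
fy = fftfreq(64)[:, None]
fx = fftfreq(64)[None, :]
H = 1.0 / (1.0 + 10.0 * (fx ** 2 + fy ** 2))

blurred = np.real(ifft2(fft2(img) * H))
restored = np.real(ifft2(fft2(blurred) / H))

print(np.allclose(restored, img))  # True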
Example #16
    def _applyEulerianVideoMagnification(self):

        timestamp = timeit.default_timer()

        if self._useGrayOverlay:
            smallImage = cv2.cvtColor(self._image,
                                      cv2.COLOR_BGR2GRAY).astype(numpy.float32)
        else:
            smallImage = self._image.astype(numpy.float32)

        # Downsample the image using a pyramid technique.
        i = 0
        while i < self._numPyramidLevels:
            smallImage = cv2.pyrDown(smallImage)
            i += 1
        if self._useLaplacianPyramid:
            smallImage[:] -= \
                cv2.pyrUp(cv2.pyrDown(smallImage))

        historyLength = len(self._historyTimestamps)

        if historyLength < self._maxHistoryLength - 1:

            # Append the new image and timestamp to the
            # history.
            self._history[historyLength] = smallImage
            self._historyTimestamps.append(timestamp)

            # The history is still not full, so wait.
            return

        if historyLength == self._maxHistoryLength - 1:
            # Append the new image and timestamp to the
            # history.
            self._history[historyLength] = smallImage
            self._historyTimestamps.append(timestamp)
        else:
            # Drop the oldest image and timestamp from the
            # history and append the new ones.
            self._history[:-1] = self._history[1:]
            self._historyTimestamps.popleft()
            self._history[-1] = smallImage
            self._historyTimestamps.append(timestamp)

        # The history is full, so process it.

        # Find the average length of time per frame.
        startTime = self._historyTimestamps[0]
        endTime = self._historyTimestamps[-1]
        timeElapsed = endTime - startTime
        timePerFrame = \
                timeElapsed / self._maxHistoryLength
        fps = 1.0 / timePerFrame
        wx.CallAfter(self._fpsStaticText.SetLabel, 'FPS:  %.1f' % fps)

        # Apply the temporal bandpass filter.
        fftResult = fft(self._history, axis=0, threads=self._numFFTThreads)
        frequencies = fftfreq(self._maxHistoryLength, d=timePerFrame)
        lowBound = (numpy.abs(frequencies - self._minHz)).argmin()
        highBound = (numpy.abs(frequencies - self._maxHz)).argmin()
        fftResult[:lowBound] = 0j
        fftResult[highBound:-highBound] = 0j
        fftResult[-lowBound:] = 0j
        ifftResult = ifft(fftResult, axis=0, threads=self._numIFFTThreads)

        # Amplify the result and overlay it on the
        # original image.
        overlay = numpy.real(ifftResult[-1]) * \
                          self._amplification
        i = 0
        while i < self._numPyramidLevels:
            overlay = cv2.pyrUp(overlay)
            i += 1
        if self._useGrayOverlay:
            overlay = cv2.cvtColor(overlay, cv2.COLOR_GRAY2BGR)
        cv2.add(self._image, overlay, self._image, dtype=cv2.CV_8U)
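The magnification step above keeps only the FFT bins whose temporal frequency lies between _minHz and _maxHz before inverting. Below is a standalone sketch of that temporal band-pass on a synthetic two-tone signal, using an explicit frequency mask instead of the index arithmetic above (illustrative only):

import numpy as np
from numpy.fft import fft, ifft, fftfreq

fs, n = 30.0, 300                  # e.g. 30 frames/s, 10 s of history
t = np.arange(n) / fs
signal = np.sin(2 * np.pi * 1.0 * t) + np.sin(2 * np.pi * 5.0 * t)

spec = fft(signal)
freqs = fftfreq(n, d=1.0 / fs)
keep = (np.abs(freqs) >= 0.8) & (np.abs(freqs) <= 1.2)   # pass band around 1 Hz
spec[~keep] = 0j
filtered = np.real(ifft(spec))

# Only the 1 Hz component survives the temporal band-pass.
print(np.allclose(filtered, np.sin(2 * np.pi * 1.0 * t), atol=1e-6))  # True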
Example #17
 def iffty(ar):
     return ifft(ar, axis=1)
Example #18
 def compute_x_from_k(self):
     self.psi_mod_x = fftpack.ifft(self.psi_mod_k)
Example #19
 def _set_psi_k(self, psi_k):
     assert psi_k.shape == self.x.shape
     self.psi_mod_k = psi_k * np.exp(1j * self.x[0] * self.dk
                                     * np.arange(self.N))
     self.psi_mod_x = fftpack.ifft(self.psi_mod_k)
     self.psi_mod_k = fftpack.fft(self.psi_mod_x)
Example #20
def periodogram_mean_masked(func,
                            fe,
                            n_data,
                            n_freq,
                            mask,
                            n_points=None,
                            n_conv=None,
                            normal=True):
    """
    Function calculating the theoretical mean of the periodogram of a masked
    signal (defined as the squared modulus of the FFT divided by fe*n_data),
    given the theoretical PSD (func), the sampling frequency fe and the number
    of points n_data.

    @param func: function of one parameter giving the PSD as a function of
    frequency
    @type func : function
    @param fe: sampling frequency
    @type fe : scalar (float)
    @param n_data: number of points of the periodogram
    @type n_data : scalar (integer)
    @param mask: mask vector M[i] = 1 if data is available, 0 otherwise
    @type mask : (n_data x 1) array
    @param n_freq: number of frequency point where to compute the periodogram
    @type n_freq : scalar (integer)

    @return:
        P_mean : Periodogram expectation (n_data-vector)
    """

    if n_points is None:
        # 1. Calculation of the autocovariance function Rn
        power = int(np.log(2 * n_data) / np.log(2.))  # + 1
        # Number of points for the integration
        n_points = 2**power

    k_points = np.arange(0, n_points)
    frequencies = fe * (k_points / float(n_points) - 0.5)
    i = np.where(frequencies == 0)
    frequencies[i] = fe / (n_points)
    Z = func(frequencies)
    n = np.arange(0, n_data)
    Z_ifft = ifft(Z)
    R = fe / float(n_points) * (
        Z[0] * 0.5 * (np.exp(1j * np.pi * n) - np.exp(-1j * np.pi * n)) +
        n_points * Z_ifft[0:n_data] * np.exp(-1j * np.pi * n))

    if n_conv is None:
        n_conv = 2 * n_data - 1
    # 2. Calculation of the sample autocovariance of the mask
    fx = fft(mask, n_conv)
    # print("FFT of M is done with N_points")
    # fx = fft(M, N_points)

    if normal:
        K2 = np.sum(mask**2)
    else:
        K2 = n_data
    lambda_N = np.real(ifft(fx * np.conj(fx))) / K2

    # 3. Calculation of the of the periodogram mean vector
    X = R[0:n_data] * lambda_N[0:n_data]

    Pm = 1. / fe * (fft(X, n_freq) + n_freq * ifft(X, n_freq) -
                    R[0] * lambda_N[0])

    return Pm
Example #21
def stransform(h, Fs):
    '''
    Compute S-Transform without for loops

    Converted from MATLAB code written by Kalyan S. Dash

    Converted by Geoffrey Barrett, CUMC

    h - a 1xN vector representing timeseries data, units will most likely be uV

    returns the Stockwell transform, containing the values of all frequencies from 0 -> Fs/2 (Nyquist) at each time
    '''

    h = np.asarray(h, dtype=float)

    # scipy.io.savemat('stransform_numpy.mat', {'h': h})

    h = h.reshape((1, len(h)))  # uV

    n = h.shape[1]

    num_voices = int(Fs / 2)
    '''
    if n is None:
        n = h.shape[1]

    print(n)
    '''

    # n_half = num_voices

    n_half = np.fix(n / 2)

    n_half = int(n_half)

    odd_n = 1

    if n_half * 2 == n:
        odd_n = 0

    f = np.concatenate((np.arange(n_half + 1), np.arange(
        -n_half + 1 - odd_n,
        0))) / n  # array that goes 0-> 0.5 and then -0.5 -> 0 [2*n_half,]

    Hft = fftw.fft(h, axis=1)  # uV, [1xn]

    Hft = conj_nonzeros(Hft)

    # compute all frequency domain Gaussians as one matrix

    invfk = np.divide(1, f[1:n_half +
                           1])  # matrix of inverse frequencies in Hz, [n_half]

    invfk = invfk.reshape((len(invfk), 1))

    W = np.multiply(
        2 * np.pi * np.tile(f, (n_half, 1)),  # [n_half, f]
        np.tile(invfk.reshape((len(invfk), 1)), (1, n)),  # [n_half(invfk) x n]
    )  # n_half x len(f)

    G = np.exp((-W**2) / 2)  # Gaussian in freq domain
    G = np.asarray(G, dtype=complex)  # n_half x len(f)

    # Compute Toeplitz matrix with the shifted fft(h)

    HW = scipy.linalg.toeplitz(Hft[0, :n_half + 1].T,
                               np.conj(Hft))  # n_half + 1 x len(h)
    # HW = scipy.linalg.toeplitz(Hft[0,:n_half+1].T, Hft)

    # exclude the first row, corresponding to zero frequency

    HW = HW[1:n_half + 1, :]  # n_half x len(h)

    # compute the stockwell transform

    cwt = np.multiply(HW, G)

    ST = fftw.ifft(cwt, axis=-1)  # compute voices

    # add the zero freq row

    # print(np.mean(h, axis=1))

    st0 = np.multiply(np.mean(h, axis=1), np.ones((1, n)))

    ST = np.vstack((st0, ST))

    return ST
Example #22
def fold(fh, comm, samplerate, fedge, fedge_at_top, nchan,
         nt, ntint, ngate, ntbin, ntw, dm, fref, phasepol,
         dedisperse='incoherent',
         do_waterfall=True, do_foldspec=True, verbose=True,
         progress_interval=100, rfi_filter_raw=None, rfi_filter_power=None,
         return_fits=False):
    """
    FFT data, fold by phase/time and make a waterfall series

    Folding is done from the position the file is currently in

    Parameters
    ----------
    fh : file handle
        handle to file holding voltage timeseries
    comm: MPI communicator or None
        will use size, rank attributes
    samplerate : Quantity
        rate at which samples were originally taken and thus double the
        band width (frequency units)
    fedge : float
        edge of the frequency band (frequency units)
    fedge_at_top: bool
        whether edge is at top (True) or bottom (False)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*ntint, with each sample containing
        a single polarisation
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of the file that is read.
    dedisperse : None or string (default: incoherent).
        None, 'incoherent', 'coherent', 'by-channel'.
        Note: None really does nothing
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool or int
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    return_fits : bool (default: False)
        return a subint fits table for rank == 0 (None otherwise)

    """
    assert dedisperse in (None, 'incoherent', 'by-channel', 'coherent')
    need_fine_channels = dedisperse in ['by-channel', 'coherent']
    assert nchan % fh.nchan == 0
    if dedisperse == 'by-channel' and fh.nchan > 1:
        oversample = nchan // fh.nchan
        assert ntint % oversample == 0
    else:
        oversample = 1

    if dedisperse == 'coherent' and fh.nchan > 1:
        warnings.warn("Doing coherent dedispersion on channelized data. "
                      "May get artefacts!")

    if comm is None:
        mpi_rank = 0
        mpi_size = 1
    else:
        mpi_rank = comm.rank
        mpi_size = comm.size

    npol = getattr(fh, 'npol', 1)
    assert npol == 1 or npol == 2
    if verbose > 1 and mpi_rank == 0:
        print("Number of polarisations={}".format(npol))

    # initialize folded spectrum and waterfall
    # TODO: use estimated number of points to set dtype
    if do_foldspec:
        foldspec = np.zeros((ntbin, nchan, ngate, npol**2), dtype=np.float32)
        icount = np.zeros((ntbin, nchan, ngate), dtype=np.int32)
    else:
        foldspec = None
        icount = None

    if do_waterfall:
        nwsize = nt*ntint//ntw//oversample
        waterfall = np.zeros((nwsize, nchan, npol**2), dtype=np.float64)
    else:
        waterfall = None

    if verbose and mpi_rank == 0:
        print('Reading from {}'.format(fh))

    nskip = fh.tell() // fh.blocksize
    if nskip > 0:
        if verbose and mpi_rank == 0:
            print('Starting {0} blocks = {1} bytes out from start.'
                  .format(nskip, nskip*fh.blocksize))

    dt1 = (1./samplerate).to(u.s)
    # need 2*nchan real-valued samples for each FFT
    if fh.telescope == 'lofar':
        dtsample = fh.dtsample
    else:
        dtsample = nchan // oversample * 2 * dt1
    tstart = dtsample * ntint * nskip

    # pre-calculate time delay due to dispersion in coarse channels
    # for channelized data, frequencies are known

    tb = -1. if fedge_at_top else +1.
    if fh.nchan == 1:
        if getattr(fh, 'data_is_complex', False):
            # for complex data, really each complex sample consists of
            # 2 real ones, so multiply dt1 by 2.
            freq = fedge + tb * fftfreq(nchan, 2.*dt1.value) * u.Hz
            if dedisperse == 'coherent':
                fcoh = fedge + tb * fftfreq(nchan*ntint, 2.*dt1.value) * u.Hz
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + (tb * fftfreq(
                    ntint, 2.*dtsample.value) * u.Hz)[:, np.newaxis]
        else:
            freq = fedge + tb * rfftfreq(nchan*2, dt1.value)[::2] * u.Hz
            if dedisperse == 'coherent':
                fcoh = fedge + tb * rfftfreq(nchan*ntint*2,
                                             dt1.value)[::2] * u.Hz
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(
                    ntint, dtsample.value)[:, np.newaxis] * u.Hz
        freq_in = freq
    else:
        # input frequencies may not be the ones going out
        freq_in = fh.frequencies
        if oversample == 1:
            freq = freq_in
        else:
            freq = (freq_in[:, np.newaxis] + tb * u.Hz *
                    rfftfreq(oversample*2, dtsample.value/2.)[::2])
        # same as fine = rfftfreq(2*ntint, dtsample.value/2.)[::2]
        fcoh = freq_in[np.newaxis, :] + tb * u.Hz * rfftfreq(
            ntint*2, dtsample.value/2.)[::2, np.newaxis]
        # print('fedge_at_top={0}, tb={1}'.format(fedge_at_top, tb))
    ifreq = freq.ravel().argsort()

    # pre-calculate time offsets in (input) channelized streams
    dt = dispersion_delay_constant * dm * (1./freq_in**2 - 1./fref**2)

    if need_fine_channels:
        # pre-calculate required turns due to dispersion.
        #
        # set frequency relative to which dispersion is coherently corrected
        if dedisperse == 'coherent':
            _fref = fref
        else:
            _fref = freq_in[np.newaxis, :]
        # (check via eq. 5.21 and following in
        # Lorimer & Kramer, Handbook of Pulsar Astronomy
        dang = (dispersion_delay_constant * dm * fcoh *
                (1./_fref-1./fcoh)**2) * u.cycle
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            dd_coh = np.exp(dang * 1j).conj().astype(np.complex64)

        # add dimension for polarisation
        dd_coh = dd_coh[..., np.newaxis]

    # Calculate the part of the whole file this node should handle.
    size_per_node = (nt-1)//mpi_size + 1
    start_block = mpi_rank*size_per_node
    end_block = min((mpi_rank+1)*size_per_node, nt)
    for j in range(start_block, end_block):
        if verbose and j % progress_interval == 0:
            print('#{:4d}/{:4d} is doing {:6d}/{:6d} [={:6d}/{:6d}]; '
                  'time={:18.12f}'
                  .format(mpi_rank, mpi_size, j+1, nt,
                          j-start_block+1, end_block-start_block,
                          (tstart+dtsample*j*ntint).value))  # time since start

        # Just in case numbers were set wrong -- break if file ends;
        # better keep at least the work done.
        try:
            raw = fh.seek_record_read(int((nskip+j)*fh.blocksize),
                                      fh.blocksize)
        except(EOFError, IOError) as exc:
            print("Hit {0!r}; writing data collected.".format(exc))
            break
        if verbose >= 2:
            print("#{:4d}/{:4d} read {} items"
                  .format(mpi_rank, mpi_size, raw.size), end="")

        if npol == 2:  # multiple polarisations
            raw = raw.view(list(raw.dtype.fields.values())[0][0])

        if fh.nchan == 1:  # raw.shape=(ntint*npol)
            raw = raw.reshape(-1, npol)
        else:              # raw.shape=(ntint, nchan*npol)
            raw = raw.reshape(-1, fh.nchan, npol)

        if rfi_filter_raw is not None:
            raw, ok = rfi_filter_raw(raw)
            if verbose >= 2:
                print("... raw RFI (zap {0}/{1})"
                      .format(np.count_nonzero(~ok), ok.size), end="")

        if np.can_cast(raw.dtype, np.float32):
            vals = raw.astype(np.float32)
        else:
            assert raw.dtype.kind == 'c'
            vals = raw

        if fh.nchan == 1:
            # have real-valued time stream of complex baseband
            # if we need some coherent dedispersion, do FT of whole thing,
            # otherwise to output channels
            if raw.dtype.kind == 'c':
                ftchan = len(vals) if dedisperse == 'coherent' else nchan
                vals = fft(vals.reshape(-1, ftchan, npol), axis=1,
                           overwrite_x=True, **_fftargs)
            else:  # real data
                ftchan = len(vals) // 2 if dedisperse == 'coherent' else nchan
                vals = rfft(vals.reshape(-1, ftchan*2, npol), axis=1,
                            overwrite_x=True, **_fftargs)
                if vals.dtype.kind == 'f':  # this depends on version, sigh.
                    # rfft: Re[0], Re[1], Im[1],.,Re[n/2-1], Im[n/2-1], Re[n/2]
                    # re-order to normal fft format (like Numerical Recipes):
                    # Re[0], Re[n], Re[1], Im[1], .... (channel 0 junk anyway)
                    vals = (np.hstack((vals[:, :1], vals[:, -1:],
                                       vals[:, 1:-1]))
                            .reshape(-1, ftchan, 2 * npol))
                    if npol == 2:  # reorder pol & real/imag
                        vals1 = vals[:, :, 1]
                        vals[:, :, 1] = vals[:, :, 2]
                        vals[:, :, 2] = vals1
                        vals = vals.reshape(-1, ftchan, npol, 2)
                else:
                    vals[:, 0] = vals[:, 0].real + 1j * vals[:, -1].real
                    vals = vals[:, :-1]

                vals = vals.view(np.complex64).reshape(-1, ftchan, npol)

            # for incoherent,            vals.shape=(ntint, nchan, npol)
            # for others, (1, ntint*nchan, npol) -> (ntint*nchan, 1, npol)
            if need_fine_channels:
                if dedisperse == 'by-channel':
                    fine = fft(vals, axis=0, overwrite_x=True, **_fftargs)
                else:
                    fine = vals.reshape(-1, 1, npol)

        else:  # data already channelized
            if need_fine_channels:
                fine = fft(vals, axis=0, overwrite_x=True, **_fftargs)
                # have fine.shape=(ntint, fh.nchan, npol)

        if need_fine_channels:
            # Dedisperse.
            fine *= dd_coh

            # if dedisperse == 'by-channel' and oversample > 1:
                # fine.shape=(ntint*oversample, chan_in, npol)
                #           =(coarse,fine,fh.chan, npol)
                #  -> reshape(oversample, ntint, fh.nchan, npol)
                # want (ntint=fine, fh.nchan, oversample, npol) -> .transpose
                # fine = (fine.reshape(nchan / fh.nchan, -1, fh.nchan, npol)
                #         .transpose(1, 2, 0, 3)
                #         .reshape(-1, nchan, npol))

            # now fine.shape=(ntint, nchan, npol)  w/ nchan=1 for coherent
            vals = ifft(fine, axis=0, overwrite_x=True, **_fftargs)

            if dedisperse == 'coherent' and nchan > 1 and fh.nchan == 1:
                # final FT to get requested channels
                vals = vals.reshape(-1, nchan, npol)
                vals = fft(vals, axis=1, overwrite_x=True, **_fftargs)
            elif dedisperse == 'by-channel' and oversample > 1:
                vals = vals.reshape(-1, oversample, fh.nchan, npol)
                vals = fft(vals, axis=1, overwrite_x=True, **_fftargs)
                vals = vals.transpose(0, 2, 1, 3).reshape(-1, nchan, npol)

            # vals[time, chan, pol]
            if verbose >= 2:
                print("... dedispersed", end="")

        if npol == 1:
            power = vals.real**2 + vals.imag**2
        else:
            p0 = vals[..., 0]
            p1 = vals[..., 1]
            power = np.empty(vals.shape[:-1] + (4,), np.float32)
            power[..., 0] = p0.real**2 + p0.imag**2
            power[..., 1] = p0.real*p1.real + p0.imag*p1.imag
            power[..., 2] = p0.imag*p1.real - p0.real*p1.imag
            power[..., 3] = p1.real**2 + p1.imag**2

        if verbose >= 2:
            print("... power", end="")

        # current sample positions and corresponding time in stream
        isr = j*(ntint // oversample) + np.arange(ntint // oversample)
        tsr = (isr*dtsample*oversample)[:, np.newaxis]

        if rfi_filter_power is not None:
            power = rfi_filter_power(power, tsr.squeeze())
            print("... power RFI", end="")

        # correct for delay if needed
        if dedisperse in ['incoherent', 'by-channel']:
            # tsample.shape=(ntint/oversample, nchan_in)
            tsr = tsr - dt

        if do_waterfall:
            # # loop over corresponding positions in waterfall
            # for iw in xrange(isr[0]//ntw, isr[-1]//ntw + 1):
            #     if iw < nwsize:  # add sum of corresponding samples
            #         waterfall[iw, :] += np.sum(power[isr//ntw == iw],
            #                                    axis=0)[ifreq]
            iw = np.round((tsr / dtsample / oversample).to(1)
                          .value / ntw).astype(int)
            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iwk = iw[:, (0 if iw.shape[1] == 1 else kfreq // oversample)]
                iwk = np.clip(iwk, 0, nwsize-1, out=iwk)
                iwkmin = iwk.min()
                iwkmax = iwk.max()+1
                for ipow in range(npol**2):
                    waterfall[iwkmin:iwkmax, k, ipow] += np.bincount(
                        iwk-iwkmin, power[:, kfreq, ipow], iwkmax-iwkmin)
            if verbose >= 2:
                print("... waterfall", end="")

        if do_foldspec:
            ibin = (j*ntbin) // nt  # bin in the time series: 0..ntbin-1

            # times and cycles since start time of observation.
            tsample = tstart + tsr
            phase = (phasepol(tsample.to(u.s).value.ravel())
                     .reshape(tsample.shape))
            # corresponding PSR phases
            iphase = np.remainder(phase*ngate, ngate).astype(int)

            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iph = iphase[:, (0 if iphase.shape[1] == 1
                                 else kfreq // oversample)]
                # sum and count samples by phase bin
                for ipow in range(npol**2):
                    foldspec[ibin, k, :, ipow] += np.bincount(
                        iph, power[:, kfreq, ipow], ngate)
                icount[ibin, k, :] += np.bincount(
                    iph, power[:, kfreq, 0] != 0., ngate)

            if verbose >= 2:
                print("... folded", end="")

        if verbose >= 2:
            print("... done")

    #Commented out as workaround, this was causing "Referenced before assignment" errors with JB data
    #if verbose >= 2 or verbose and mpi_rank == 0:
    #    print('#{:4d}/{:4d} read {:6d} out of {:6d}'
    #          .format(mpi_rank, mpi_size, j+1, nt))

    if npol == 1:
        if do_foldspec:
            foldspec = foldspec.reshape(foldspec.shape[:-1])
        if do_waterfall:
            waterfall = waterfall.reshape(waterfall.shape[:-1])

    return foldspec, icount, waterfall
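The coherent-dedispersion branch above multiplies the fine-channel spectrum by the unit-modulus phase factor dd_coh and transforms back with ifft. Because that factor has modulus one, applying a chirp and then its conjugate in the frequency domain is an exact round trip, which this standalone sketch checks on random complex baseband samples (illustrative, not the pipeline above):

import numpy as np
from numpy.fft import fft, ifft

rng = np.random.default_rng(13)
n = 4096
baseband = rng.standard_normal(n) + 1j * rng.standard_normal(n)

# Some unit-modulus phase rotation per frequency bin (a stand-in for dd_coh).
phase = np.exp(2j * np.pi * rng.random(n))

dispersed = ifft(fft(baseband) * phase)            # "disperse" through the chirp
recovered = ifft(fft(dispersed) * np.conj(phase))  # dedisperse coherently

print(np.allclose(recovered, baseband))  # True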
Example #23
def fold(file1, samplerate, fmid, nchan,
         nt, ntint, nhead, ngate, ntbin, ntw, dm, fref, phasepol,
         coherent=False, do_waterfall=True, do_foldspec=True, verbose=True,
         progress_interval=100):
    """FFT Effelsberg data, fold by phase/time and make a waterfall series

    Parameters
    ----------
    file1 : string
        name of the file holding voltage timeseries
    samplerate : float
        rate at which samples were originally taken and thus band width
        (frequency units)
    fmid : float
        mid point of the frequency band (frequency units)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*(2*ntint), with each sample containing
        real,imag for two polarisations
    nhead : int
        number of bytes to skip before reading (usually 4096 for Effelsberg)
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of part of the file that is read (i.e., ignoring nhead)
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    """

    # initialize folded spectrum and waterfall
    foldspec2 = np.zeros((nchan, ngate, ntbin))
    nwsize = nt*ntint//ntw
    waterfall = np.zeros((nchan, nwsize))

    # size in bytes of records read from file (each nchan contains 4 bytes:
    # real,imag for 2 polarisations).
    recsize = 4*nchan*ntint
    if verbose:
        print('Reading from {}'.format(file1))

    myopen = gzip.open if '.gz' in file1 else open
    with myopen(file1, 'rb', recsize) as fh1:

        if nhead > 0:
            if verbose:
                print('Skipping {0} bytes'.format(nhead))
            fh1.seek(nhead)

        foldspec = np.zeros((nchan, ngate))
        icount = np.zeros((nchan, ngate))

        # gosh, fftpack has everything; used to calculate with:
        # fband / nchan * (np.mod(np.arange(nchan)+nchan/2, nchan)-nchan/2)
        if coherent:
            # pre-calculate required turns due to dispersion
            fcoh = (fmid +
                    fftfreq(nchan*ntint, (1./samplerate).to(u.s).value) * u.Hz)
            # (check via eq. 5.21 and following in
            # Lorimer & Kramer, Handbook of Pulsar Astronomy)
            dang = (dispersion_delay_constant * dm * fcoh *
                    (1./fref-1./fcoh)**2) * 360. * u.deg
            dedisperse = np.exp(dang.to(u.rad).value * 1j
                                ).conj().astype(np.complex64)
        else:
            # pre-calculate time delay due to dispersion
            freq = fmid + fftfreq(nchan, (1./samplerate).to(u.s).value) * u.Hz
            dt = (dispersion_delay_constant * dm *
                  (1./freq**2 - 1./fref**2)).to(u.s).value

        dtsample = (nchan/samplerate).to(u.s).value

        for j in range(nt):
            if verbose and (j+1) % progress_interval == 0:
                print('Doing {:6d}/{:6d}; time={:18.12f}'.format(
                    j+1, nt, dtsample*j*ntint))   # equivalent time since start

            # just in case numbers were set wrong -- break if file ends
            # better keep at least the work done
            try:
                # data stored as series of two two-byte complex numbers,
                # one for each polarization
                raw = np.frombuffer(fh1.read(recsize),
                                    dtype=np.int8).reshape(-1, 2, 2)
            except:
                break

            # use view for fast conversion from float to complex
            vals = raw.astype(np.float32).view(np.complex64).squeeze()
            # vals[i_int * i_block, i_pol]
            if coherent:
                fine = fft(vals, axis=0, overwrite_x=True, **_fftargs)
                fine *= dedisperse[:,np.newaxis]
                vals = ifft(fine, axis=0, overwrite_x=True, **_fftargs)

            chan = fft(vals.reshape(-1, nchan, 2), axis=1, overwrite_x=True,
                       **_fftargs)
            # chan[i_int, i_block, i_pol]
            power = np.sum(chan.real**2+chan.imag**2, axis=-1)

            # current sample positions in stream
            isr = j*ntint + np.arange(ntint)

            if do_waterfall:
                # loop over corresponding positions in waterfall
                for iw in range(isr[0]//ntw, isr[-1]//ntw + 1):
                    if iw < nwsize:  # add sum of corresponding samples
                        waterfall[:,iw] += np.sum(power[isr//ntw == iw],
                                                  axis=0)

            if do_foldspec:
                tsample = dtsample*isr  # times since start

                for k in range(nchan):
                    if coherent:
                        t = tsample  # already dedispersed
                    else:
                        t = tsample - dt[k]  # dedispersed times
                    phase = phasepol(t)  # corresponding PSR phases
                    iphase = np.remainder(phase*ngate,
                                          ngate).astype(int)
                    # sum and count samples by phase bin
                    foldspec[k] += np.bincount(iphase, power[:,k], ngate)
                    icount[k] += np.bincount(iphase, None, ngate)

                ibin = j*ntbin//nt  # bin in the time series: 0..ntbin-1
                if (j+1)*ntbin//nt > ibin:  # last addition to bin?
                    # get normalised flux in each bin (where any were added)
                    nonzero = icount > 0
                    nfoldspec = np.where(nonzero, foldspec/icount, 0.)
                    # subtract phase average and store
                    nfoldspec -= np.where(nonzero,
                                          np.sum(nfoldspec, 1, keepdims=True) /
                                          np.sum(nonzero, 1, keepdims=True), 0)
                    foldspec2[:,:,ibin] = nfoldspec
                    # reset for next iteration
                    foldspec *= 0
                    icount *= 0

    if verbose:
        print('read {0:6d} out of {1:6d}'.format(j+1, nt))

    if do_foldspec:
        # swap two halves in frequency, so that freq increases monotonically
        foldspec2 = fftshift(foldspec2, axes=0)

    if do_waterfall:
        nonzero = waterfall == 0.
        waterfall -= np.where(nonzero,
                              np.sum(waterfall, 1, keepdims=True) /
                              np.sum(nonzero, 1, keepdims=True), 0.)
        # swap two halves in frequency, so that freq increases monotonically
        waterfall = fftshift(waterfall, axes=0)

    return foldspec2, waterfall
Example #24
def fold(fh,
         comm,
         samplerate,
         fedge,
         fedge_at_top,
         nchan,
         nt,
         ntint,
         ngate,
         ntbin,
         ntw,
         dm,
         fref,
         phasepol,
         dedisperse='incoherent',
         do_waterfall=True,
         do_foldspec=True,
         verbose=True,
         progress_interval=100,
         rfi_filter_raw=None,
         rfi_filter_power=None,
         return_fits=False):
    """
    FFT data, fold by phase/time and make a waterfall series

    Folding is done from the position the file is currently in

    Parameters
    ----------
    fh : file handle
        handle to file holding voltage timeseries
    comm: MPI communicator or None
        will use size, rank attributes
    samplerate : Quantity
        rate at which samples were originally taken and thus double the
        band width (frequency units)
    fedge : float
        edge of the frequency band (frequency units)
    fedge_at_top: bool
        whether edge is at top (True) or bottom (False)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*ntint, with each sample containing
        a single polarisation
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of the file that is read.
    dedisperse : None or string (default: incoherent).
        None, 'incoherent', 'coherent', 'by-channel'.
        Note: None really does nothing
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool or int
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    return_fits : bool (default: False)
        return a subint fits table for rank == 0 (None otherwise)

    """
    assert dedisperse in (None, 'incoherent', 'by-channel', 'coherent')
    assert nchan % fh.nchan == 0
    if dedisperse == 'by-channel':
        oversample = nchan // fh.nchan
        assert ntint % oversample == 0
    else:
        oversample = 1

    if dedisperse == 'coherent' and fh.nchan > 1:
        raise ValueError("For coherent dedispersion, data must be "
                         "unchannelized before folding.")

    if comm is None:
        mpi_rank = 0
        mpi_size = 1
    else:
        mpi_rank = comm.rank
        mpi_size = comm.size

    npol = getattr(fh, 'npol', 1)
    assert npol == 1 or npol == 2
    if verbose > 1 and mpi_rank == 0:
        print("Number of polarisations={}".format(npol))

    # initialize folded spectrum and waterfall
    # TODO: use estimated number of points to set dtype
    if do_foldspec:
        foldspec = np.zeros((ntbin, nchan, ngate, npol**2), dtype=np.float32)
        icount = np.zeros((ntbin, nchan, ngate), dtype=np.int32)
    else:
        foldspec = None
        icount = None

    if do_waterfall:
        nwsize = nt * ntint // ntw
        waterfall = np.zeros((nwsize, nchan, npol**2), dtype=np.float64)
    else:
        waterfall = None

    if verbose and mpi_rank == 0:
        print('Reading from {}'.format(fh))

    nskip = fh.tell() // fh.blocksize
    if nskip > 0:
        if verbose and mpi_rank == 0:
            print('Starting {0} blocks = {1} bytes out from start.'.format(
                nskip, nskip * fh.blocksize))

    dt1 = (1. / samplerate).to(u.s)
    # need 2*nchan real-valued samples for each FFT
    if fh.telescope == 'lofar':
        dtsample = fh.dtsample
    else:
        dtsample = nchan // oversample * 2 * dt1
    tstart = dtsample * ntint * nskip

    # pre-calculate time delay due to dispersion in coarse channels
    # for channelized data, frequencies are known

    if fh.nchan == 1:
        if getattr(fh, 'data_is_complex', False):
            # for complex data, really each complex sample consists of
            # 2 real ones, so multiply dt1 by 2.
            if fedge_at_top:
                freq = fedge - fftfreq(nchan, 2. * dt1.value) * u.Hz
            else:
                freq = fedge + fftfreq(nchan, 2. * dt1.value) * u.Hz
        else:
            if fedge_at_top:
                freq = fedge - rfftfreq(nchan * 2, dt1.value)[::2] * u.Hz
            else:
                freq = fedge + rfftfreq(nchan * 2, dt1.value)[::2] * u.Hz
        freq_in = freq
    else:
        # input frequencies may not be the ones going out
        freq_in = fh.frequencies
        if oversample == 1:
            freq = freq_in
        else:
            if fedge_at_top:
                freq = (freq_in[:, np.newaxis] -
                        u.Hz * fftfreq(oversample, dtsample.value))
            else:
                freq = (freq_in[:, np.newaxis] +
                        u.Hz * fftfreq(oversample, dtsample.value))
    ifreq = freq.ravel().argsort()

    # pre-calculate time offsets in (input) channelized streams
    dt = dispersion_delay_constant * dm * (1. / freq_in**2 - 1. / fref**2)

    if dedisperse in ['coherent', 'by-channel']:
        # pre-calculate required turns due to dispersion
        if fedge_at_top:
            fcoh = (freq_in[np.newaxis, :] -
                    u.Hz * fftfreq(ntint, dtsample.value)[:, np.newaxis])
        else:
            fcoh = (freq_in[np.newaxis, :] +
                    u.Hz * fftfreq(ntint, dtsample.value)[:, np.newaxis])

        # set frequency relative to which dispersion is coherently corrected
        if dedisperse == 'coherent':
            _fref = fref
        else:
            _fref = freq_in[np.newaxis, :]
        # (check via eq. 5.21 and following in
        # Lorimer & Kramer, Handbook of Pulsar Astronomy
        dang = (dispersion_delay_constant * dm * fcoh *
                (1. / _fref - 1. / fcoh)**2) * u.cycle

        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            dd_coh = np.exp(dang * 1j).conj().astype(np.complex64)

        # add dimension for polarisation
        dd_coh = dd_coh[..., np.newaxis]

    # Calculate the part of the whole file this node should handle.
    size_per_node = (nt - 1) // mpi_size + 1
    start_block = mpi_rank * size_per_node
    end_block = min((mpi_rank + 1) * size_per_node, nt)
    for j in range(start_block, end_block):
        if verbose and j % progress_interval == 0:
            print('#{:4d}/{:4d} is doing {:6d}/{:6d} [={:6d}/{:6d}]; '
                  'time={:18.12f}'.format(
                      mpi_rank, mpi_size, j + 1, nt, j - start_block + 1,
                      end_block - start_block,
                      (tstart +
                       dtsample * j * ntint).value))  # time since start

        # Just in case numbers were set wrong -- break if file ends;
        # better keep at least the work done.
        try:
            raw = fh.seek_record_read(int((nskip + j) * fh.blocksize),
                                      fh.blocksize)
        except (EOFError, IOError) as exc:
            print("Hit {0!r}; writing data collected.".format(exc))
            break
        if verbose >= 2:
            print("#{:4d}/{:4d} read {} items".format(mpi_rank, mpi_size,
                                                      raw.size),
                  end="")

        if npol == 2:  # multiple polarisations
            raw = raw.view(list(raw.dtype.fields.values())[0][0])

        if fh.nchan == 1:  # raw.shape=(ntint*npol)
            raw = raw.reshape(-1, npol)
        else:  # raw.shape=(ntint, nchan*npol)
            raw = raw.reshape(-1, fh.nchan, npol)

        if rfi_filter_raw is not None:
            raw, ok = rfi_filter_raw(raw)
            if verbose >= 2:
                print("... raw RFI (zap {0}/{1})".format(
                    np.count_nonzero(~ok), ok.size),
                      end="")

        if np.can_cast(raw.dtype, np.float32):
            vals = raw.astype(np.float32)
        else:
            assert raw.dtype.kind == 'c'
            vals = raw

        if fh.nchan == 1:
            # have a real-valued time stream or complex baseband data;
            # if we need coherent dedispersion, FT the whole thing,
            # otherwise FT only to the output channels
            if raw.dtype.kind == 'c':
                ftchan = nchan if dedisperse == 'incoherent' else len(vals)
                vals = fft(vals.reshape(-1, ftchan, npol),
                           axis=1,
                           overwrite_x=True,
                           **_fftargs)
            else:  # real data
                ftchan = nchan if dedisperse == 'incoherent' else len(
                    vals) // 2
                vals = rfft(vals.reshape(-1, ftchan * 2, npol),
                            axis=1,
                            overwrite_x=True,
                            **_fftargs)
                # rfft: Re[0], Re[1], Im[1], ..., Re[n/2-1], Im[n/2-1], Re[n/2]
                # re-order to normal fft format (like Numerical Recipes):
                # Re[0], Re[n/2], Re[1], Im[1], .... (channel 0 is junk anyway)
                vals = np.hstack(
                    (vals[:, 0], vals[:, -1], vals[:,
                                                   1:-1])).view(np.complex64)
            # for incoherent, vals.shape=(ntint, nchan, npol) -> OK
            # for others, have           (1, ntint*nchan, npol)
            # reshape(nchan, ntint) makes frequency the slowly varying axis -> .T
            if dedisperse != 'incoherent':
                fine = vals.reshape(nchan, -1, npol).transpose(1, 0, 2)
                # now have fine.shape=(ntint, nchan, npol)

        else:  # data already channelized
            if dedisperse == 'by-channel':
                fine = fft(vals, axis=0, overwrite_x=True, **_fftargs)
                # have fine.shape=(ntint, fh.nchan, npol)

        if dedisperse in ['coherent', 'by-channel']:
            fine *= dd_coh
            # rechannelize to output channels
            if oversample > 1 and dedisperse == 'by-channel':
                # fine.shape=(ntint*oversample, chan_in, npol)
                #           =(coarse,fine,fh.chan, npol)
                #  -> reshape(oversample, ntint, fh.nchan, npol)
                # want (ntint=fine, fh.nchan, oversample, npol) -> .transpose
                fine = (fine.reshape(oversample, -1, fh.nchan, npol).transpose(
                    1, 2, 0, 3).reshape(-1, nchan, npol))
            # now, for both,     fine.shape=(ntint, nchan, npol)
            vals = ifft(fine, axis=0, overwrite_x=True, **_fftargs)
            # vals[time, chan, pol]
            if verbose >= 2:
                print("... dedispersed", end="")

        if npol == 1:
            power = vals.real**2 + vals.imag**2
        else:
            p0 = vals[..., 0]
            p1 = vals[..., 1]
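            # The four products below are the polarization auto- and
            # cross-terms |p0|^2, Re(p0 p1*), Im(p0 p1*), |p1|^2, from which
            # Stokes parameters can later be formed.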
            power = np.empty(vals.shape[:-1] + (4, ), np.float32)
            power[..., 0] = p0.real**2 + p0.imag**2
            power[..., 1] = p0.real * p1.real + p0.imag * p1.imag
            power[..., 2] = p0.imag * p1.real - p0.real * p1.imag
            power[..., 3] = p1.real**2 + p1.imag**2

        if verbose >= 2:
            print("... power", end="")

        if rfi_filter_power is not None:
            power = rfi_filter_power(power)
            print("... power RFI", end="")

        # current sample positions in stream
        isr = j * (ntint // oversample) + np.arange(ntint // oversample)

        if do_waterfall:
            # loop over corresponding positions in waterfall
            for iw in range(isr[0] // ntw, isr[-1] // ntw + 1):
                if iw < nwsize:  # add sum of corresponding samples
                    waterfall[iw, :] += np.sum(power[isr // ntw == iw],
                                               axis=0)[ifreq]
            if verbose >= 2:
                print("... waterfall", end="")

        if do_foldspec:
            ibin = (j * ntbin) // nt  # bin in the time series: 0..ntbin-1

            # times since start
            tsample = (tstart + isr * dtsample * oversample)[:, np.newaxis]
            # correct for delay if needed
            if dedisperse in ['incoherent', 'by-channel']:
                # tsample.shape=(ntint/oversample, nchan_in)
                tsample = tsample - dt

            phase = (phasepol(tsample.to(u.s).value.ravel()).reshape(
                tsample.shape))
            # corresponding PSR phases
            iphase = np.remainder(phase * ngate, ngate).astype(int)

            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iph = iphase[:, (0 if iphase.shape[1] == 1 else kfreq //
                                 oversample)]
                # sum and count samples by phase bin
                for ipow in range(npol**2):
                    foldspec[ibin, k, :,
                             ipow] += np.bincount(iph, power[:, kfreq, ipow],
                                                  ngate)
                icount[ibin,
                       k, :] += np.bincount(iph, power[:, kfreq, 0] != 0.,
                                            ngate)

            if verbose >= 2:
                print("... folded", end="")

        if verbose >= 2:
            print("... done")

    # Commented out as a workaround: this was causing "referenced before
    # assignment" errors with JB data (j is undefined if the loop never runs).
    # if verbose >= 2 or verbose and mpi_rank == 0:
    #     print('#{:4d}/{:4d} read {:6d} out of {:6d}'
    #           .format(mpi_rank, mpi_size, j+1, nt))

    if npol == 1:
        if do_foldspec:
            foldspec = foldspec.reshape(foldspec.shape[:-1])
        if do_waterfall:
            waterfall = waterfall.reshape(waterfall.shape[:-1])

    return foldspec, icount, waterfall
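
A minimal standalone sketch of the coherent-dedispersion step above, using plain
numpy instead of the pipeline's astropy-units machinery. The function name, the
toy channel parameters, and the approximate dispersion constant are illustrative
assumptions rather than part of the original code:

import numpy as np
from numpy.fft import fft, ifft, fftfreq

# Approximate dispersion constant in Hz^2 s cm^3 / pc.
DISPERSION_CONSTANT = 4.148808e15

def dedisperse_channel(timestream, fcenter_hz, bandwidth_hz, dm, fref_hz):
    """Remove the intra-channel dispersive phase from one channel of
    complex baseband data (cf. the dang / dd_coh factor above).
    The sideband handling (fedge_at_top) of the pipeline is omitted."""
    n = timestream.size
    dt = 1.0 / bandwidth_hz                       # complex sample spacing [s]
    # Frequency of each Fourier component within this channel.
    fcoh = fcenter_hz + fftfreq(n, d=dt)
    # Extra phase, in cycles, accumulated relative to fref_hz.
    dang = DISPERSION_CONSTANT * dm * fcoh * (1.0 / fref_hz - 1.0 / fcoh) ** 2
    dd_coh = np.exp(-2j * np.pi * dang).astype(np.complex64)
    return ifft(fft(timestream) * dd_coh)

# Toy usage: noise "baseband" in a 16 MHz channel centred at 400 MHz.
rng = np.random.default_rng(0)
ts = (rng.standard_normal(4096) + 1j * rng.standard_normal(4096)).astype(np.complex64)
clean = dedisperse_channel(ts, 400e6, 16e6, dm=26.76, fref_hz=408e6)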
Example #25
def ifftx(ar):
    return ifft(ar, axis=0)
Example #26
def ifftx(ar):
    return ifft(ar, axis=0)
Example #27
    def _applyEulerianVideoMagnification(self, image):
        
        timestamp = timeit.default_timer()

        if self._useGrayOverlay:
            smallImage = cv2.cvtColor(
                    image, cv2.COLOR_BGR2GRAY).astype(
                            numpy.float32)
        else:
            smallImage = image.astype(numpy.float32)

        # Downsample the image using a pyramid technique.
        i = 0
        while i < self._numPyramidLevels:
            smallImage = cv2.pyrDown(smallImage)
            i += 1
        if self._useLaplacianPyramid:
            smallImage[:] -= \
                cv2.pyrUp(cv2.pyrDown(smallImage))

        historyLength = len(self._historyTimestamps)
        
        if historyLength < self._maxHistoryLength - 1:
            
            # Append the new image and timestamp to the
            # history.
            self._history[historyLength] = smallImage
            self._historyTimestamps.append(timestamp)
            
            # The history is still not full, so wait.
            return
        
        if historyLength == self._maxHistoryLength - 1:
            # Append the new image and timestamp to the
            # history.
            self._history[historyLength] = smallImage
            self._historyTimestamps.append(timestamp)
        else:
            # Drop the oldest image and timestamp from the
            # history and append the new ones.
            self._history[:-1] = self._history[1:]
            self._historyTimestamps.popleft()
            self._history[-1] = smallImage
            self._historyTimestamps.append(timestamp)
        
        # The history is full, so process it.
        
        # Find the average length of time per frame.
        startTime = self._historyTimestamps[0]
        endTime = self._historyTimestamps[-1]
        timeElapsed = endTime - startTime
        timePerFrame = \
                timeElapsed / self._maxHistoryLength
        # print('FPS:', 1.0 / timePerFrame)
        
        # Apply the temporal bandpass filter.
        fftResult = fft(self._history, axis=0,
                        threads=self._numFFTThreads)
        frequencies = fftfreq(
                self._maxHistoryLength, d=timePerFrame)
        lowBound = (numpy.abs(
                frequencies - self._minHz)).argmin()
        highBound = (numpy.abs(
                frequencies - self._maxHz)).argmin()
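        # Zero every FFT bin outside the passband, on both the positive-
        # and negative-frequency sides (assumes lowBound > 0).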
        fftResult[:lowBound] = 0j
        fftResult[highBound:-highBound] = 0j
        fftResult[-lowBound:] = 0j
        ifftResult = ifft(fftResult, axis=0,
                          threads=self._numIFFTThreads)

        # Amplify the result and overlay it on the
        # original image.
        overlay = numpy.real(ifftResult[-1]) * \
                          self._amplification
        i = 0
        while i < self._numPyramidLevels:
            overlay = cv2.pyrUp(overlay)
            i += 1
        if self._useGrayOverlay:
            overlay = cv2.cvtColor(overlay,
                                   cv2.COLOR_GRAY2BGR)
        cv2.convertScaleAbs(image + overlay, image)
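
The heart of the example above is the temporal bandpass: FFT the frame history
along time, keep only the bins whose frequency lies between minHz and maxHz, and
inverse-transform. A standalone sketch of that step with plain numpy (no pyfftw
threading); it uses a boolean frequency mask rather than the argmin-based index
slicing above, and every name and parameter value is illustrative:

import numpy as np

def temporal_bandpass(history, fps, min_hz, max_hz):
    """history: float array of shape (num_frames, height, width[, channels])."""
    fft_result = np.fft.fft(history, axis=0)
    frequencies = np.fft.fftfreq(history.shape[0], d=1.0 / fps)
    # Keep bins whose absolute frequency lies in the passband; taking abs()
    # keeps the mirrored negative-frequency bins too, so the inverse
    # transform is (numerically) real.
    passband = (np.abs(frequencies) >= min_hz) & (np.abs(frequencies) <= max_hz)
    fft_result[~passband] = 0j
    return np.real(np.fft.ifft(fft_result, axis=0))

# Toy usage: isolate roughly heart-rate-range (0.8-2.0 Hz) variations in a
# stack of random "frames" sampled at 30 frames per second.
frames = np.random.rand(64, 32, 32).astype(np.float32)
filtered = temporal_bandpass(frames, fps=30.0, min_hz=0.8, max_hz=2.0)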
Example #28
def iffty(ar):
    return ifft(ar, axis=1)