Example #1
    def read(self, samples):

        raw = self.fh.read(samples)
        raw = np.swapaxes(raw, 1, 2)

        nyq_pad = np.zeros((raw.shape[0], 1, self.npol), dtype=raw.dtype)
        raw = np.concatenate((raw, nyq_pad), axis=1)
 
        # Get pseudo-timestream
        pd = irfft(raw, axis=1)
        # Set up for deconvolution
        fpd = rfft(pd, axis=0)
        del pd

        lh = np.zeros((raw.shape[0], self.h.shape[1]))
        lh[:self.h.shape[0]] = self.h
        fh = rfft(lh, axis=0).conj()
        del lh
        
        # FT of Wiener deconvolution kernel
        fg = fh.conj() / (np.abs(fh)**2 + (1/self.sn)**2)
        # Deconvolve and get deconvolved timestream
        rd = irfft(fpd * fg[..., np.newaxis],
                          axis=0).reshape(-1, self.npol)

        # view as a record array
        return rd.astype('f4')
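The read() method above performs Wiener deconvolution in the Fourier domain: the kernel fg = conj(H) / (|H|**2 + 1/SNR**2) acts like an inverse filter where the response H is strong and rolls off where it is weak, so noise is not amplified. A minimal self-contained 1-D sketch of the same kernel, assuming numpy's rfft/irfft and replacing the class attributes self.h and self.sn with plain arguments:

import numpy as np
from numpy.fft import rfft, irfft

def wiener_deconvolve(signal, h, sn):
    # FT of the impulse response, zero-padded to the signal length
    fh = rfft(h, signal.size)
    # Wiener kernel: ~1/H where |H| is large, rolls off where |H| is small
    fg = fh.conj() / (np.abs(fh)**2 + (1.0 / sn)**2)
    return irfft(rfft(signal) * fg, signal.size)

# Example: blur a spike train by circular convolution with a boxcar, then recover it
rng = np.random.default_rng(0)
x = np.zeros(1024)
x[::128] = 1.0
h = np.ones(8) / 8.0
blurred = irfft(rfft(x) * rfft(h, x.size), x.size) + 0.01 * rng.standard_normal(x.size)
recovered = wiener_deconvolve(blurred, h, sn=100.0)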
Example #2
    def read(self, samples):

        raw = self.fh.read(samples)
        raw = np.swapaxes(raw, 1, 2)

        nyq_pad = np.zeros((raw.shape[0], 1, self.npol), dtype=raw.dtype)
        raw = np.concatenate((raw, nyq_pad), axis=1)

        # Get pseudo-timestream
        pd = irfft(raw, axis=1)
        # Set up for deconvolution
        fpd = rfft(pd, axis=0)
        del pd

        lh = np.zeros((raw.shape[0], self.h.shape[1]))
        lh[:self.h.shape[0]] = self.h
        fh = rfft(lh, axis=0).conj()
        del lh

        # FT of Wiener deconvolution kernel
        fg = fh.conj() / (np.abs(fh)**2 + (1 / self.sn)**2)
        # Deconvolve and get deconvolved timestream
        rd = irfft(fpd * fg[..., np.newaxis], axis=0).reshape(-1, self.npol)

        # view as a record array
        return rd.astype('f4')
Example #3
    def batchreconstructcompact(self, img):
        nim = img.shape[0]
        r = np.mod(nim, 6)
        if r > 0:  # pad with empty frames so total number of frames is divisible by 6
            img = np.concatenate((img, np.zeros((6 - r, self.N, self.N), np.single)))
            nim = nim + 6 - r
        nim3 = nim // 3
        imf = fft.rfft2(img) * self._prefilter[:, 0:self.N // 2 + 1]
        img2 = np.zeros([nim, 2 * self.N, 2 * self.N], dtype=np.single)
        for i in range(0, nim, 3):
            self._carray1[:, 0:self.N // 2, 0:self.N // 2 + 1] = imf[i:i + 3, 0:self.N // 2, 0:self.N // 2 + 1]
            self._carray1[:, 3 * self.N // 2:2 * self.N, 0:self.N // 2 + 1] = imf[i:i + 3, self.N // 2:self.N,
                                                                              0:self.N // 2 + 1]
            img2[i:i + 3, :, :] = fft.irfft2(self._carray1) * self._reconfactor
        res = np.zeros((nim3, 2 * self.N, 2 * self.N), dtype=np.single)

        imgf = fft.rfft(img2[:, :self.N, :self.N], nim, 0)[:nim3 // 2 + 1, :, :]
        res[:, :self.N, :self.N] = fft.irfft(imgf, nim3, 0)
        imgf = fft.rfft(img2[:, :self.N, self.N:2 * self.N], nim, 0)[:nim3 // 2 + 1, :, :]
        res[:, :self.N, self.N:2 * self.N] = fft.irfft(imgf, nim3, 0)
        imgf = fft.rfft(img2[:, self.N:2 * self.N, :self.N], nim, 0)[:nim3 // 2 + 1, :, :]
        res[:, self.N:2 * self.N, :self.N] = fft.irfft(imgf, nim3, 0)
        imgf = fft.rfft(img2[:, self.N:2 * self.N, self.N:2 * self.N], nim, 0)[:nim3 // 2 + 1, :, :]
        res[:, self.N:2 * self.N, self.N:2 * self.N] = fft.irfft(imgf, nim3, 0)

        res = fft.irfft2(fft.rfft2(res) * self._postfilter[:, :self.N + 1])
        return res
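The copies into self._carray1 above amount to Fourier-domain zero padding: the positive and wrapped negative vertical frequencies of each N x N frame are placed at the top and bottom of a buffer holding the half-spectrum of a 2N x 2N image, so the inverse transform returns the frame upsampled by two in each direction. A single-image numpy sketch of that step (the class buffers, prefilter and reconstruction factor are omitted here):

import numpy as np
from numpy import fft

def upsample2x(img):
    N = img.shape[0]
    imf = fft.rfft2(img)                           # shape (N, N // 2 + 1)
    big = np.zeros((2 * N, N + 1), dtype=complex)  # half-spectrum of a 2N x 2N image
    big[:N // 2, :N // 2 + 1] = imf[:N // 2]       # positive vertical frequencies
    big[3 * N // 2:, :N // 2 + 1] = imf[N // 2:]   # negative vertical frequencies
    # factor 4 compensates the 1/(2N)^2 normalisation of the larger inverse transform
    return fft.irfft2(big, s=(2 * N, 2 * N)) * 4

img = np.random.rand(64, 64)
big_img = upsample2x(img)   # shape (128, 128)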
Example #4
    def seek_record_read(self, offset, size):
        if offset % self.recordsize != 0 or size % self.recordsize != 0:
            raise ValueError("size and offset must be an integer number of records")

        raw = self.fh_raw.seek_record_read(offset, size)

        if self.npol == 2 and self._raw_data_class == AROCHIMERawData:
            raw = raw.view(list(raw.dtype.fields.values())[0][0])

        raw = raw.reshape(-1, self.fh_raw.nchan, self.npol)
        nyq_pad = np.zeros((raw.shape[0], 1, self.npol), dtype=raw.dtype)
        raw = np.concatenate((raw, nyq_pad), axis=1)
        # Get pseudo-timestream
        pd = irfft(raw, axis=1, **_rfftargs)
        # Set up for deconvolution
        fpd = rfft(pd, axis=0, **_rfftargs)
        del pd
        if self.fh is None or self.fh.shape[0] != fpd.shape[0]:
            lh = np.zeros((raw.shape[0], self.h.shape[1]))
            lh[:self.h.shape[0]] = self.h
            self.fh = rfft(lh, axis=0, **_rfftargs).conj()
            del lh
        # FT of Wiener deconvolution kernel
        fg = self.fh.conj() / (np.abs(self.fh)**2 + (1/self.sn)**2)
        # Deconvolve and get deconvolved timestream
        rd = irfft(fpd * fg[..., np.newaxis],
                          axis=0, **_rfftargs).reshape(-1, self.npol)
        # select actual part requested
        self.offset = offset + size
        # view as a record array
        return rd.astype('f4')
Example #5
    def seek_record_read(self, offset, size):
        if offset % self.recordsize != 0 or size % self.recordsize != 0:
            raise ValueError(
                "size and offset must be an integer number of records")

        raw = self.fh_raw.seek_record_read(offset, size)

        if self.npol == 2 and self._raw_data_class == AROCHIMERawData:
            raw = raw.view(list(raw.dtype.fields.values())[0][0])

        raw = raw.reshape(-1, self.fh_raw.nchan, self.npol)
        nyq_pad = np.zeros((raw.shape[0], 1, self.npol), dtype=raw.dtype)
        raw = np.concatenate((raw, nyq_pad), axis=1)
        # Get pseudo-timestream
        pd = irfft(raw, axis=1, **_rfftargs)
        # Set up for deconvolution
        fpd = rfft(pd, axis=0, **_rfftargs)
        del pd
        if self.fh is None or self.fh.shape[0] != fpd.shape[0]:
            lh = np.zeros((raw.shape[0], self.h.shape[1]))
            lh[:self.h.shape[0]] = self.h
            self.fh = rfft(lh, axis=0, **_rfftargs).conj()
            del lh
        # FT of Wiener deconvolution kernel
        fg = self.fh.conj() / (np.abs(self.fh)**2 + (1 / self.sn)**2)
        # Deconvolve and get deconvolved timestream
        rd = irfft(fpd * fg[..., np.newaxis], axis=0,
                   **_rfftargs).reshape(-1, self.npol)
        # select actual part requested
        self.offset = offset + size
        # view as a record array
        return rd.astype('f4')
Example #6
def phaseFitCorrelate(waveA, waveB, noPhase=False):
    #tries to determine the correlation (and returns two "correlated" graphs) by convolving and fitting the phase

    #first get the ffts of both waveforms
    waveAX, waveAY = waveA
    fA, fftA = genFFT(waveAX, waveAY)
    waveBX, waveBY = waveB
    fB, fftB = genFFT(waveBX, waveBY)

    #then find the convolution
    conv = np.conj(fftA) * fftB

    #find the unwrapped phase of the convolution
    convM, convP = complexToGainAndPhase(conv)
    convP = np.unwrap(convP)

    #then fit the phase of that, weighted by magnitude of the system
    phaseFit = mf.weightedLeastSqrs(fB, convP, convM)
    slope = phaseFit[0]
    yint = phaseFit[1]
    if noPhase:
        yint = 0

    #now find the unwrapped phase of the waveform you want to shift
    magB, phaseB = complexToGainAndPhase(fftB)
    phaseB = np.unwrap(phaseB)

    #shift it by the fit
    phaseB -= mf.lambdaLin([slope, 0, yint], fB)

    #then transform it back into the time domain
    fftB = gainAndPhaseToComplex(magB, phaseB)
    waveBY = fftw.irfft(fftB)

    return waveBY, [slope, yint]
Example #7
def compPhaseShifts4(y):

    try:
        sns.set_palette(sns.color_palette("husl", 20))
    except:
        pass

    fig, ax = lab.subplots()

    fft = fftw.rfft(y)
    shifts = np.arange(-np.pi, np.pi, 0.01)

    for tZero in range(310, 330):
        print(tZero)
        causalityRatio = []

        for i in range(0, len(shifts)):
            shiftedFFT = fftPhaseShift(fft, shifts[i])
            shifted = fftw.irfft(shiftedFFT)
            causalityRatio.append(
                np.sum(shifted[:tZero]**2) / np.sum(shifted[tZero:]**2))

        ax.plot(shifts, causalityRatio, label=tZero)

    ax.legend()
    fig.show()

    return
Example #8
def pink(N, state=None):
    """
    Pink noise.

    :param N: Amount of samples.
    :param state: State of PRNG.
    :type state: :class:`np.random.RandomState`

    Pink noise has equal power in bands that are proportionally wide.
    Power density decreases with 3 dB per octave.

    """
    # This method uses the filter with the following coefficients.
    #b = np.array([0.049922035, -0.095993537, 0.050612699, -0.004408786])
    #a = np.array([1, -2.494956002, 2.017265875, -0.522189400])
    #return lfilter(B, A, np.random.randn(N))
    # Another way would be using the FFT
    #x = np.random.randn(N)
    #X = rfft(x) / N
    state = np.random.RandomState() if state is None else state
    uneven = N % 2
    X = state.randn(N // 2 + 1 +
                    uneven) + 1j * state.randn(N // 2 + 1 + uneven)
    S = np.sqrt(np.arange(len(X)) + 1.)  # +1 to avoid divide by zero
    y = (irfft(X / S)).real
    if uneven:
        y = y[:-1]
    return normalize(y)
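A quick numerical check of the 3 dB per octave claim above; the normalize() call belongs to the surrounding module, so this self-contained sketch uses a plain unit-variance rescale instead:

import numpy as np
from numpy.fft import rfft, irfft, rfftfreq

N = 2**16
rng = np.random.RandomState(0)
X = rng.randn(N // 2 + 1) + 1j * rng.randn(N // 2 + 1)
y = irfft(X / np.sqrt(np.arange(N // 2 + 1) + 1.0))   # 1/sqrt(f) amplitude shaping
y /= y.std()

f = rfftfreq(N)
P = np.abs(rfft(y))**2
low = P[(f > 0.01) & (f < 0.02)].mean()
high = P[(f > 0.02) & (f < 0.04)].mean()
print(10 * np.log10(high / low))   # close to -3 dB: density halves per octave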
Example #9
def BasebandProcess(ar_data, band, SR, dt, N, DN, offset, i, dd_coh, ngate):
    print('ar_data', ar_data)
    fh = mark4.open(ar_data,
                    'rs',
                    ntrack=64,
                    decade=2010,
                    sample_rate=SR,
                    thread_ids=[2 * band, 2 * band + 1])
    fh.seek(offset + i * (N - DN))
    t0 = fh.tell(unit='time')
    print('t0', t0)
    t1 = t0.mjd
    print('t1', t1)
    print('N-DN', N - DN)
    print('dt', dt)
    print('ngate', ngate)
    # note ph is equivalent to ((ph)*P0) / (P0/ngate) % ngate
    ph, ncycle = FindPhase(t0, N - DN, dt, ngate)
    ph %= ngate
    print('ph3', ph)
    z = fh.read(N)
    z = rfft(z, axis=0, **_fftargs)
    z *= dd_coh[..., np.newaxis]
    z = irfft(z, axis=0, **_fftargs)[:-DN]
    z = z.astype(np.float32)
    z = z * z
    return ph, z
Example #10
def pink(N, state=None):
    """
    Pink noise. 
    
    :param N: Amount of samples.
    :param state: State of PRNG.
    :type state: :class:`np.random.RandomState`
    
    Pink noise has equal power in bands that are proportionally wide.
    Power density decreases with 3 dB per octave.
    
    """
    # This method uses the filter with the following coefficients.
    #b = np.array([0.049922035, -0.095993537, 0.050612699, -0.004408786])
    #a = np.array([1, -2.494956002, 2.017265875, -0.522189400])
    #return lfilter(B, A, np.random.randn(N))
    # Another way would be using the FFT
    #x = np.random.randn(N)
    #X = rfft(x) / N  
    state = np.random.RandomState() if state is None else state
    uneven = N%2
    X = state.randn(N//2+1+uneven) + 1j * state.randn(N//2+1+uneven)
    S = np.sqrt(np.arange(len(X))+1.) # +1 to avoid divide by zero
    y = (irfft(X/S)).real
    if uneven:
        y = y[:-1]
    return normalize(y)
Example #11
def create_brown_noise_regular_ts(n_t, par=1.0):
    """Create random time series with brown noise.

    Parameters
    ----------
    n_t: int
        the number of time steps of the timeseries.
    par: float
        the parameter which determines the scale of the variations.

    Returns
    -------
    times: np.ndarray
        the times sampled.
    values: np.ndarray
        the values of the signal in each times sampled.

    """
    uneven = n_t % 2
    X = np.random.randn(n_t//2+1+uneven) + 1j*np.random.randn(n_t//2+1+uneven)
    S = np.arange(len(X))+1
    y = (irfft(X/S)).real
    if uneven:
        y = y[:-1]
    values = y*np.sqrt(par/(np.abs(y)**2.0).mean())
    times = np.arange(n_t)
    assert(len(values) == len(times))
    return times, values
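A short usage sketch, assuming numpy and irfft are imported as in the module this function comes from; by construction the rescaling at the end makes the mean squared value of the returned series equal to par:

times, values = create_brown_noise_regular_ts(1000, par=2.0)
print(times.shape, values.shape)    # (1000,) (1000,)
print((np.abs(values)**2).mean())   # ~2.0 by construction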
Example #12
    def propagate_to_base(self, layer_props):
        """
        Given a stack of layers in layer_props, propagate the surface seismograms down to the base
        of the bottom layer.

        :param layer_props: List of LayerProps through which to propagate the surface seismograms.
        :type layer_props: list(seismic.model_properties.LayerProps)
        :return: (Vr, Vz) velocity components time series per event at the bottom of the stack of layers.
        :rtype: numpy.array
        """
        # Propagate from surface to the bottom of the layers provided
        fv_base = WfContinuationSuFluxComputer._propagate_layers(self._fv0, self._w, layer_props, self._p)

        num_pos_freq_terms = (fv_base.shape[2] + 1) // 2
        # Velocities and stresses at bottom of stack of layers
        v_base = irfft(fv_base[:, :, :num_pos_freq_terms], self._npts, axis=2)

        # Recover source data amplitudes (undo normalization)
        v_base = np.moveaxis(v_base, 0, -1)
        v_base = v_base * self._max_vz
        v_base = np.moveaxis(v_base, -1, 0)

        # Return just the velocity components (Vr, Vz) and throw away the stresses
        vel_rz_base = v_base[:, :2, :].real
        # Negate the z-component to restore polarity swapped during original data ingestion.
        vel_rz_base[:, 1, :] = -vel_rz_base[:, 1, :]
        assert np.allclose(v_base[:, :2, :].imag, 0.0)

        return vel_rz_base
Example #13
File: noise.py Project: SachsLab/mspacman
def pink(*N, mean=0, std=1):
    """
    =================================
    Pink Noise
    -------------------
    Pink noise has equal power in bands that are proportionally wide.
    Power density decreases with 3 dB per octave.
    =================================
    """
    if len(N) < 2:
        N = tuple((1, N[0]))

    n1, n2 = N
    uneven = n2 % 2

    X = np.random.randn(n1, n2 // 2 + 1 + uneven) + 1j * np.random.randn(
        n1, n2 // 2 + 1 + uneven)

    S = np.sqrt(np.arange(X.shape[-1]) + 1)  # +1 to avoid divide by zero

    y = (fft.irfft(X / S)).real
    if uneven:
        y = y[:, :-1]

    # Normalize the results to the white noise
    y = normalize(y, white(*N, mean=mean, std=std))
    if y.ndim < 2:
        return y.flatten()
    else:
        return y
Example #14
    def generate(self):
        X = np.random.randn(
            self.N // 2 + 1 +
            self.uneven) + 1j * np.random.randn(self.N // 2 + 1 + self.uneven)
        y = (irfft(X / self.S)).real
        if self.uneven:
            y = y[:-1]
        return normalize(y)
Example #15
def timePhaseShift(Y, shift):
    fft = fftw.rfft(Y)
    gainLin, phase = complexToGainAndPhase(fft)
    phase += shift
    #    phase[phase>=np.pi] -= np.pi
    #    phase[phase<=-np.pi] += np.pi
    fft = gainAndPhaseToComplex(gainLin, phase)
    outY = fftw.irfft(fft)
    return outY
Example #16
def _shift(trace, shift):
    """Shift trace by given time and correct starttime => interpolation"""
    msg = ('interpolate trace %s with starttime %s to shift by %.6fs '
           '(Fourier method)')
    log.debug(msg, trace.id, trace.stats.starttime, shift)
    nfft = next_fast_len(len(trace))
    spec = rfft(trace.data, nfft)
    freq = rfftfreq(nfft, trace.stats.delta)
    spec *= np.exp(-2j * np.pi * freq * shift)
    trace.data = irfft(spec, nfft)[:len(trace)]
    trace.stats.starttime -= shift
    return trace
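The multiplication above is the Fourier shift theorem: scaling the spectrum by exp(-2j*pi*f*shift) delays the trace by shift seconds. A numpy-only sketch of the same idea on a plain array, leaving out the ObsPy Trace handling and the next_fast_len padding used in the snippet:

import numpy as np
from numpy.fft import rfft, irfft, rfftfreq

def fourier_shift(data, shift, delta):
    # Delay a uniformly sampled series by `shift` seconds (sample spacing `delta`).
    spec = rfft(data)
    freq = rfftfreq(len(data), delta)
    spec *= np.exp(-2j * np.pi * freq * shift)
    return irfft(spec, len(data))

t = np.arange(0, 1, 0.001)
pulse = np.exp(-((t - 0.3) / 0.01)**2)
delayed = fourier_shift(pulse, 0.1, 0.001)   # peak moves from t = 0.3 to about t = 0.4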
Example #17
    def readPFB(self, samples):

        import timeit
        start_time = timeit.default_timer()

        raw = self.read(samples)
        raw = np.swapaxes(raw, 1, 2)

        nyq_pad = np.zeros((raw.shape[0], 1, self.npol), dtype=raw.dtype)
        raw = np.concatenate((raw, nyq_pad), axis=1)

        # Get pseudo-timestream
        pd = irfft(raw, axis=1)
        # Set up for deconvolution

        fpd = rfft(pd, axis=0)
        del pd

        #if not 'han' in self.__dict__ :
        lh = np.zeros((raw.shape[0], self.h.shape[1]))
        lh[:self.h.shape[0]] = self.h

        self.han = rfft(lh, axis=0).conj()
        del lh

        # FT of Wiener deconvolution kernel
        fg = self.han.conj() / (np.abs(self.han)**2 + (1 / self.sn)**2)
        print(fpd.shape)
        # Deconvolve and get deconvolved timestream
        rd = irfft(fpd * fg[..., np.newaxis], axis=0)
        print(rd.shape)

        rd = rd.reshape(-1, self.npol)

        print(timeit.default_timer() - start_time)

        # view as a record array
        return rd.astype('f4')
Example #18
def makeCausalTime(y, tZero):
    """
    If you have the time series, this makes it easier (but slightly slower!
    Two more FFTS!)
    
    you have to provide the pulse (in bins)

    """

    fft = fftw.rfft(y)
    shifted = makeCausalFFT(fft, tZero)
    yOut = fftw.irfft(shifted)

    return yOut
Example #19
def brown(N):
    """
    Brown noise.
    
    :param N: Amount of samples.
    
    Power decreases with -3 dB per octave.
    Power density decreases with 6 dB per octave. 

    """
    x = np.random.randn(N)
    X = rfft(x) / N
    S = (np.arange(len(X)) + 1)  # Filter
    y = (irfft(X / S)).real[0:N]
    return normalise(y)
Example #20
def violet(N):
    """
    Violet noise. Power increases with 6 dB per octave. 
    
    :param N: Amount of samples.
    
    Power increases with +9 dB per octave.
    Power density increases with +6 dB per octave. 
    
    """
    x = np.random.randn(N)
    X = rfft(x) / N
    S = (np.arange(len(X)))# Filter
    y = (irfft(X*S)).real[0:N]
    return normalise(y)
Example #21
def blue(N):
    """
    Blue noise. 
    
    :param N: Amount of samples.
    
    Power increases with 6 dB per octave.
    Power density increases with 3 dB per octave. 
    
    """
    x = np.random.randn(N)
    X = rfft(x) / N
    S = np.sqrt(np.arange(len(X)))# Filter
    y = (irfft(X*S)).real[0:N]
    return normalise(y)
Example #22
def brown(N):
    """
    Brown noise.
    
    :param N: Amount of samples.
    
    Power decreases with -3 dB per octave.
    Power density decreases with 6 dB per octave. 

    """
    x = np.random.randn(N)
    X = rfft(x) / N
    S = (np.arange(len(X))+1)# Filter
    y = (irfft(X/S)).real[0:N]
    return normalise(y)
Example #23
def violet(N):
    """
    Violet noise. Power increases with 6 dB per octave. 
    
    :param N: Amount of samples.
    
    Power increases with +9 dB per octave.
    Power density increases with +6 dB per octave. 
    
    """
    x = np.random.randn(N)
    X = rfft(x) / N
    S = (np.arange(len(X)))  # Filter
    y = (irfft(X * S)).real[0:N]
    return normalise(y)
Example #24
def blue(N):
    """
    Blue noise. 
    
    :param N: Amount of samples.
    
    Power increases with 6 dB per octave.
    Power density increases with 3 dB per octave. 
    
    """
    x = np.random.randn(N)
    X = rfft(x) / N
    S = np.sqrt(np.arange(len(X)))  # Filter
    y = (irfft(X * S)).real[0:N]
    return normalise(y)
Example #25
def violet(N):
    """
    Violet noise. Power increases with 6 dB per octave. 
    
    :param N: Amount of samples.
    
    Power increases with +9 dB per octave.
    Power density increases with +6 dB per octave. 
    
    """
    uneven = N%2
    X = np.random.randn(N//2+1+uneven) + 1j * np.random.randn(N//2+1+uneven)
    S = (np.arange(len(X)))# Filter
    y = (irfft(X*S)).real
    if uneven:
        y = y[:-1]
    return normalize(y)
Example #26
def violet(N: int) -> np.ndarray:
    """
    Violet noise. Power increases with 6 dB per octave.

    * N: Amount of samples.

    Power increases with +9 dB per octave.
    Power density increases with +6 dB per octave.

    https://github.com/python-acoustics
    """
    x = white(N)
    X = rfft(x) / N
    S = np.arange(X.size)  # Filter
    y = irfft(X * S).real[0:N]

    return normalise(y)
Example #27
def brown(N):
    """
    Brown noise.

    :param N: Amount of samples.

    Power decreases with -3 dB per octave.
    Power density decreases with 6 dB per octave.
    """
    uneven = N % 2
    X = np.random.randn(N // 2 + 1 +
                        uneven) + 1j * np.random.randn(N // 2 + 1 + uneven)
    S = np.arange(len(X)) + 1  # Filter; +1 to avoid divide by zero
    y = (irfft(X / S)).real
    if uneven:
        y = y[:-1]
    return normalize(y)
Example #28
File: stft.py Project: SachsLab/pytf
def istft(X, nsamp=None, binsize=1024, overlap_factor=.5, hopsize=None):
    """ Inverse STFT.

    Parameters:
    -----------
    X: ndarray, (n_ch, n_win, binsize // 2 + 1) or (n_ch, n_win, n_fr, binsize // 2 + 1)

    binsize: int
        Window size for processing FFT on.

    overlap_factor: float
        The percentage of overlapping between consecutive windows.

    hopsize: int
        The sample size required to jump to the next row.

    Return:
    -------
    x: ndarray, (n_ch, n_fr, n_samp)
    """
    # Sanity check
    if X.ndim not in [3, 4]:
        raise ValueError(
            "The dimension of 'X' is not valid as an output from stft! Double check 'X'."
        )

    if X.ndim == 3:  # add a n_fr dimension
        X = X[:, :, np.newaxis, :]

    if (X.shape[-1] - 1) * 2 != binsize:
        raise ValueError("The 'binsize' must match the length of X.shape[-1].")

    hopsize = binsize * (1 - overlap_factor) if hopsize is None else hopsize

    # Process
    x_ = fft.irfft(X, n=binsize, axis=-1, planner_effort='FFTW_ESTIMATE')

    # Reconstructing the signal using overlap-add
    x = overlap_add(x_, binsize=binsize, overlap_factor=overlap_factor)

    # Clean up the signal
    if nsamp is not None:
        x = x[:, :, binsize // 2:nsamp + binsize // 2]

    return x
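The overlap_add helper used above comes from the surrounding pytf package and is not shown in these examples; for a single channel it simply sums the inverse-transformed windows back at their hop positions. A minimal illustration of that idea (an assumption about the interface, not the pytf routine itself):

import numpy as np

def overlap_add_sketch(frames, hopsize):
    # frames: array of shape (n_win, binsize); returns the summed 1-D signal.
    n_win, binsize = frames.shape
    out = np.zeros((n_win - 1) * hopsize + binsize)
    for i, frame in enumerate(frames):
        out[i * hopsize:i * hopsize + binsize] += frame
    return out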
Example #29
def violet(N, state=None):
    """
    Violet noise. Power increases with 6 dB per octave.
    :param N: Amount of samples.
    :param state: State of PRNG.
    :type state: :class:`np.random.RandomState`
    Power increases with +9 dB per octave.
    Power density increases with +6 dB per octave.
    """
    state = np.random.RandomState() if state is None else state
    uneven = N % 2
    X = state.randn(N // 2 + 1 +
                    uneven) + 1j * state.randn(N // 2 + 1 + uneven)
    S = (np.arange(len(X)))  # Filter
    y = (irfft(X * S)).real
    if uneven:
        y = y[:-1]
    return normalize(y, 1)
Example #30
def blue(N):
    """
    Blue noise. 
    
    :param N: Amount of samples.
    
    Power increases with 6 dB per octave.
    Power density increases with 3 dB per octave. 
    
    """
    uneven = N % 2
    X = np.random.randn(N // 2 + 1 +
                        uneven) + 1j * np.random.randn(N // 2 + 1 + uneven)
    S = np.sqrt(np.arange(len(X)))  # Filter
    y = (irfft(X * S)).real
    if uneven:
        y = y[:-1]
    return normalize(y)
Example #31
def makeCausalFFT(fft, tZero):
    """
    http://cdn.teledynelecroy.com/files/whitepapers/14-wp6paper_doshi.pdf
    
    Basically when taking the S21 of a cable or signal (which we are doing,
    just a forward gain complex phasor), fractional sample offsets distort the
    signal from causal to an acausal sinc function in the time domain.  To
    alleviate this, it is allowed to shift the total phase of the S21 in order
    to achieve maximum causality, which in this case would be a low pre-T0 to
    post-T0 power ratio.

    I wrote a section about this in the impulse response white paper citing the
    above linked paper

    I think the max causality has to be in between -pi and pi... though
    probably smaller
    HOW IS THE CENTER DEFINED???? (17 is just for the cables which I found by
    looking, but it has to be calculatable somehow).  You have to provide this
    because I'm strapped for time

    Also at some point I should fit the "causalityRatio" thing and actually
    find the perfect point, but that is polishing the cannon for sure.

    Also note that this can 180 degree phase shift things (flip the polarity)
    so BE CAREFUL
    """

    shifts = np.arange(-np.pi, np.pi, 0.01)

    causalityRatio = []

    for i in range(0, len(shifts)):
        shiftedFFT = fftPhaseShift(fft, shifts[i])
        shifted = fftw.irfft(shiftedFFT)
        causalityRatio.append(
            np.sum(shifted[:tZero]**2) / np.sum(shifted[tZero:]**2))

    #minimum is when things are "most causal" (second half bigger than first half)
    maxCausal = np.argmin(causalityRatio)

    print "Maximum causal waveform found at: ", shifts[maxCausal]
    shiftedFFT = fftPhaseShift(fft, shifts[maxCausal])

    return shiftedFFT
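fftPhaseShift is defined elsewhere in this project and not shown among these examples. For the causality scan above it only needs to rotate the whole rfft half-spectrum by a constant phase, so a plausible stand-in (an assumption, not the original implementation) would be:

import numpy as np

def fftPhaseShift_sketch(spectrum, shift):
    # Rotate every bin of an rfft half-spectrum by `shift` radians.
    return spectrum * np.exp(1j * shift)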
Example #32
    def __call__(self, mantle_props, layer_props, flux_window=(-10, 20)):
        """Compute upgoing S-wave energy at top of mantle for set of seismic time series in self._v0.

        :param mantle_props: LayerProps representing mantle properties.
        :type mantle_props: seismic.model_properties.LayerProps
        :param layer_props: List of LayerProps.
        :type layer_props: list(seismic.model_properties.LayerProps)
        :return: Mean SU energy, SU energy per seismogram, wavefield vector at top of mantle in (Pd, Pu, Sd, Su)
            order for each seismogram.
        :rtype: (float, numpy.array, numpy.array)
        """
        # This is the callable operator that performs computations of energy flux

        # Compute mode matrices for mantle
        M_m, Minv_m, _ = WfContinuationSuFluxComputer._mode_matrices(mantle_props.Vp, mantle_props.Vs, mantle_props.rho,
                                                                     self._p)

        # Propagate from surface
        fvm = WfContinuationSuFluxComputer._propagate_layers(self._fv0, self._w, layer_props, self._p)
        # Decompose velocity and stress components into Pd, Pu, Sd and Su components.
        fvm = np.matmul(Minv_m, fvm)

        num_pos_freq_terms = (fvm.shape[2] + 1) // 2
        # Velocities at top of mantle
        vm = irfft(fvm[:, :, :num_pos_freq_terms], self._npts, axis=2)

        # Compute coefficients of energy integral for upgoing S-wave
        qb_m = np.sqrt(1 / mantle_props.Vs ** 2 - self._p * self._p)
        Nsu = self._dt * mantle_props.rho * (mantle_props.Vs ** 2) * qb_m

        # Compute mask for the energy integral time window
        integral_mask = (self._time_axis >= flux_window[0]) & (self._time_axis <= flux_window[1])
        vm_windowed = vm[:, :, integral_mask]

        # Take the su component.
        su_windowed = vm_windowed[:, 3, :]

        # Integrate in time
        Esu_per_event = Nsu * np.sum(np.abs(su_windowed) ** 2, axis=1)

        # Compute mean over events
        Esu = np.mean(Esu_per_event)

        return Esu, Esu_per_event, vm
Example #33
def plotMinimizedGroupDelay(f, fft):

    gain = np.absolute(fft)
    phase = calcPhase(fft)
    gdOrig = calcGroupDelay(fft)

    fftNew = minimizeGroupDelayFromFFT(f, fft)
    gainNew = np.absolute(fftNew)
    phaseNew = calcPhase(fftNew)
    gdNew = calcGroupDelay(fftNew)

    figs, axes = lab.subplots(3)

    axes[0].set_ylabel("cumulative phase (radians)")
    axes[0].set_xlabel("Frequency (GHz)")
    axes[0].plot(f, phase, '.', label="measured phase")
    axes[0].plot(f, phaseCorr, label="linear fit")
    axes[0].legend(loc="lower right")

    phaseNew = phase - phaseCorr
    gdNew = calcGroupDelayFromPhase(phaseNew)

    axes[1].set_xlabel("Frequency (GHz)")
    axes[1].set_ylabel("Group Dleay (ns)")
    axes[1].plot(f, gdNew, label="corrected group delay")
    #    axes[1].plot(f[1:],gdNew,label="corrected group delay")
    twinAx = axes[1].twinx()
    twinAx.set_ylabel("Gain (dB)")
    twinAx.plot(f, 10 * np.log10(np.absolute(fft)), label="gain", color="red")
    axes[1].legend(loc="lower left")
    twinAx.legend()

    fftNew = gainAndPhaseToComplex(gain, phaseNew)
    waveYNew = fftw.irfft(fftNew)

    axes[2].plot(waveX, waveY, label="orig")
    axes[2].plot(waveX, waveYNew, label="minimize gd")
    axes[2].legend()

    figs.show()

    return
Example #34
def apply_noise(y, target_snr_db=20):
    ## Adapted from: https://stackoverflow.com/questions/14058340/adding-noise-to-a-signal-in-python
    ## Apply random white noise to signal to reach target_snr_db

    x_watts = y ** 2
    sig_avg_watts = np.mean(x_watts)
    sig_avg_db = 10 * np.log10(sig_avg_watts)
    # Calculate noise according to [2] then convert to watts
    noise_avg_db = sig_avg_db - float(target_snr_db)
    noise_avg_watts = 10 ** (noise_avg_db / 10)
    # Generate a sample of white noise (np.random.normal takes the standard
    # deviation, i.e. the square root of the noise power)
    mean_noise = 0
    noise_volts = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(x_watts))
    Xb = rfft(noise_volts) / len(noise_volts)
    Sb = np.arange(Xb.size)+1  # Filter
    yb = irfft(Xb/Sb).real[:len(noise_volts)]
    # Noise up the original signal
    noisy_signal = y + yb

    return np.clip(noisy_signal, -1, 1)
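A usage sketch, assuming numpy and the rfft/irfft used above are in scope; the result is the input signal with 1/f-shaped noise added at roughly the requested signal-to-noise ratio and clipped to [-1, 1]:

import numpy as np

t = np.linspace(0, 1, 8000, endpoint=False)
clean = 0.5 * np.sin(2 * np.pi * 440 * t)
noisy = apply_noise(clean, target_snr_db=20)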
Example #35
def violet(N, state=None):
    """
    Violet noise. Power increases with 6 dB per octave. 
    
    :param N: Amount of samples.
    :param state: State of PRNG.
    :type state: :class:`np.random.RandomState`
    
    Power increases with +9 dB per octave.
    Power density increases with +6 dB per octave. 
    
    """
    state = np.random.RandomState() if state is None else state
    uneven = N%2
    X = state.randn(N//2+1+uneven) + 1j * state.randn(N//2+1+uneven)
    S = (np.arange(len(X)))# Filter
    y = (irfft(X*S)).real
    if uneven:
        y = y[:-1]
    return normalize(y)
Example #36
def correlation(graphA, graphB):
    """
    Takes in two EVENLY SAMPLED graphs, then does the cross correlation in
    fourier space

    NOTE: I need to roll this a bit! 
    Note to Note: Why?  to draw it away from zero?  Maybe no roll :(
    """

    #    fart,graphA = zeroPadEqual(np.arange(len(graphA)),graphA,len(graphA)*3)
    #    fart,graphB = zeroPadEqual(np.arange(len(graphB)),graphB,len(graphB)*3)

    fftA = fftw.rfft(np.array(graphA))
    fftB = fftw.rfft(np.array(graphB))

    xCorr = np.conj(fftA) * fftB

    iXCorr = fftw.irfft(xCorr)

    #    return np.roll(iXCorr,len(iXCorr)/2)
    return iXCorr
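The same circular cross-correlation can be done with numpy's FFTs instead of the pyfftw wrapper imported as fftw above; the peak position gives the lag by which the second input trails the first:

import numpy as np
from numpy.fft import rfft, irfft

def xcorr(a, b):
    # Circular cross-correlation; peaks at the lag by which b trails a.
    return irfft(np.conj(rfft(a)) * rfft(b), len(a))

rng = np.random.default_rng(1)
a = rng.standard_normal(512)
b = np.roll(a, 37)
lag = int(np.argmax(xcorr(a, b)))   # 37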
Example #37
def pink(N):
    """
    Pink noise. 
    
    :param N: Amount of samples.
    
    Pink noise has equal power in bands that are proportionally wide.
    Power density decreases with 3 dB per octave.
    
    """
    # This method uses the filter with the following coefficients.
    #b = np.array([0.049922035, -0.095993537, 0.050612699, -0.004408786])
    #a = np.array([1, -2.494956002, 2.017265875, -0.522189400])
    #return lfilter(B, A, np.random.randn(N))
    
    # Another way would be using the FFT
    x = np.random.randn(N)
    X = rfft(x) / N
    S = np.sqrt(np.arange(len(X))+1.) # +1 to avoid divide by zero
    y = (irfft(X/S)).real[0:N]
    return normalise(y)
Example #38
def pink(N):
    """
    Pink noise. 
    
    :param N: Amount of samples.
    
    Pink noise has equal power in bands that are proportionally wide.
    Power density decreases with 3 dB per octave.
    
    """
    # This method uses the filter with the following coefficients.
    #b = np.array([0.049922035, -0.095993537, 0.050612699, -0.004408786])
    #a = np.array([1, -2.494956002, 2.017265875, -0.522189400])
    #return lfilter(B, A, np.random.randn(N))

    # Another way would be using the FFT
    x = np.random.randn(N)
    X = rfft(x) / N
    S = np.sqrt(np.arange(len(X)) + 1.)  # +1 to avoid divide by zero
    y = (irfft(X / S)).real[0:N]
    return normalise(y)
Example #39
    def batchreconstruct(self, img, n_output_frames=None):
        nim = img.shape[0]
        r = np.mod(nim, 14)
        if r > 0:  # pad with empty frames so total number of frames is divisible by 14
            img = np.concatenate((img, np.zeros((14 - r, self.N, self.N), np.single)))
            nim = nim + 14 - r
        imf = fft.rfft2(img) * self._prefilter[:, 0:self.N // 2 + 1]
        img2 = np.zeros([nim, 2 * self.N, 2 * self.N], dtype=np.single)
        for i in range(0, nim, 7):
            self._carray1[:, 0:self.N // 2, 0:self.N // 2 + 1] = imf[i:i + 7, 0:self.N // 2, 0:self.N // 2 + 1]
            self._carray1[:, 3 * self.N // 2:2 * self.N, 0:self.N // 2 + 1] = imf[i:i + 7, self.N // 2:self.N,
                                                                              0:self.N // 2 + 1]
            img2[i:i + 7, :, :] = fft.irfft2(self._carray1) * self._reconfactor

        nim7 = nim // 7
        if n_output_frames is None:
            n_output_frames = nim7

        img3 = fft.irfft(fft.rfft(img2, nim, 0)[0:nim7 // 2 + 1, :, :], n_output_frames, 0)
        res = fft.irfft2(fft.rfft2(img3) * self._postfilter[:, :self.N + 1])
        return res
Example #40
def fold(fh, comm, samplerate, fedge, fedge_at_top, nchan,
         nt, ntint, ngate, ntbin, ntw, dm, fref, phasepol,
         dedisperse='incoherent',
         do_waterfall=True, do_foldspec=True, verbose=True,
         progress_interval=100, rfi_filter_raw=None, rfi_filter_power=None,
         return_fits=False):
    """
    FFT data, fold by phase/time and make a waterfall series

    Folding is done from the position the file is currently in

    Parameters
    ----------
    fh : file handle
        handle to file holding voltage timeseries
    comm: MPI communicator or None
        will use size, rank attributes
    samplerate : Quantity
        rate at which samples were originally taken and thus double the
        band width (frequency units)
    fedge : float
        edge of the frequency band (frequency units)
    fedge_at_top: bool
        whether edge is at top (True) or bottom (False)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*ntint, with each sample containing
        a single polarisation
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of the file that is read.
    dedisperse : None or string (default: incoherent).
        None, 'incoherent', 'coherent', 'by-channel'.
        Note: None really does nothing
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool or int
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    return_fits : bool (default: False)
        return a subint fits table for rank == 0 (None otherwise)

    """
    assert dedisperse in (None, 'incoherent', 'by-channel', 'coherent')
    need_fine_channels = dedisperse in ['by-channel', 'coherent']
    assert nchan % fh.nchan == 0
    if dedisperse in ['incoherent', 'by-channel'] and fh.nchan > 1:
        oversample = nchan // fh.nchan
        assert ntint % oversample == 0
    else:
        oversample = 1

    if dedisperse == 'coherent' and fh.nchan > 1:
        raise ValueError("Cannot coherently dedisperse channelized data.")

    if comm is None:
        mpi_rank = 0
        mpi_size = 1
    else:
        mpi_rank = comm.rank
        mpi_size = comm.size

    npol = getattr(fh, 'npol', 1)
    assert npol == 1 or npol == 2
    if verbose > 1 and mpi_rank == 0:
        print("Number of polarisations={}".format(npol))

    # initialize folded spectrum and waterfall
    # TODO: use estimated number of points to set dtype
    if do_foldspec:
        foldspec = np.zeros((ntbin, nchan, ngate, npol**2), dtype=np.float32)
        icount = np.zeros((ntbin, nchan, ngate), dtype=np.int32)
    else:
        foldspec = None
        icount = None

    if do_waterfall:
        nwsize = nt*ntint//ntw//oversample
        waterfall = np.zeros((nwsize, nchan, npol**2), dtype=np.float64)
    else:
        waterfall = None

    if verbose and mpi_rank == 0:
        print('Reading from {}'.format(fh))

    nskip = fh.tell() // fh.blocksize
    if nskip > 0:
        if verbose and mpi_rank == 0:
            print('Starting {0} blocks = {1} bytes out from start.'
                  .format(nskip, nskip*fh.blocksize))

    dt1 = (1./samplerate).to(u.s)
    # need 2*nchan real-valued samples for each FFT
    if fh.telescope == 'lofar':
        dtsample = fh.dtsample
    else:
        dtsample = nchan // oversample * 2 * dt1
    tstart = dtsample * ntint * nskip

    # pre-calculate time delay due to dispersion in coarse channels
    # for channelized data, frequencies are known

    tb = -1. if fedge_at_top else +1.
    if fh.nchan == 1:
        if getattr(fh, 'data_is_complex', False):
            # for complex data, really each complex sample consists of
            # 2 real ones, so multiply dt1 by 2.
            freq = fedge + tb * fftfreq(nchan, 2.*dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * fftfreq(nchan*ntint, 2.*dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        else:  # real data
            freq = fedge + tb * rfftfreq(nchan*2, dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * rfftfreq(ntint*nchan*2, dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        freq_in = freq

    else:
        # Input frequencies may not be the ones going out.
        freq_in = fh.frequencies
        if oversample == 1:
            freq = freq_in
        else:
            freq = freq_in[:, np.newaxis] + tb * fftfreq(oversample, dtsample)

        fcoh = freq_in + tb * fftfreq(ntint, dtsample)[:, np.newaxis]

    # print('fedge_at_top={0}, tb={1}'.format(fedge_at_top, tb))
    # By taking only up to nchan, we remove the top channel at the Nyquist
    # frequency for real, unchannelized data.
    ifreq = freq[:nchan].ravel().argsort()

    # pre-calculate time offsets in (input) channelized streams
    dt = dispersion_delay_constant * dm * (1./freq_in**2 - 1./fref**2)

    if need_fine_channels:
        # pre-calculate required turns due to dispersion.
        #
        # set frequency relative to which dispersion is coherently corrected
        if dedisperse == 'coherent':
            _fref = fref
        else:
            _fref = freq_in[np.newaxis, :]
        # (check via eq. 5.21 and following in
        # Lorimer & Kramer, Handbook of Pulsar Astronomy
        dang = (dispersion_delay_constant * dm * fcoh *
                (1./_fref-1./fcoh)**2) * u.cycle
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            dd_coh = np.exp(dang * 1j).conj().astype(np.complex64)

        # add dimension for polarisation
        dd_coh = dd_coh[..., np.newaxis]

    # Calculate the part of the whole file this node should handle.
    size_per_node = (nt-1)//mpi_size + 1
    start_block = mpi_rank*size_per_node
    end_block = min((mpi_rank+1)*size_per_node, nt)
    for j in range(start_block, end_block):
        if verbose and j % progress_interval == 0:
            print('#{:4d}/{:4d} is doing {:6d}/{:6d} [={:6d}/{:6d}]; '
                  'time={:18.12f}'
                  .format(mpi_rank, mpi_size, j+1, nt,
                          j-start_block+1, end_block-start_block,
                          (tstart+dtsample*j*ntint).value))  # time since start

        # Just in case numbers were set wrong -- break if file ends;
        # better keep at least the work done.
        try:
            raw = fh.seek_record_read(int((nskip+j)*fh.blocksize),
                                      fh.blocksize)
        except(EOFError, IOError) as exc:
            print("Hit {0!r}; writing data collected.".format(exc))
            break
        if verbose >= 2:
            print("#{:4d}/{:4d} read {} items"
                  .format(mpi_rank, mpi_size, raw.size), end="")

        if npol == 2 and raw.dtype.fields is not None:
            raw = raw.view(list(raw.dtype.fields.values())[0][0])

        if fh.nchan == 1:  # raw.shape=(ntint*npol)
            raw = raw.reshape(-1, npol)
        else:              # raw.shape=(ntint, nchan*npol)
            raw = raw.reshape(-1, fh.nchan, npol)

        if dedisperse == 'incoherent' and oversample > 1:
            raw = ifft(raw, axis=1, **_fftargs).reshape(-1, nchan, npol)
            raw = fft(raw, axis=1, **_fftargs)

        if rfi_filter_raw is not None:
            raw, ok = rfi_filter_raw(raw)
            if verbose >= 2:
                print("... raw RFI (zap {0}/{1})"
                      .format(np.count_nonzero(~ok), ok.size), end="")

        if np.can_cast(raw.dtype, np.float32):
            vals = raw.astype(np.float32)
        else:
            assert raw.dtype.kind == 'c'
            vals = raw

        # For pre-channelized data, data are always complex,
        # and should have shape (ntint, nchan, npol).
        # For baseband data, we wish to get to the same shape for
        # incoherent or by_channel, or just to fully channelized for coherent.
        if fh.nchan == 1:
            # If we need coherent dedispersion, do FT of whole thing,
            # otherwise to output channels, mimicking pre-channelized data.
            if raw.dtype.kind == 'c':  # complex data
                nsamp = len(vals) if dedisperse == 'coherent' else nchan
                vals = fft(vals.reshape(-1, nsamp, npol), axis=1,
                           **_fftargs)
            else:  # real data
                nsamp = len(vals) if dedisperse == 'coherent' else nchan * 2
                vals = rfft(vals.reshape(-1, nsamp, npol), axis=1,
                            **_rfftargs)
                # Sadly, the way data are stored depends on what FFT routine
                # one is using.  We cannot deal with scipy's.
                if vals.dtype.kind == 'f':
                    raise TypeError("Can no longer deal with scipy's format "
                                    "for storing FTs of real data.")

        if fedge_at_top:
            # take complex conjugate to ensure by-channel de-dispersion is
            # applied correctly.
            # This needs to be done for ARO data, since we are in 2nd Nyquist
            # zone; not clear it is needed for other telescopes.
            np.conj(vals, out=vals)

        # Now we coherently dedisperse, either all of it or by channel.
        if need_fine_channels:
            # for by_channel, we have vals.shape=(ntint, nchan, npol),
            # and want to FT over ntint to get fine channels;
            if vals.shape[0] > 1:
                fine = fft(vals, axis=0, **_fftargs)
            else:
                # for coherent, we just reshape:
                # (1, ntint*nchan, npol) -> (ntint*nchan, 1, npol)
                fine = vals.reshape(-1, 1, npol)

            # Dedisperse.
            fine *= dd_coh

            # Still have fine.shape=(ntint, nchan, npol),
            # w/ nchan=1 for coherent.
            if fine.shape[1] > 1 or raw.dtype.kind == 'c':
                vals = ifft(fine, axis=0, **_fftargs)
            else:
                vals = irfft(fine, axis=0, **_rfftargs)

            if fine.shape[1] == 1 and nchan > 1:
                # final FT to get requested channels
                if vals.dtype.kind == 'f':
                    vals = vals.reshape(-1, nchan*2, npol)
                    vals = rfft(vals, axis=1, **_rfftargs)
                else:
                    vals = vals.reshape(-1, nchan, npol)
                    vals = fft(vals, axis=1, **_fftargs)
            elif dedisperse == 'by-channel' and oversample > 1:
                vals = vals.reshape(-1, oversample, fh.nchan, npol)
                vals = fft(vals, axis=1, **_fftargs)
                vals = vals.transpose(0, 2, 1, 3).reshape(-1, nchan, npol)

            # vals[time, chan, pol]
            if verbose >= 2:
                print("... dedispersed", end="")

        if npol == 1:
            power = vals.real**2 + vals.imag**2
        else:
            p0 = vals[..., 0]
            p1 = vals[..., 1]
            power = np.empty(vals.shape[:-1] + (4,), np.float32)
            power[..., 0] = p0.real**2 + p0.imag**2
            power[..., 1] = p0.real*p1.real + p0.imag*p1.imag
            power[..., 2] = p0.imag*p1.real - p0.real*p1.imag
            power[..., 3] = p1.real**2 + p1.imag**2

        if verbose >= 2:
            print("... power", end="")

        # current sample positions and corresponding time in stream
        isr = j*(ntint // oversample) + np.arange(ntint // oversample)
        tsr = (isr*dtsample*oversample)[:, np.newaxis]

        if rfi_filter_power is not None:
            power = rfi_filter_power(power, tsr.squeeze())
            print("... power RFI", end="")

        # correct for delay if needed
        if dedisperse in ['incoherent', 'by-channel']:
            # tsample.shape=(ntint/oversample, nchan_in)
            tsr = tsr - dt

        if do_waterfall:
            # # loop over corresponding positions in waterfall
            # for iw in xrange(isr[0]//ntw, isr[-1]//ntw + 1):
            #     if iw < nwsize:  # add sum of corresponding samples
            #         waterfall[iw, :] += np.sum(power[isr//ntw == iw],
            #                                    axis=0)[ifreq]
            iw = np.round((tsr / dtsample / oversample).to(1)
                          .value / ntw).astype(int)
            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iwk = iw[:, (0 if iw.shape[1] == 1 else kfreq // oversample)]
                iwk = np.clip(iwk, 0, nwsize-1, out=iwk)
                iwkmin = iwk.min()
                iwkmax = iwk.max()+1
                for ipow in range(npol**2):
                    waterfall[iwkmin:iwkmax, k, ipow] += np.bincount(
                        iwk-iwkmin, power[:, kfreq, ipow], iwkmax-iwkmin)
            if verbose >= 2:
                print("... waterfall", end="")

        if do_foldspec:
            ibin = (j*ntbin) // nt  # bin in the time series: 0..ntbin-1

            # times and cycles since start time of observation.
            tsample = tstart + tsr
            phase = (phasepol(tsample.to(u.s).value.ravel())
                     .reshape(tsample.shape))
            # corresponding PSR phases
            iphase = np.remainder(phase*ngate, ngate).astype(int)

            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iph = iphase[:, (0 if iphase.shape[1] == 1
                                 else kfreq // oversample)]
                # sum and count samples by phase bin
                for ipow in range(npol**2):
                    foldspec[ibin, k, :, ipow] += np.bincount(
                        iph, power[:, kfreq, ipow], ngate)
                icount[ibin, k, :] += np.bincount(
                    iph, power[:, kfreq, 0] != 0., ngate).astype(np.int32)

            if verbose >= 2:
                print("... folded", end="")

        if verbose >= 2:
            print("... done")

    #Commented out as workaround, this was causing "Referenced before assignment" errors with JB data
    #if verbose >= 2 or verbose and mpi_rank == 0:
    #    print('#{:4d}/{:4d} read {:6d} out of {:6d}'
    #          .format(mpi_rank, mpi_size, j+1, nt))

    if npol == 1:
        if do_foldspec:
            foldspec = foldspec.reshape(foldspec.shape[:-1])
        if do_waterfall:
            waterfall = waterfall.reshape(waterfall.shape[:-1])

    return foldspec, icount, waterfall
Example #41
            if vals.shape[0] > 1:
                fine = fft(vals, axis=0, **_fftargs)
            else:
                # for coherent, we just reshape:
                # (1, ntint*nchan, npol) -> (ntint*nchan, 1, npol)
                fine = vals.reshape(-1, 1, npol)

            # Dedisperse.
            fine *= dd_coh

            # Still have fine.shape=(ntint, nchan, npol),
            # w/ nchan=1 for coherent.
            if fine.shape[1] > 1 or raw.dtype.kind == 'c':
                vals = ifft(fine, axis=0, **_fftargs)
            else:
                vals = irfft(fine, axis=0, **_rfftargs)

            if fine.shape[1] == 1 and nchan > 1:
                # final FT to get requested channels
                if vals.dtype.kind == 'f':
                    vals = vals.reshape(-1, nchan*2, npol)
                    vals = rfft(vals, axis=1, **_rfftargs)
                else:
                    vals = vals.reshape(-1, nchan, npol)
                    vals = fft(vals, axis=1, **_fftargs)
            elif dedisperse == 'by-channel' and oversample > 1:
                vals = vals.reshape(-1, oversample, fh.nchan, npol)
                vals = fft(vals, axis=1, **_fftargs)
                vals = vals.transpose(0, 2, 1, 3).reshape(-1, nchan, npol)

            # vals[time, chan, pol]
Example #42
    def generate(self):
        X = np.random.randn(self.N // 2 + 1 + self.uneven) + 1j * np.random.randn(self.N // 2 + 1 + self.uneven)
        y = (irfft(X / self.S)).real
        if self.uneven:
            y = y[:-1]
        return normalize(y)