Example #1
 def test_definition(self):
     x = [0,1,1,2,2,3,3,4,4]
     assert_array_almost_equal(9*rfftfreq(9),x)
     assert_array_almost_equal(9*pi*rfftfreq(9,pi),x)
     x = [0,1,1,2,2,3,3,4,4,5]
     assert_array_almost_equal(10*rfftfreq(10),x)
     assert_array_almost_equal(10*pi*rfftfreq(10,pi),x)
Example #2
 def test_definition(self):
     x = [0, 1, 1, 2, 2, 3, 3, 4, 4]
     assert_array_almost_equal(9 * rfftfreq(9), x)
     assert_array_almost_equal(9 * pi * rfftfreq(9, pi), x)
     x = [0, 1, 1, 2, 2, 3, 3, 4, 4, 5]
     assert_array_almost_equal(10 * rfftfreq(10), x)
     assert_array_almost_equal(10 * pi * rfftfreq(10, pi), x)
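Note: these tests pin down scipy.fftpack.rfftfreq's packed layout, which differs from numpy.fft.rfftfreq. A minimal sketch of the difference (illustrative, not part of the test suite):

import numpy as np
from scipy import fftpack

n, d = 10, 1.0
print(fftpack.rfftfreq(n, d) * n)   # [0. 1. 1. 2. 2. 3. 3. 4. 4. 5.] -- one entry per packed fftpack.rfft output
print(np.fft.rfftfreq(n, d) * n)    # [0. 1. 2. 3. 4. 5.] -- one entry per complex numpy.fft.rfft bin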
Example #3
    def mask_freqs(self):
        # drop f[0] and f[max] if length is even
        # fs = R[0], R[1], Im[1], R[2], Im[2], ... , R[n/2-1], Im[n/2-1], R[n/2]
        if self.samplen % 2 == 0:
            freqs = fftpack.rfftfreq(self.samplen, self.rate)[1:-1:2]
        else:
            freqs = fftpack.rfftfreq(self.samplen, self.rate)[1::2]

        # target spectrum window
        if self.fmin is not None:
            freqs = freqs[freqs >= self.fmin]
        if self.fmax is not None:
            if 2 * self.fmax < freqs[-1]:
                warn('highest frequency bin ({}) is 2x larger than '
                     'cutoff frequency ({}), consider subsampling'.format(
                         freqs[-1], self.fmax),
                     stacklevel=2)
            freqs = freqs[freqs <= self.fmax]

        # initial mask is everything
        maskix = np.arange(len(freqs))

        # drop all freqs that are not a multiple of k
        if self.multiplesof is not None:
            rests = divmod(freqs, self.multiplesof)[1]
            # TODO: handle empty list
            tmpix = np.where(np.isclose(rests, 0))[0]
            maskix = np.array([x for x in maskix if x in tmpix])

        self.validmask = maskix
        num_f = len(maskix)

        if self.bins is not None and num_f < self.bins:
            warn(
                'number of bins ({}) exceeds valid frequency count ({}). Is the sample size a multiple of {}?'
                .format(self.bins, num_f, self.multiplesof))
            self.bins = num_f

        if self.logdistance is not None:
            # FIXME
            # there is probably an actual formula for this 8)
            # also handle self.distance here in case we want an equidistant mask
            i = 1
            bins = 0
            while (bins < self.bins):
                tmpix = np.logspace(0, np.log10(len(maskix)), i)
                tmpix = np.round(tmpix).astype('int') - 1
                tmpix = np.unique(tmpix)
                bins = len(tmpix)
                i += 1
            maskix = np.array([x for x in maskix if x in tmpix])

        self.distmask = maskix
        return np.array([freqs[x] for x in maskix])
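The slicing above follows from fftpack's packed ordering quoted in the comment (Re[k] and Im[k] share a frequency): taking every other entry, skipping index 0 and the trailing Nyquist entry for even lengths, yields each positive frequency exactly once. A small illustrative check, independent of the class:

from scipy import fftpack

for n in (8, 9):
    f = fftpack.rfftfreq(n, 1.0)
    uniq = f[1:-1:2] if n % 2 == 0 else f[1::2]
    print(n, uniq * n)   # [1. 2. 3.] for n=8, [1. 2. 3. 4.] for n=9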
Example #4
 def __getitem__(self, i):
     # Process EEG
     eeg = ((self.data[i]["eeg"].float() - self.means) / self.stddevs
            )  #.t() # CxT
     # Check filtering
     # Uses global opt
     if opt.filtering:
         # Time axis
         N = eeg.size(1)
         T = 1.0 / 1000.0
         time = np.linspace(0.0, N * T, N)
         # Frequency axis
         w = rfftfreq(N, T)
         # FFT
         eeg = eeg.numpy()
         eeg_fft = rfft(eeg)
         # Filter
         eeg_fft[:, w < 15] = 0
         eeg_fft[:, np.bitwise_and(w > 47, w < 53)] = 0
         eeg_fft[:, w > 71] = 0
         eeg = irfft(eeg_fft)
         # Convert to tensor
         eeg = torch.tensor(eeg)
     # Transpose to TxC
     eeg = eeg.t()
     eeg = eeg[20:460, :]
     # Get label
     label = self.data[i]["label"]
     # Return
     return eeg, label
Example #5
def getFrequency(price_values):
    #Piazza Code
    yhat = fftpack.rfft(price_values)
    idx = (yhat[1:]**2).argmax() + 1
    freqs = fftpack.rfftfreq(len(price_values), d=(1.0) / (2 * np.pi))
    frequency = freqs[idx]
    return frequency
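Because d is set to 1/(2*pi), the value returned above is an angular frequency in radians per sample. A hedged usage sketch with synthetic prices (not from the original code):

import numpy as np

prices = np.sin(2 * np.pi * np.arange(200) / 20.0)   # 20-sample cycle
print(getFrequency(prices))   # roughly 2*pi/20 ~= 0.314 rad per sample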
Example #6
def get_fft_freq(length, fps=None, time_step=None):
    if fps is None and time_step is None:
        print("Either fps or time_step must not be None.")
        return None
    time_step = time_step if fps is None else (1.0 / fps)
    freq = rfftfreq(length, time_step)
    return freq
Example #7
	def sineFit(self,xReal,yReal):
		N=len(xReal)
		OFFSET = (yReal.max()+yReal.min())/2.
		yhat = fftpack.rfft(yReal-OFFSET)
		idx = (yhat**2).argmax()
		freqs = fftpack.rfftfreq(N, d = (xReal[1]-xReal[0])/(2*np.pi))
		frequency = freqs[idx]/(2*np.pi)  #Convert angular velocity to freq

		amplitude = (yReal.max()-yReal.min())/2.0
		phase=0#.5*np.pi*((yReal[0]-offset)/amplitude)
		guess = [amplitude, frequency, phase,0]
		try:
			(amplitude, frequency, phase,offset), pcov = optimize.curve_fit(self.sineFunc, xReal, yReal-OFFSET, guess)
			offset+=OFFSET
			ph = ((phase)*180/(np.pi))
			if(frequency<0):
				#print 'negative frq'
				return False

			if(amplitude<0):
				ph-=180

			if(ph<0):ph = (ph+720)%360
			freq=1e6*abs(frequency)
			amp=abs(amplitude)
			pcov[0]*=1e6
			#print pcov
			if(abs(pcov[-1][0])>1e-6):
				return False
			return [amp, freq, offset,ph]
		except:
			return False
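The pattern above (seed curve_fit with the peak of the packed rfft spectrum) recurs in several later examples. A self-contained sketch of just that technique, leaving out the class-specific sineFunc and the 1e6 unit scaling:

import numpy as np
from scipy import fftpack, optimize

def sine(x, a, f, ph, off):
    return off + a * np.sin(2 * np.pi * f * x + ph)

x = np.linspace(0.0, 1.0, 500)
y = sine(x, 2.0, 7.0, 0.3, 1.0) + 0.1 * np.random.randn(x.size)

yhat = fftpack.rfft(y - y.mean())               # remove DC so argmax picks a real peak
idx = (yhat ** 2).argmax()
f0 = fftpack.rfftfreq(len(x), d=x[1] - x[0])[idx]
p0 = [(y.max() - y.min()) / 2.0, f0, 0.0, y.mean()]
popt, pcov = optimize.curve_fit(sine, x, y, p0)
print(popt)   # amplitude, frequency (close to 7 Hz), phase, offset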
Example #8
def rfft_y(array, alpha, sample_rate, pp, scale=False):
    #change file format
    array = np.array(array)
    yt = np.ravel(array)
    #tukey window
    yt = yt * (tukey(yt.shape[0], alpha, sym=True))
    #scale to max(abs())=1
    if scale is True:
        scaler = StandardScaler(copy=False, with_mean=False, with_std=True)
        yt = scaler.fit_transform(yt.reshape(-1, 1))
    #rfft
    yf = np.abs(rfft(yt, n=sample_rate, axis=0))
    xf = rfftfreq(sample_rate, d=1. / sample_rate).reshape(-1, 1)
    #max pooling
    if yf.shape[0] % pp != 0:
        num_pad = (yf.shape[0] // pp) * pp + pp - yf.shape[0]
        yf_mp = np.pad(yf[:, 0], (0, num_pad), 'constant').reshape(-1, pp)
    else:
        yf_mp = yf.reshape(-1, pp)
    yf_mp = np.max(yf_mp, axis=1)
    fac_xf = (np.max(xf, axis=0) -
              np.min(xf, axis=0)) / xf.shape[0]  # stepsize xf
    xf_mp = np.arange((pp // 2) * fac_xf, (yf_mp.shape[0] * pp) * fac_xf + 1,
                      pp * fac_xf)
    return (yf_mp, xf_mp)
Example #9
def fit_rabi_t(xdata, ydata, f_pulse, scaling=True, offset=True, **kwargs):
    from scipy.fftpack import rfft, rfftfreq
    # ordering = xdata.argsort()
    ordering = argsort(xdata).values
    yhat = rfft(ydata[ordering])
    idx = (yhat**2).argmax()
    freqs = rfftfreq(len(xdata), d=xdata[ordering].diff()[1])
    #fR_guess = 0.7*freqs[idx]
    fR_guess = 2 / max(xdata)  # assuming roughly one Rabi period of data
    # fR_guess = 1/(19e-6)
    f0_guess = f_pulse
    params_guess = [f0_guess, fR_guess]
    # print params_guess
    A_guess = 1
    if scaling * 0:
        params_guess.append(A_guess)
    if scaling and offset:
        c_guess = 0
        params_guess.append(c_guess)

    def fitfn(t, *pars):
        return rabi(t, f_pulse, *pars)

    params, u_params = curve_fit(fitfn, xdata, ydata, params_guess, **kwargs)
    params[1] = abs(params[1])
    return params, u_params
Example #10
def whiten(strain, dt=1, phase_shift=0, time_shift=0):
    """Whitens strain data given the psd and sample rate, also applying a phase
    shift and time shift.

    Args:
        strain (ndarray): strain data
        interp_psd (interpolating function): function to take in freqs and output
            the average power at that freq
        dt (float): sample time interval of data
        phase_shift (float, optional): phase shift to apply to whitened data
        time_shift (float, optional): time shift to apply to whitened data (s)

    Returns:
        ndarray: array of whitened strain data
    """
    Nt = len(strain)
    ps = np.abs(fft(strain))**2
    # take the fourier transform of the data
    freqs = rfftfreq(Nt, dt)

    # whitening: transform to freq domain, divide by square root of psd, then
    # transform back, taking care to get normalization right.
    hf = rfft(strain)

    # apply time and phase shift
    hf = hf * np.exp(-1.j * 2 * np.pi * time_shift * freqs - 1.j * phase_shift)
    norm = 1. / np.sqrt(1. / (dt * 2))
    white_hf = hf / np.sqrt(interp_psd(freqs, ps, 0, 200)) * norm
    # white_ht = irfft(white_hf)
    white_ht = np.real(white_hf)
    return white_ht
Example #11
def compute_fft(fs, ir):

    import scipy.signal, numpy
    from scipy import fftpack

    # creating asymmetric bartlett window for spectral analysis
    window_bart = scipy.signal.bartlett(len(ir), sym=False)

    # windowing the impulse response
    ir_wind = ir * window_bart

    # computing fft
    sig_fft = fftpack.rfft(ir_wind)

    # setting length of fft
    n = sig_fft.size
    timestep = 1 / float(fs)

    # generating frequencies according to fft points
    freq = fftpack.rfftfreq(n, d=timestep)

    # normalizing fft
    sys_fft = abs(sig_fft) / n

    # TODO FFT computing
    return sys_fft, freq
Example #12
def fft_filter(x, fs, band=(9, 14)):
    w = fftpack.rfftfreq(x.shape[0], d=1. / fs)
    f_signal = fftpack.rfft(x, axis=0)
    cut_f_signal = f_signal.copy()
    cut_f_signal[(w < band[0]) | (w > band[1])] = 0
    cut_signal = fftpack.irfft(cut_f_signal, axis=0)
    return cut_signal
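A hedged usage sketch for fft_filter above (sampling rate and tones are illustrative): the component outside the 9-14 Hz band is removed.

import numpy as np

fs = 250
t = np.arange(0, 2, 1.0 / fs)
x = np.sin(2 * np.pi * 10 * t) + np.sin(2 * np.pi * 40 * t)
clean = fft_filter(x, fs, band=(9, 14))   # only the 10 Hz tone survives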
Example #13
    def addBandpassNoise(self, W, sigma_pass, sigma_out):
        """add gaussian colored noise with stddev sigma_pass in
        the 1kHz - 2Khz frequency range and stddev sigma_out
        outside the frequency range above"""
        lowerFreq = 1000 #1Khz
        higherFreq = 2000
        # round W.size up to the next power of two for an efficient FFT length
        v = W.size
        v -= 1
        v |= v >> 1
        v |= v >> 2
        v |= v >> 4
        v |= v >> 8
        v |= v >> 16
        v += 1
        white_noise = np.random.normal(0, 1, v)

        freq = rfftfreq(white_noise.size, d = 1/self._comm.FS)
        f_wnoise = rfft(white_noise)

        # If our original signal time was in seconds, this is now in Hz
        in_band_mask = np.logical_or(
                np.logical_and(freq > lowerFreq, freq < higherFreq),
                np.logical_and(freq > -higherFreq,freq < -lowerFreq)
                )
        out_of_band_mask = np.logical_not(in_band_mask)
        f_wnoise[in_band_mask] *= sigma_pass
        f_wnoise[out_of_band_mask] *= sigma_out
        return W + irfft(f_wnoise)[:W.size]
Example #14
def frequency():
    '''
    @Description: 
        Inspect the frequency values computed from the FFT output
    '''
    N = 10
    # When the input is constant, the (non-DC) output is 0
    x = np.ones(N)
    y = fftpack.fft(x)
    print('x={}'.format(x))
    print('fft(x)={}'.format(y))
    # When the input alternates, the output contains a high-frequency component
    z = np.ones(N)
    z[::2] = -1
    print('Applying FFT to {}'.format(z))
    print('fft(z)={}'.format(fftpack.fft(z)))
    # The fftfreq function shows which frequency deserves particular attention
    print('fftpack.fftfreq(10)={}'.format(fftpack.rfftfreq(10)))
    # For a real-valued input sequence, the output spectrum is conjugate symmetric
    x = np.array([1, 5, 12, 7, 3, 0, 4, 3, 2, 8])
    yx = fftpack.fft(x)
    np.set_printoptions(precision=2)
    print("Applying FFT to {}".format(x))
    print('Real part:\t{}'.format(yx.real))
    print('Imaginary part:\t{}'.format(yx.imag))
Example #15
def fft_filter(x, fs, band=(9, 14)):
    w = fftpack.rfftfreq(x.shape[0], d=1. / fs)
    f_signal = fftpack.rfft(x, axis=0)
    cut_f_signal = f_signal.copy()
    cut_f_signal[(w < band[0]) | (w > band[1])] = 0
    cut_signal = fftpack.irfft(cut_f_signal, axis=0)
    return cut_signal
Example #16
def high_pass(sig, threshold=1e-7):
    fourier = fftpack.rfft(sig)
    n = fourier.size
    spacing = 0.002
    freqs = fftpack.rfftfreq(n, spacing)
    fourier[freqs < threshold] = 0  #sets low frequencies to zero
    return fftpack.ifft(fourier)
Example #17
 def getFFT(self):
     freqs = []
     fdatas = np.abs(fftpack.rfft(self.data,axis=1))/self.data.shape[1]*2.0               
     for fdata in fdatas:                    
         freq = fftpack.rfftfreq(int(fdata.shape[0]),1.0/self.sample_rate)                
         freqs.append(freq)
     return (np.array(freqs), fdatas)
Example #18
def fft_amplitude(t, u):
    dt = np.abs(t[1] - t[0])
    len_t = len(t)
    u_fft = rfft(u)
    freq_fft = rfftfreq(len_t, dt)
    n = len(u)
    p_freq = freq_fft[1:n:2]
    n_freq = len(p_freq)
    amp_fft = np.zeros(n_freq)
    amp_fft[0] = u_fft[0] / n
    if n % 2 == 0:
        amp_fft[n_freq - 1] = u_fft[-1] / n
        j = 1
        for i in range(1, n_freq - 2):
            amp_fft[i] = 2 * np.sqrt(u_fft[j]**2 + u_fft[j + 1]**2) / n
            j += 2
    if n % 2 != 0:
        j = 1
        for i in range(1, n_freq - 1):
            amp_fft[i] = 2 * np.sqrt(u_fft[j]**2 + u_fft[j + 1]**2) / n
            j += 2

    ind_fake_freqs_1 = np.logical_or(p_freq <= 1.249e9, 1.251e9 <= p_freq)
    p_freq_1 = p_freq[ind_fake_freqs_1]
    amp_fft_1 = amp_fft[ind_fake_freqs_1]

    ind_fake_freqs_2 = np.logical_or(p_freq_1 <= 2.499e9, 2.501e9 <= p_freq_1)
    p_freq_2 = p_freq_1[ind_fake_freqs_2]
    amp_fft_2 = amp_fft_1[ind_fake_freqs_2]

    #ind_cutoff = p_freq_2 <= cutoff_frequency
    #cut_freq = p_freq_2[ind_cutoff]
    #cut_fft_amp = amp_fft_2[ind_cutoff]
    fft_amp_dict = {'frequency': p_freq_2, 'amplitude': amp_fft_2}
    return fft_amp_dict
Example #19
    def __init__(self, name, n, freq):
        super().__init__()
        layout = QHBoxLayout()
        self.setWindowTitle(name)

        color = np.ones((n, 4), dtype=np.float32)
        color[:, 0] = np.linspace(0, 1, n)
        color[:, 1] = color[::-1, 0]
        canvas = scene.SceneCanvas(keys='interactive', show=True)
        grid = canvas.central_widget.add_grid(spacing=0)
        self.viewbox = grid.add_view(row=0, col=1, camera='panzoom')
        x_axis = scene.AxisWidget(orientation='bottom')
        x_axis.stretch = (1, 0.1)
        grid.add_widget(x_axis, row=1, col=1)
        x_axis.link_view(self.viewbox)
        y_axis = scene.AxisWidget(orientation='left')
        y_axis.stretch = (0.1, 1)
        grid.add_widget(y_axis, row=0, col=0)
        y_axis.link_view(self.viewbox)

        self.pos = np.zeros((n, 2), dtype=np.float32)
        self.pos[:, 0] = rfftfreq(n, 1/freq)
        #pos[:, 0] = self.x_mesh[:self.n_samples_to_display]
        self.line = scene.Line(self.pos, color, parent=self.viewbox.scene)

        self.viewbox.camera.set_range()
        self.freqbar = pg.BarGraphItem(x=[1], height=0, width=0.6, brush='g')
        self.plot = pg.PlotWidget()
        self.plot.addItem(self.freqbar)
        self.plot.setFixedWidth(100)
        layout.addWidget(canvas.native)
        layout.addWidget(self.plot)
        self.setLayout(layout)
Example #20
def rfft_freq(data, window_func=signal.hanning):
    w = window_func(data.size)
    sig_fft = fftpack.rfft(data * w)
    freq = fftpack.rfftfreq(sig_fft.size, d=SAMPLING_INTERVAL)
    freq = freq[range(data.size // 2)]
    sig_fft = sig_fft[range(data.size // 2)]
    return sig_fft, freq
Example #21
def main(raw_data):
    """ Performs FFT band pass filter 
    plots and saves filtered data

    Args:
         raw_data -> np.array((points, 2))

    """

    # Perform FFT /
    fft_signal = fftpack.rfft(raw_data[:, 1])
    points = len(raw_data[:, 0])
    dx = raw_data[1, 0] - raw_data[0, 0]
    freq = fftpack.rfftfreq(points, d=dx)

    # apply band pass filter
    filtered_signal = filter_signal(freq, fft_signal)

    # transform back
    filtered_data = fftpack.irfft(filtered_signal)

    plt.subplot(211)
    plt.plot(raw_data[:, 0], raw_data[:, 1], 'k.')
    plt.ylabel('Signal a.u.')

    plt.subplot(212)
    plt.plot(raw_data[:, 0], filtered_data, 'b-')
    plt.ylabel('Signal')
    plt.xlabel('Time')
    plt.show()
    np.savetxt('filtered_data.out',
               np.c_[raw_data[:, 0], filtered_data],
               header='{:22} {:22}'.format('Time', 'Filtered'))
Example #22
def compute_fft(fs, ir):

    import scipy.signal, numpy
    from scipy import fftpack

    # creating asymmetric bartlett window for spectral analysis
    window_bart = scipy.signal.bartlett(len(ir), sym=False)

    # windowing the impulse response
    ir_wind = ir * window_bart

    #computing fft
    sig_fft = fftpack.rfft(ir_wind)

    #setting length of fft
    n = sig_fft.size
    timestep = 1 / float(fs)

    #generating frequencies according to fft points
    freq = fftpack.rfftfreq(n, d=timestep)

    #normalizing fft
    sys_fft = abs(sig_fft) / n

    # TODO FFT computing
    return sys_fft, freq
Example #23
def getFrequency(lookbackValues):
    import scipy.fftpack as fftpack
    yhat = fftpack.rfft(lookbackValues)
    idx = (yhat[1:]**2).argmax() + 1
    freqs = fftpack.rfftfreq(len(lookbackValues), d=(1.0) / (2 * np.pi))
    frequency = freqs[idx]
    return frequency
Example #24
    def postprocess(self):
        if not self.postprocessed:
            print("Postprocessing grid.")
            self.t = np.arange(self.NT) * self.dt
            self.longitudinal_energy_history = 0.5 * self.epsilon_0 * (
                self.electric_field_history[:, :, 0]**2)
            perpendicular_electric_energy = 0.5 * self.epsilon_0 * (
                self.electric_field_history[:, :, 1:]**2).sum(
                    2)  # over directions
            mu_zero_inv = 1 / (self.epsilon_0 * self.c**2)
            magnetic_energy = 0.5 * (self.magnetic_field_history**
                                     2).sum(2) * mu_zero_inv  # over directions

            self.perpendicular_energy_history = perpendicular_electric_energy + magnetic_energy
            self.check_on_charge = np.gradient(
                self.electric_field_history[:, :, 0], self.dx,
                axis=1) * self.epsilon_0
            # fourier analysis
            from scipy import fftpack
            self.k_plot = fftpack.rfftfreq(int(self.NG), self.dx)[::2]
            self.longitudinal_energy_per_mode_history = np.abs(
                fftpack.rfft(self.longitudinal_energy_history))[:, ::2]
            self.perpendicular_energy_per_mode_history = np.abs(
                fftpack.rfft(self.perpendicular_energy_history))[:, ::2]

            self.longitudinal_energy_history = self.longitudinal_energy_history.sum(
                1)
            self.perpendicular_energy_history = self.perpendicular_energy_history.sum(
                1)
            self.grid_energy_history = self.perpendicular_energy_history + self.longitudinal_energy_history  # over positions
            vacuum_wave_impedance = 1 / (self.epsilon_0 * self.c)
            np.cumsum(self.laser_energy_history**2 / vacuum_wave_impedance *
                      self.dt)
            self.x_current = self.x + self.dx / 2
            self.postprocessed = True
Example #25
def plot_wav_analysis(wav_samples, sample_rate, freqs=[], bw=None, dft=False):
    """ Perform DFT on the samples, select the given frequencies, and plot."""
    plot_dicts = list()
    time_domain = np.arange(len(wav_samples)) / sample_rate
    plot_dicts.append({'title': 'wav samples', 'color': 'red',
                       'x': time_domain, 'xlabel': 'time (sec)',
                       'y': wav_samples, 'ylabel': 'amplitude'})
    if dft:
        arr_axis = int(np.argmax(wav_samples.shape))
        wav_samples_dft = fftp.rfft(wav_samples, axis=arr_axis)
        n = wav_samples_dft.shape[0]
        freq_domain = sample_rate * fftp.rfftfreq(n)

        plot_dicts.append({'title': 'wav samples dft',
                           'x': freq_domain, 'xlabel': 'frequency',
                           'y': wav_samples_dft, 'ylabel': 'amplitude'})
    if type(bw) == int:
        bwl = [bw] * len(freqs)
    elif bw is None:
        bwl = [10] * len(freqs)
    else:
        bwl = bw
    for i, f in enumerate(freqs):
        wav, _ = ap.bandpass_filter(wav_samples, sample_rate, f, bwl[i])
        plot_dicts.append({'title': 'bandpass filter, fc={}, bw={}'.format(f, bwl[i]),
                           'x': time_domain, 'xlabel': 'time (sec)',
                           'y': wav, 'ylabel': 'amplitude'})
    plot_wav(plot_dicts)
Example #26
def acquire_signal_and_dft():
    txt = signal.value
    nse = noise.value
    size = 200
    x = np.linspace(0, 4, size)
    dt = x[1] - x[0]

    y = np.zeros(size)
    F = np.zeros(size)
    w = np.zeros(size)

    if nse:
        some_noise = float(nse)*np.random.random_sample(size)
        y += some_noise

    if txt:
        #Put y in existence
        txt = 'z = ' + txt
        cur_locs = locals()
        cur_globals = globals()
        exec(txt, cur_globals, cur_locs)
        y = y + cur_locs['z']
        #scipyF = np.abs(ftp.rfft(y))
        Freal, Fimg = fwrapper.dft(y, np.zeros(size), size)
        F = np.abs(extract_real_dft(Freal, Fimg))
        w = ftp.rfftfreq(size, dt)

    plt_df = pd.DataFrame(data={"x":x, "y":y, 'dft':F, 'f':w, "color":['blue']*size})
    return plt_df
Example #27
def acquire_signal_and_dft():
    txt = signal.value
    nse = noise.value
    size = 200
    x = np.linspace(0, 4, size)
    dt = x[1] - x[0]

    y = np.zeros(size)
    F = np.zeros(size)
    w = np.zeros(size)

    if nse:
        some_noise = float(nse) * np.random.random_sample(size)
        y += some_noise

    if txt:
        #Put y in existence
        txt = 'z = ' + txt
        cur_locs = locals()
        cur_globals = globals()
        exec(txt, cur_globals, cur_locs)
        y = y + cur_locs['z']
        #scipyF = np.abs(ftp.rfft(y))
        Freal, Fimg = fwrapper.dft(y, np.zeros(size), size)
        F = np.abs(extract_real_dft(Freal, Fimg))
        w = ftp.rfftfreq(size, dt)

    plt_df = pd.DataFrame(data={
        "x": x,
        "y": y,
        'dft': F,
        'f': w,
        "color": ['blue'] * size
    })
    return plt_df
Example #28
def plot_measure_fts(Fmeas_dict, fignum=1):
    nr = len(list(Fmeas_dict.keys()))
    gs = gridspec.GridSpec(nr, 1)
    fig = plt.figure(fignum)

    for r, (name, vals) in enumerate(Fmeas_dict.items()):
        ax = fig.add_subplot(gs[r])
        xtick_labels = False
        xlabel = ''

        if r == nr - 1:
            xtick_labels = True
            xlabel = 'Frequency'

        amps = Fmeas_dict[name]['amps']
        freqs = spf.rfftfreq(len(amps))
        RN = Fmeas_dict[name]['RN']
        plot_ft(freqs,
                amps,
                ax,
                RN=RN,
                xlabel=xlabel,
                ylabel=r'$|\mathcal{F}$(' + name + r'$)|^2$',
                xtick_labels=xtick_labels)

        ax.grid('on')
    plt.tight_layout()
Example #29
def fourier_calc(s1, s2, t):
    """calculates rapid fft"""
    x1f = rfft(s1.x[0, :] + s1.x[0, 0])
    x2f = rfft(s2.x[0, :] + s2.x[0, 0])
    y1f = rfft(s1.x[1, :] + s1.x[1, 0])
    y2f = rfft(s2.x[1, :] + s2.x[1, 0])
    tf = rfftfreq(t.sn, t.dt)
    return array((x1f, x2f, y1f, y2f, tf))
Example #30
def FFT(signal, dT):
    """
    :param signal: [array]
    :param dT: sample space [float]
    """
    ampl = np.abs(rfft(signal)) * 2.0 / len(signal)
    freq = rfftfreq(len(ampl), d=dT)
    return ampl, freq
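A hedged usage sketch for FFT above (assuming rfft/rfftfreq are scipy.fftpack's, as in the surrounding examples): the peak of the amplitude array falls at the tone's frequency.

import numpy as np

dT = 0.01
t = np.arange(0, 4, dT)
ampl, freq = FFT(np.sin(2 * np.pi * 5 * t), dT)
print(freq[np.argmax(ampl)])   # close to 5 Hz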
Example #31
def rfft_y_base(array, sample_rate):
    #change file format
    array = np.array(array)
    yt = np.ravel(array)
    #rfft
    yf = np.abs(rfft(yt, n=sample_rate, axis=0))
    xf = rfftfreq(yf.size, d=1. / sample_rate).reshape(-1, 1)
    return (yf, xf)
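Usage sketch for rfft_y_base above (values are illustrative): one second of a 440 Hz tone sampled at 8 kHz, so the packed frequency bins in xf fall on integer hertz.

import numpy as np

sr = 8000
t = np.arange(sr) / sr
yf, xf = rfft_y_base(np.sin(2 * np.pi * 440 * t), sr)
print(float(xf[np.argmax(yf)]))   # close to 440 Hz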
Example #32
def get_feat(file_name):
    a, sr = librosa.load(file_name)
    fft_wave = fftpack.rfft(a, n=sr)
    fft_freq = fftpack.rfftfreq(n=sr, d=1/sr)
    y = librosa.amplitude_to_db(fft_wave, ref=np.max)
#   plt.plot(fft_freq, y)
#   plt.show()
    return y
Example #33
def detect_events(sample_freq, queue):
    """
    Does FFT based event detection and plotting.

    :type sample_freq: int
    :param sample_freq: Sampling frequency in Hz
    :type queue: Multiprocessing Queue
    :param queue: detect_events() reads from this queue to acquire batches of IMU data captured by the acquire_data()
                  process.
    :return: None
    """

    # detect_events() blocks on the queue read, so the data processing rate is driven by the rate
    # at which acquire_data() puts batches of IMU data on the queue.

    while True:

        imu_data_dict = queue.get()

        # Check the timestamp on this data frame. If it's more than one second behind the current
        # CLOCK_MONOTONIC_RAW time, then detect_events() isn't keeping up with the incoming stream
        # of IMU measurements coming from acquire_data(). Jump back to the top of the while loop,
        # and get another data frame. We'll keep doing that until we're reading fresh data again.

        # TODO: the threshold here might need to be adjusted, and perhaps the whole queue should be cleared.

        if (time.clock_gettime(time.CLOCK_MONOTONIC_RAW) - imu_data_dict['timestamp']) > 1:

            continue

        df = pd.DataFrame(data=imu_data_dict['w_vel'][0], columns=['x'])

        # print("Number of NaN points: " + str(df.isna().sum()))

        # Doing a linear interpolation that replaces the NaN placeholders for desynced, dropped data
        # with a straight line between the last good data point before a dropout and the next good data
        # point. This does seem to improve the signal-to-noise ratio in the FFT.
        df = df.interpolate()

        yf = fftpack.rfft(df.loc[:, 'x'])

        # Is this a better way of getting the power spectrum than numpy.abs(yf?)
        yf = numpy.sqrt((yf * yf.conj()).real)

        xf = fftpack.rfftfreq((len(df.index)), 1./sample_freq)

        peaks, peaks_dict = signal.find_peaks(yf, height=1)

        #print(xf[peaks], yf[peaks])
        #print(peaks, peaks_dict)

        # Check if any peak has been detected between 3 and 5 Hz, along the x axis. This is our rule for detecting
        # tooth-brushing behavior.

        #if numpy.where(numpy.logical_and(xf[peaks] >= 3, xf[peaks] <= 5))[0].size > 0:
        if numpy.where(peaks_dict['peak_heights'] > 800)[0].size > 0:

            print("Scratching Detected!")
Example #34
 def convert_freq(self):
     self.original_complex = rfft(self.original_sig)
     self.modified_complex = np.copy(self.original_complex)
     self.freq = (rfftfreq(
         len(self.original_complex) + 1, 1 / self.sample_rate))
     self.freq = self.freq[self.freq > 0]
     print(self.freq[len(self.freq) - 1])
     self.data_line.setData(self.freq, abs(self.modified_complex))
     self.ui.modified_frequ.setXRange(0, 20000)
Example #35
def FFT_deNoise(y, dx, noise_level, noise_filter=0.1):
    w = rfft(y)
    f = rfftfreq(len(y), dx)
    spectrum = w**2
    cutoff = spectrum < (spectrum.max()*noise_level*noise_filter)
    w_clean = w.copy()
    w_clean[cutoff] = 0
    y_clean = irfft(w_clean)
    return f, spectrum, w_clean, y_clean
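A hedged usage sketch for FFT_deNoise above (noise_level and noise_filter values are illustrative): spectral components whose power falls below the chosen fraction of the peak are zeroed before the inverse transform.

import numpy as np

dx = 0.01
x = np.arange(0, 5, dx)
y = np.sin(2 * np.pi * 3 * x) + 0.5 * np.random.randn(x.size)
f, spectrum, w_clean, y_clean = FFT_deNoise(y, dx, noise_level=1.0, noise_filter=0.1)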
Example #36
def PassBand(vs, ts, fmin, fmax):
    tstep = np.mean(np.diff(ts))
    F = rfftfreq(len(vs)) / tstep  # len(vs) points between 0 and 1/(2*tstep) = 0.5e10 Hz
    VS = rfft(vs)
    VS[F < fmin] = 0
    VS[F > fmax] = 0
    vs = irfft(VS)

    return (vs)
Example #37
def low_pass(sig, threshold=1e-4):  #threshold can be changed
    fourier = fftpack.rfft(sig)  #real FFT of sig in fftpack's packed format
    n = fourier.size
    spacing = 0.002
    freqs = fftpack.rfftfreq(n, d=spacing)
    fourier[freqs > threshold] = 0  #sets high frequency indices to zero for smoothing
    return fftpack.ifft(fourier)  #returns only the low frequencies
Example #38
	def sinefit(yReal, xReal):
	    yhat = fftpack.rfft(yReal)
	    idx = (yhat**2).argmax()
	    freqs = fftpack.rfftfreq(len(xReal), d = (xReal[1]-xReal[0])/(2*pi))
	    frequency = freqs[idx]
	
	    amplitude = abs(yReal.max())
	    guess = [amplitude, frequency, 0.]
	
	    (amplitude, frequency, phase), pcov = optimize.curve_fit(mysine, xReal, yReal, guess)
	    period = 2*pi/frequency
	    return [amplitude, frequency, phase]
Example #39
	def getGuessValues(self,xReal,yReal,func='sine'):
		if(func=='sine' or func=='damped sine'):
			N=len(xReal)
			offset = np.average(yReal)
			yhat = fftpack.rfft(yReal-offset)
			idx = (yhat**2).argmax()
			freqs = fftpack.rfftfreq(N, d = (xReal[1]-xReal[0])/(2*np.pi))
			frequency = freqs[idx]

			amplitude = (yReal.max()-yReal.min())/2.0
			phase=0.
			if func=='sine':
				return amplitude, frequency, phase,offset
			if func=='damped sine':
				return amplitude, frequency, phase,offset,0
Example #40
 def test_band_pass(self):
     """Band pass filtering."""
     # bandpass around the middle frequency
     ans = band_pass(self.dat, 6, 8)
     # the amplitudes
     fourier = np.abs(rfft(ans.data, axis=0) * 2 / self.dat.data.shape[0])
     ffreqs = rfftfreq(ans.data.shape[0], 1/ans.fs)
     # check if the outer freqs are damped close to zero
     # freqs...
     for i in self.freqs[0], self.freqs[-1]:
         # buckets for freqs
         for j in fourier[ffreqs == i]:
             # channels
             for k in j:
                 self.assertAlmostEqual(k, 0., delta=.1)
Example #41
    def __fill_rfft_mapping():
        # Populate mapping variables with corresponding frequencies and bin indexes
        audio.__rfft_freq_index = []

        sample_frequencies = rfftfreq(config_audio.frames_per_buffer, 1 / float(config_audio.sampling_rate)).tolist()

        for i in range(len(sample_frequencies)):
            sample_frequencies[i] = int(round(sample_frequencies[i]))
            if sample_frequencies[i] not in audio.__rfft_freq_index:
                audio.__rfft_freq_index.append(sample_frequencies[i])

        audio.__rfft_bin_index = []

        for i in range(config_audio.sampling_rate // 2 + 1):
            audio.__rfft_bin_index.append(int(round(i * config_audio.frames_per_buffer / float(config_audio.sampling_rate))))
Example #42
    def rfft(self, d=None, ngrid=None, boxsize=None, real=True):
        if d is None:
            d = self.data
        if ngrid is None:
            ngrid = self.ngrid
        if boxsize is None:
            boxsize = self.boxsize

        if real:
            self.dk = np.fft.rfftn(d)
            self.k = ([sfft.fftfreq(self.dk.shape[i], 2. * np.pi / boxsize)
                       for i in range(d.ndim - 1)]
                      + [sfft.rfftfreq(self.dk.shape[-1], 2. * np.pi / boxsize)])
        else:
            raise Exception()

        return  # self.dk
Example #43
def fitcos(x, y, fitY0=False, guess=None):
    """
    Fit a cosine to the data in x and y. x is expected to be in rad
    """
    def cos(x, amplitude, frequency, phase):
        return amplitude * np.cos(frequency * x + phase)   
    def cos_y0(x, amplitude, frequency, phase, y0):
        return amplitude * np.cos(frequency * x + phase) + y0    

    x = np.array(x)
    y = np.array(y)    
    if not guess:       
        # fourier transform to find guess value for frequency
        yhat = fftpack.rfft(y)
        idx = (yhat**2).argmax()
        freqs = fftpack.rfftfreq(np.size(x), d = (x[0]-x[1])/(2*np.pi))
        frequency0 = freqs[idx]
        if frequency0 == np.inf or frequency0 == 0:
            frequency0 = 1
        # maximum to find guess for amplitude
        amplitude0 = np.abs(max(y)-min(y))/2
        y00 = (max(y)-min(y))/2+min(y)
        phase0 = 0.
    else:
        amplitude0 = guess[0]
        frequency0 = guess[1]
        phase0 = guess[2]
        # avoid an undefined y00 in the log message when no offset is fitted
        y00 = guess[3] if fitY0 else 0.
    l.debug("Fit cosine. Guessing: Amplitude %.3e, Frequency %.3e, Phase %.3e, Offset y0 %.3e" % (amplitude0, frequency0, phase0, y00))
    
    if fitY0:
        guess = [amplitude0, abs(frequency0), phase0, y00]
        (amplitude, frequency, phase, y0), pcov = optimize.curve_fit(
            cos_y0,
            x, y,
            guess)
        yFit = cos_y0(x, amplitude, frequency, +phase, y0)
        return (amplitude, frequency, phase, y0, yFit)
    else:
        guess = [amplitude0, abs(frequency0), phase0]
        (amplitude, frequency, phase), pcov = optimize.curve_fit(
            cos,
            x, y,
            guess)
        yFit = cos(x, amplitude, frequency, +phase)        
        return (amplitude, frequency, phase, 0, yFit)
Example #44
def sin_fit(data):  
  # generate a perfect data set (my real data have tiny error)
  print(data)

  def mysine(x, a1, a2, a3):
      return a1 * np.sin(a2 * x + a3)


  # xmax = 10
  # xReal = np.linspace(0, xmax, N)
  # a1 = 200.
  # a2 = 2*pi/10.5  # omega, 10.5 is the period
  # a3 = np.deg2rad(10.) # 10 degree phase offset
  # print(a1, a2, a3)
  # data = mysine(xReal, a1, a2, a3) + 0.2*np.random.normal(size=len(xReal))

  xReal = np.linspace(0, len(data), len(data))  
  N = len(data)

  yhat = fftpack.rfft(data)
  idx = (yhat[1:]**2).argmax() + 1
  freqs = fftpack.rfftfreq(N, d = (xReal[1]-xReal[0])/(2*pi))
  frequency = freqs[idx]

  amplitude = max(data)
  guess = [amplitude, frequency, 0.]
  # print(guess)
  (amplitude, frequency, phase), pcov = optimize.curve_fit(
      mysine, xReal, data, guess)

  period = 2*pi/frequency
  # print(amplitude, frequency, phase)

  print(period)

  phase = 0
  data_fit = mysine(xReal, amplitude, frequency, phase)
  return data_fit, period, phase
Example #45
	def fitData(self,xReal,yReal,**args):
		def mysine(x, a1, a2, a3,a4):
		    return a4 + a1*np.sin(abs(a2)*x + a3)
		N=len(xReal)
		yhat = fftpack.rfft(yReal)
		idx = (yhat**2).argmax()
		freqs = fftpack.rfftfreq(N, d = (xReal[1]-xReal[0])/(2*np.pi))
		frequency = freqs[idx]

		amplitude = (yReal.max()-yReal.min())/2.0
		offset = yReal.max()-yReal.min()
		frequency=args.get('frequency',1e6*abs(frequency)/(2*np.pi))*(2*np.pi)/1e6
		phase=args.get('phase',0.)
		guess = [amplitude, frequency, phase,offset]
		try:
			(amplitude, frequency, phase,offset), pcov = optimize.curve_fit(mysine, xReal, yReal, guess)
			ph = ((phase)*180/(np.pi))

			if(frequency<0):
				#print 'negative frq'
				return 0,0,0,0,pcov

			if(amplitude<0):
				#print 'AMP<0'
				ph-=180

			if(ph<-90):ph+=360
			if(ph>360):ph-=360
			freq=1e6*abs(frequency)/(2*np.pi)
			amp=abs(amplitude)
			if(frequency):	period = 1./frequency
			else: period = 0
			pcov[0]*=1e6
			return amp,freq,ph,offset,pcov
		except:
			return 0,0,0,0,[[]]
Example #46
    def postprocess(self):
        if not self.postprocessed:
            print("Postprocessing grid.")
            self.t = np.arange(self.NT) * self.dt
            self.longitudinal_energy_history  = 0.5 * self.epsilon_0 * (self.electric_field_history[:,:,0] ** 2)
            perpendicular_electric_energy = 0.5 * self.epsilon_0 * (self.electric_field_history[:,:,1:] ** 2).sum(2) # over directions
            mu_zero_inv = 1/ (self.epsilon_0 * self.c**2)
            magnetic_energy = 0.5 * (self.magnetic_field_history **2).sum(2) * mu_zero_inv # over directions

            self.perpendicular_energy_history = perpendicular_electric_energy + magnetic_energy
            self.check_on_charge = np.gradient(self.electric_field_history[:, :, 0], self.dx, axis=1) * self.epsilon_0
            # fourier analysis
            from scipy import fftpack
            self.k_plot = fftpack.rfftfreq(int(self.NG), self.dx)[::2]
            self.longitudinal_energy_per_mode_history = np.abs(fftpack.rfft(self.longitudinal_energy_history))[:,::2]
            self.perpendicular_energy_per_mode_history = np.abs(fftpack.rfft(self.perpendicular_energy_history))[:,::2]

            self.longitudinal_energy_history  = self.longitudinal_energy_history.sum(1)
            self.perpendicular_energy_history = self.perpendicular_energy_history.sum(1)
            self.grid_energy_history = self.perpendicular_energy_history + self.longitudinal_energy_history # over positions
            vacuum_wave_impedance= 1/ (self.epsilon_0 * self.c)
            np.cumsum(self.laser_energy_history**2/ vacuum_wave_impedance * self.dt)
            self.x_current = self.x + self.dx / 2
            self.postprocessed = True
Example #47
def fold(file1, dtype, samplerate, fedge, fedge_at_top, nchan,
         nt, ntint, nhead, ngate, ntbin, ntw, dm, fref, phasepol,
         coherent=False, do_waterfall=True, do_foldspec=True, verbose=True,
         progress_interval=100):
    """FFT ARO data, fold by phase/time and make a waterfall series

    Parameters
    ----------
    file1 : string
        name of the file holding voltage timeseries
    dtype : numpy dtype or '4bit' or '1bit'
        way the data are stored in the file
    samplerate : float
        rate at which samples were originally taken and thus double the
        band width (frequency units)
    fedge : float
        edge of the frequency band (frequency units)
    fedge_at_top : bool
        whether edge is at top (True) or bottom (False)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*ntint, with each sample containing
        a single polarisation
    nhead : int
        number of bytes to skip before reading (usually 0 for ARO)
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of part of the file that is read (i.e., ignoring nhead)
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    """

    # initialize folded spectrum and waterfall
    foldspec2 = np.zeros((nchan, ngate, ntbin))
    nwsize = nt*ntint//ntw
    waterfall = np.zeros((nchan, nwsize))

    # size in bytes of records read from file (simple for ARO: 1 byte/sample)
    recsize = nchan*ntint*{np.int8: 2, '4bit': 1}[dtype]
    if verbose:
        print('Reading from {}'.format(file1))

    with open(file1, 'rb', recsize) as fh1:

        if nhead > 0:
            if verbose:
                print('Skipping {0} bytes'.format(nhead))
            fh1.seek(nhead)

        foldspec = np.zeros((nchan, ngate), dtype=np.int)
        icount = np.zeros((nchan, ngate), dtype=np.int)

        dt1 = (1./samplerate).to(u.s)
        if coherent:
            # pre-calculate required turns due to dispersion
            fcoh = (fedge - rfftfreq(nchan*ntint, dt1.value) * u.Hz
                    if fedge_at_top
                    else
                    fedge + rfftfreq(nchan*ntint, dt1.value) * u.Hz)
            # (check via eq. 5.21 and following in
            # Lorimer & Kramer, Handbook of Pulsar Astronomy)
            dang = (dispersion_delay_constant * dm * fcoh *
                    (1./fref-1./fcoh)**2) * 360. * u.deg
            dedisperse = np.exp(dang.to(u.rad).value * 1j
                                ).conj().astype(np.complex64).view(np.float32)
            # get these back into order r[0], r[1],i[1],...r[n-1],i[n-1],r[n]
            dedisperse = np.hstack([dedisperse[:1], dedisperse[2:-1]])
        else:
            # pre-calculate time delay due to dispersion;
            # [::2] sets frequency channels to numerical recipes ordering
            freq = (fedge - rfftfreq(nchan*2, dt1.value)[::2] * u.Hz
                    if fedge_at_top
                    else
                    fedge + rfftfreq(nchan*2, dt1.value)[::2] * u.Hz)

            dt = (dispersion_delay_constant * dm *
                  (1./freq**2 - 1./fref**2)).to(u.s).value

        # need 2*nchan samples for each FFT
        dtsample = (nchan*2/samplerate).to(u.s).value

        for j in xrange(nt):
            if verbose and (j+1) % progress_interval == 0:
                print('Doing {:6d}/{:6d}; time={:18.12f}'.format(
                    j+1, nt, dtsample*j*ntint))   # equivalent time since start

            # just in case numbers were set wrong -- break if file ends
            # better keep at least the work done
            try:
                # data just a series of bytes, each containing one 8 bit or
                # two 4-bit samples (set by dtype in caller)
                raw = fromfile(fh1, dtype, recsize)
            except(EOFError, IOError) as exc:
                print("Hit {}; writing pgm's".format(exc))
                break

            vals = raw.astype(np.float32)
            if coherent:
                fine = rfft(vals, axis=0, overwrite_x=True)
                fine *= dedisperse
                vals = irfft(fine, axis=0, overwrite_x=True)

            chan2 = rfft(vals.reshape(-1, nchan*2), axis=-1,
                         overwrite_x=True)**2
            # rfft: Re[0], Re[1], Im[1], ..., Re[n/2-1], Im[n/2-1], Re[n/2]
            # re-order to Num.Rec. format: Re[0], Re[n/2], Re[1], ....
            power = np.hstack((chan2[:,:1]+chan2[:,-1:],
                               chan2[:,1:-1].reshape(-1,nchan-1,2).sum(-1)))

            # current sample positions in stream
            isr = j*ntint + np.arange(ntint)

            if do_waterfall:
                # loop over corresponding positions in waterfall
                for iw in xrange(isr[0]//ntw, isr[-1]//ntw + 1):
                    if iw < nwsize:  # add sum of corresponding samples
                        waterfall[:,iw] += np.sum(power[isr//ntw == iw],
                                                  axis=0)

            if do_foldspec:
                tsample = dtsample*isr  # times since start

                for k in xrange(nchan):
                    if coherent:
                        t = tsample  # already dedispersed
                    else:
                        t = tsample - dt[k]  # dedispersed times

                    phase = phasepol(t)  # corresponding PSR phases
                    iphase = np.remainder(phase*ngate,
                                          ngate).astype(np.int)
                    # sum and count samples by phase bin
                    foldspec[k] += np.bincount(iphase, power[:,k], ngate)
                    icount[k] += np.bincount(iphase, None, ngate)

                ibin = j*ntbin//nt  # bin in the time series: 0..ntbin-1
                if (j+1)*ntbin//nt > ibin:  # last addition to bin?
                    # get normalised flux in each bin (where any were added)
                    nonzero = icount > 0
                    nfoldspec = np.where(nonzero, foldspec/icount, 0.)
                    # subtract phase average and store
                    nfoldspec -= np.where(nonzero,
                                          np.sum(nfoldspec, 1, keepdims=True) /
                                          np.sum(nonzero, 1, keepdims=True), 0)
                    foldspec2[:,:,ibin] = nfoldspec
                    # reset for next iteration
                    foldspec *= 0
                    icount *= 0

    if verbose:
        print('read {0:6d} out of {1:6d}'.format(j+1, nt))

    if do_waterfall:
        nonzero = waterfall == 0.
        waterfall -= np.where(nonzero,
                              np.sum(waterfall, 1, keepdims=True) /
                              np.sum(nonzero, 1, keepdims=True), 0.)

    return foldspec2, waterfall
Example #48
def Redshift_Estimator_Auto(directoryname):
    #############################################
    #           Editable Params                 #
    #############################################
    # Print "this was a test" in ouput table, and
    # don't save output plots?  
    test = False
    
    # dir Name of Interest
    dirnm = directoryname #'lt30.0'
    use_prior = True
    # Import and skip specs you've already done?
    skip_prev_good = False
    
    # Skip test file objects for the above skips?
    use_tests = False
    
    
    # Choose your adventure: 
    # (1) Define bad specs you wish to skip, and run over all others
    # *OR* (2) define good specs and run over only those spectra
    
    # (1) List Bad Frames you want to skip in bad_specs
    bad_frames = []#range(312,500)
    #bad_frames = []
    #bad_frames = np.arange(100)
    
    # (2) You may also specify good frames instead of bad frames
    good_frames = []
    #good_frames = [18,21,22,23]
    #good_frames = np.arange(100)
    
    # **You can typically leave this be unless you have a custom run ** #
    # Create an instantiation of the z_est class for correlating spectra
    # To use default search of HK lines with no priors applied select True
    R = z_est(lower_w=3500.0,upper_w=9999.0,lower_z=0.05,upper_z=1.2,\
              z_res=3.0e-5,skip_initial_priors=True,auto_pilot=True,prior=use_prior)
    
    data_dir = '/nfs/kremin/DESI/quicksim/'
    
    #### relic of old code. Not useful at the moment
    # Import and skip specs you have already skipped?
    skip_prev_bad = True
    
    
    ############################################
    #        Main Body of the Code             #
    ############################################
    # Determine the name of the user for use in uniq_name
    if use_prior:
        username = '******'
    else:
        username = '******'
    
    # Avoid overwriting by appending unique name  
    # Initials_DayMonth_HourMinute    
    uniq_name = "%s_%s_" % (time.strftime("%H%M-%d%b", time.gmtime()),username)
    
    # Find path to the directory where this file is housed
    abs_cwd_path = data_dir    #os.getcwd() + '/'
    
    # Define Path to the dir
    #dirnm = 'group' + mask
    path_to_dir = os.path.join(abs_cwd_path,dirnm)
    
    
    # Display to terminal what the user has defined
    l1 = "##  test      = %s" % test
    l2 = "##  dir      = %s" % dirnm
    l3 = "##  uniq_name = %s" % uniq_name
    l4 = "##  abs_cwd_path = %s  ##" % abs_cwd_path
    
    print("\n\n" + len(l4)*'#')
    print(l1 + (len(l4)-len(l1)-2)*' ' + "##")
    print(l2 + (len(l4)-len(l2)-2)*' ' + "##")
    print(l3 +  (len(l4)-len(l3)-2)*' ' + "##")
    print(l4)
    print(len(l4)*'#' + "\n") 
    del l1,l2,l3,l4
    
    # Check the existence of all directories
    zpio.check_directories(abs_cwd_path,dirnm)
    
    # look for previously done results
    if skip_prev_good or skip_prev_bad:
        ignored_objects = zpio.get_file_specs(path_to_dir,skip_prev_good,skip_prev_bad,use_tests)
        ignored_objects = np.array(ignored_objects)
    
    if len(bad_frames)>0:
        use_bad_frames = True
        ignored_frames = np.array(bad_frames)
    elif len(good_frames)>0:
        use_bad_frames = False
        ignored_frames = np.array(good_frames)
    else:
        use_bad_frames = True
        ignored_frames = np.array([])
        
    
    # Display spectra being skipped
    print("Skipping user specified frames:")
    print(ignored_frames)
    print("Skipping previously done objects:")
    print(ignored_objects)
    
    
    
    
    
    #Import template spectrum (SDSS early type) and continuum subtract the flux
    try:
        early_type = pyfits.open(os.path.join(abs_cwd_path,'sdss_templates','spDR2-023.fit'))
    except IOError:
        print("There is no 'sdss_templates/spDR2-023.fit' in the cwd")
        print("cwd =", abs_cwd_path)
        raise("Exiting")
    
    # Declare the array for the template flux(es)
    early_type_flux = np.ndarray((3,len(early_type[0].data[0])))
    early_type_wave = np.ndarray((3,len(early_type[0].data[0])))
    coeff0 = early_type[0].header['COEFF0']
    coeff1 = early_type[0].header['COEFF1']
    early_type_flux[0,:] = early_type[0].data[0]
    early_type_wave[0,:] = 10**(coeff0 + coeff1*np.arange(0,early_type_flux[0,:].size,1))
    
    try:
        early_type = pyfits.open(os.path.join(abs_cwd_path,'sdss_templates','spDR2-024.fit'))
    except IOError:
        print("There is no 'sdss_templates/spDR2-024.fit' in the cwd")
        print("cwd =", abs_cwd_path)
        raise("Exiting")
    coeff0 = early_type[0].header['COEFF0']
    coeff1 = early_type[0].header['COEFF1']
    early_type_flux[1,:] = early_type[0].data[0]
    early_type_wave[1,:] = 10**(coeff0 + coeff1*np.arange(0,early_type_flux[1,:].size,1))
    
    try:
        early_type = pyfits.open(os.path.join(abs_cwd_path ,'sdss_templates','spDR2-025.fit'))
    except IOError:
        print("There is no 'sdss_templates/spDR2-025.fit' in the cwd")
        print("cwd =", abs_cwd_path)
        raise("Exiting")
    coeff0 = early_type[0].header['COEFF0']
    coeff1 = early_type[0].header['COEFF1']
    early_type_flux[2,:] = early_type[0].data[0]
    early_type_wave[2,:] = 10**(coeff0 + coeff1*np.arange(0,early_type_flux[2,:].size,1))
    
    
    
    
    # open the redshift output table and print the column names
    good_spec_file = os.path.join(path_to_dir,uniq_name + '{0}_speczs'.format(dirnm) + '.txt')
    with open(good_spec_file, 'w') as outf:
        if use_prior:
            outf.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ('Frame','Est_z', \
                'Correlation', 'Template', 'True_z', 'MOCK_PHOTOZ', 'Total_SN','MEAN_SN','Med_SN', \
                'Exp','I_Mag','Model') )
        else:
            outf.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ('Frame','Est_z', \
                'Correlation', 'Template', 'True_z', 'Total_SN','MEAN_SN','Med_SN', \
                'Exp','I_Mag','Model') )
        
        # Define the path to the fits files and loop over files within that folder
        fits_path = os.path.join(path_to_dir,'fits')
    
        for curfile in os.listdir(fits_path):
            print(curfile)
        
            sn_stat1,sn_stat2,filframe,filext = curfile.split('.')
            sn_status = sn_stat1+'.'+sn_stat2
        
            if filext != 'fits':
                continue
            if sn_status != dirnm:
                print("The fits file dir isn't matching the specified dir, skipping")
                continue
            if np.any(filframe==ignored_objects):
                print("Previously Done, Skipping")
                continue
            if use_bad_frames:
                if np.any(int(filframe)==ignored_frames):
                    print("Specified to skip in calling function\n")
                    continue
            else:
                if np.all(int(filframe)!=ignored_frames):
                    print("Not in list of good frames, so skipping\n")
                    continue
            del sn_status,filext
        
            # Import the spectrum
            fileName = os.path.join(fits_path,curfile)
            try:
                fits = pyfits.open(fileName)
            except IOError:
                print("File ",fileName," could not be opened")
                print("Moving on to the next spectra")
                continue
            dat = fits[0].data
            wave = dat[1].astype(float)
            raw_flux = dat[0].astype(float)
            # Mask out previously defined bad wavelength regions for the current spec
            masked_flux, masked_wave = zwf.mask_inf_regions(raw_flux,wave)
        
            if sum(masked_flux)==0:
                print("The sum of the flux was 0, skipping")
                continue
            cur_head = fits[0].header
            truez = float(cur_head['TRUE_Z'])
            total_sn = float(cur_head['TOTSQ_SN'])
            median_sn = float(cur_head['MED_SNR'])
            mean_sn = float(cur_head['MEAN_SNR'])
            exposure = float(cur_head['EXPTIME'])
            imag = float(cur_head['ABS_MAG'])
            model = 'lrg'#cur_head['MODEL']
            fits.close()
    
    
            if use_prior:
                std = 0.02*(1+truez)
                mock_photoz = np.random.randn(1)*std + truez
            else:
                mock_photoz = None
            # Declare variables for later
            temp = [0.,0.,0.,0.,0.]
            skip_spectra = False
            #        plt.figure()
            #        plt.plot(masked_wave,masked_flux,'b-')
            #        continue
            #Clean High Frequency noise
            F1 = fftpack.rfft(masked_flux)
            cut = F1.copy()
            W = fftpack.rfftfreq(masked_wave.size,d=masked_wave[(len(masked_wave)-1)]-masked_wave[(len(masked_wave)-2)])
            cut[np.where(W>0.15)] = 0
            Flux_Sci = fftpack.irfft(cut)
            Flux_Science, masked_wave = zwf.mask_neg_regions(Flux_Sci,masked_wave)
            if len(Flux_Science)==0:
                print("The length of Flux_Science was 0 so skipping\n")
                continue
            if len(masked_wave)==0:
                print("The length of masked_wave was 0 so skipping\n")
                continue
        	    # Find the redshifts
            for i in np.arange(len(early_type_flux[:,1])):
                redshift_est,corr,ztest,corr_val = R.redshift_estimate(early_type_wave[i,:], early_type_flux[i,:], masked_wave,Flux_Science,(i+23),gal_prior=mock_photoz)
                print("Template %d, Est Red = %f" % (i+23,redshift_est))
                # Check to see if the fit was better than the previous fits for this spec
                cor = np.max(corr)
                if (cor>=temp[1]):
                    temp = [redshift_est,cor,ztest,corr_val,i]
        
                           
            # Print the best redshift estimate to the terminal
            print("\n\tBest: Template %d, Est Red = %f\n" % (temp[4]+23,temp[0]),"\n")
        
        	# Print params to the output table
        
            # Find the RA and Decs of current object
        
            
            # Print the results into the output "good specs" text file
            if use_prior:
                outf.write("%04d\t%.4f\t%.4f\t%d\t%.1f\t%0.3f\t%.2f\t%.4f\t%.4f\t%.1f\t%.1f\t%s\n"  % (np.int(filframe),temp[0],temp[1],(np.int(temp[4])+23),truez,mock_photoz,total_sn,mean_sn,median_sn,exposure,imag,model))
            else:
                outf.write("%04d\t%.4f\t%.4f\t%d\t%.1f\t%.2f\t%.4f\t%.4f\t%.1f\t%.1f\t%s\n"  % (np.int(filframe),temp[0],temp[1],(np.int(temp[4])+23),truez,total_sn,mean_sn,median_sn,exposure,imag,model))
         
           	# Create a summary plot of the best z-fit
            if not test:
                plt_name = os.path.join(path_to_dir,'red_ests','redEst_%s_%s_Tmplt%d_%s.png' % (dirnm,filframe,np.int(temp[4]+23),uniq_name))
                summary_plot(masked_wave,Flux_Science,early_type_wave[temp[4],:],early_type_flux[temp[4],:],temp[0],temp[2],temp[3],plt_name,filframe,mock_photoz)    
        
        
        # If test, print that in the outputted text files
        if test:
            print("This was a test! Not valid results", file=outf)
    
    
        
        
    
    # if it was a test, ask the user if they want to save the outputted text files
    if test:
        YorN = 'b'
        print("\n\n\n\n\tWould you like to keep the test good spectra output?")
        while (YorN!='Y' and  YorN!='N' and YorN!='n' and YorN!='y'):
            YorN = input('\tYes (y) or No (n): ')
            if (YorN=='N' or YorN=='n'):
                os.remove(good_spec_file)
Example #49
def _setup_chpi_fits(info, t_window, t_step_min, method='forward',
                     exclude='bads', verbose=None):
    """Helper to set up cHPI fits"""
    from scipy.spatial.distance import cdist
    from .preprocessing.maxwell import _prep_mf_coils
    if not (check_version('numpy', '1.7') and check_version('scipy', '0.11')):
        raise RuntimeError('numpy>=1.7 and scipy>=0.11 required')
    hpi_freqs, coil_head_rrs, hpi_pick, hpi_ons = _get_hpi_info(info)[:4]
    line_freqs = np.arange(info['line_freq'], info['sfreq'] / 3.,
                           info['line_freq'])
    logger.info('Line interference frequencies: %s Hz'
                % ' '.join(['%d' % l for l in line_freqs]))
    # initial transforms
    dev_head_t = info['dev_head_t']['trans']
    head_dev_t = invert_transform(info['dev_head_t'])['trans']
    # determine timing
    n_window = int(round(t_window * info['sfreq']))
    logger.debug('Coordinate transformation:')
    for d in (dev_head_t[0, :3], dev_head_t[1, :3], dev_head_t[2, :3],
              dev_head_t[:3, 3] * 1000.):
        logger.debug('{0:8.4f} {1:8.4f} {2:8.4f}'.format(*d))
    slope = np.arange(n_window).astype(np.float64)[:, np.newaxis]
    slope -= np.mean(slope)
    rads = slope / info['sfreq']
    rads *= 2 * np.pi
    f_t = hpi_freqs[np.newaxis, :] * rads
    l_t = line_freqs[np.newaxis, :] * rads
    model = [np.sin(f_t), np.cos(f_t)]  # hpi freqs
    model += [np.sin(l_t), np.cos(l_t)]  # line freqs
    model += [slope, np.ones(slope.shape)]
    model = np.concatenate(model, axis=1)
    inv_model = linalg.pinv(model)
    # Set up highpass at half lowest cHPI freq
    hp_n = 2 ** (int(np.ceil(np.log2(n_window))) + 1)
    freqs = fftpack.rfftfreq(hp_n, 1. / info['sfreq'])
    hp_ind = np.where(freqs >= hpi_freqs.min())[0][0] - 2
    hp_window = np.concatenate(
        [[0], np.repeat(np.hanning(hp_ind - 1)[:(hp_ind - 1) // 2],
                        2)])[np.newaxis]

    # Set up magnetic dipole fits
    picks_meg = pick_types(info, meg=True, eeg=False, exclude=exclude)
    picks = np.concatenate([picks_meg, [hpi_pick]])
    megchs = [ch for ci, ch in enumerate(info['chs']) if ci in picks_meg]
    templates = _read_coil_defs(elekta_defs=True, verbose=False)
    coils = _create_meg_coils(megchs, 'accurate', coilset=templates)
    if method == 'forward':
        coils = _concatenate_coils(coils)
    else:  # == 'multipole'
        coils = _prep_mf_coils(info)
    scale = make_ad_hoc_cov(info, verbose=False)
    scale = _get_whitener_data(info, scale, picks_meg, verbose=False)
    orig_dev_head_quat = np.concatenate([rot_to_quat(dev_head_t[:3, :3]),
                                         dev_head_t[:3, 3]])
    dists = cdist(coil_head_rrs, coil_head_rrs)
    hpi = dict(dists=dists, scale=scale, picks=picks, model=model,
               inv_model=inv_model, coil_head_rrs=coil_head_rrs,
               coils=coils, on=hpi_ons, n_window=n_window, method=method,
               freqs=hpi_freqs, line_freqs=line_freqs,
               hp_ind=hp_ind, hp_n=hp_n, hp_window=hp_window)
    last = dict(quat=orig_dev_head_quat, coil_head_rrs=coil_head_rrs,
                coil_dev_rrs=apply_trans(head_dev_t, coil_head_rrs),
                sin_fit=None, fit_time=-t_step_min)
    return hpi, last
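The high-pass setup above leans on scipy.fftpack's rfftfreq ordering: an oversized FFT length hp_n, the first bin at or above the lowest cHPI frequency, and a half-Hanning ramp whose samples are repeated in pairs to match the interleaved (Re, Im) rfft layout. A standalone sketch of that step with made-up numbers (the sampling rate and cHPI frequencies below are not taken from any real recording):

import numpy as np
from scipy import fftpack

sfreq = 1000.0                    # assumed sampling rate, Hz
hpi_freqs = np.array([83., 143., 203., 263., 323.])  # made-up cHPI frequencies
n_window = 200                    # samples per fitting window

# Oversized FFT length, as in the example: next power of two, then doubled
hp_n = 2 ** (int(np.ceil(np.log2(n_window))) + 1)
freqs = fftpack.rfftfreq(hp_n, 1. / sfreq)

# First bin at or above the lowest cHPI frequency, backed off by two bins
hp_ind = np.where(freqs >= hpi_freqs.min())[0][0] - 2

# Half-Hanning ramp: zero at DC, rising toward one near hp_ind; the
# np.repeat(..., 2) matches scipy.fftpack's interleaved (Re, Im) layout
hp_window = np.concatenate(
    [[0], np.repeat(np.hanning(hp_ind - 1)[:(hp_ind - 1) // 2], 2)])[np.newaxis]

print(hp_n, hp_ind, hp_window.shape)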
Example #50
0
data = np.loadtxt('out_cut.txt') 

t = data[:,0]
flux = data[:,1]
N = len(t)

pi = np.pi
plt.figure(figsize = (15, 5))

def mysine(t, a1, a2, a3):
    return a1 * np.sin(a2 * t + a3)

yhat = fftpack.rfft(flux)
idx = (yhat**2).argmax()
freqs = fftpack.rfftfreq(N, d = (t[1]-t[0])/(2*pi))
frequency = freqs[idx]

amplitude = flux.max()
guess = [amplitude, 0.76, 0.]
print "guessed amplitude & ang. frequency:", guess
(amplitude, frequency, phase), pcov = optimize.curve_fit(
    mysine, t, flux, guess)

period = 2*pi/frequency
print "amplitude & ang. frequency & phase:", amplitude, frequency, phase

xx = t
yy = mysine(xx, amplitude, frequency, phase)
# plot the real data
plt.plot(t, flux, 'r', label = 'Real Values')
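Example #50 seeds the angular frequency for curve_fit from the strongest rfft coefficient, using d = dt/(2*pi) so that rfftfreq returns angular frequencies directly (although the guess it then prints keeps a hard-coded 0.76 rather than the FFT estimate). A self-contained sketch of the same idea on synthetic data, assuming only scipy.fftpack and scipy.optimize:

import numpy as np
from scipy import fftpack, optimize

# Synthetic light curve with a known angular frequency of pi/4
t = np.arange(0, 200, 0.1)
flux = 3.0 * np.sin(np.pi / 4 * t + 0.3) + 0.2 * np.random.randn(t.size)

def mysine(t, a1, a2, a3):
    return a1 * np.sin(a2 * t + a3)

# With d = dt/(2*pi), rfftfreq returns angular frequencies, so the bin with
# the largest squared rfft coefficient gives a starting value for curve_fit
yhat = fftpack.rfft(flux)
freqs = fftpack.rfftfreq(flux.size, d=(t[1] - t[0]) / (2 * np.pi))
w_guess = freqs[(yhat ** 2).argmax()]

guess = [flux.max(), w_guess, 0.]
(amp, w_fit, phase), pcov = optimize.curve_fit(mysine, t, flux, guess)
print("guessed amplitude & ang. frequency:", guess)
print("fitted amplitude, ang. frequency, phase:", amp, w_fit, phase)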
Example #51
0
def _setup_chpi_fits(info, t_window, t_step_min, method='forward',
                     exclude='bads', add_hpi_stim_pick=True,
                     remove_aliased=False, verbose=None):
    """Helper to set up cHPI fits."""
    from scipy.spatial.distance import cdist
    from .preprocessing.maxwell import _prep_mf_coils
    if not (check_version('numpy', '1.7') and check_version('scipy', '0.11')):
        raise RuntimeError('numpy>=1.7 and scipy>=0.11 required')
    hpi_freqs, coil_head_rrs, hpi_pick, hpi_ons = _get_hpi_info(info)[:4]
    # What to do e.g. if Raw has been resampled and some of our
    # HPI freqs would now be aliased
    highest = info.get('lowpass')
    highest = info['sfreq'] / 2. if highest is None else highest
    keepers = np.array([h <= highest for h in hpi_freqs], bool)
    if remove_aliased:
        hpi_freqs = hpi_freqs[keepers]
        coil_head_rrs = coil_head_rrs[keepers]
        hpi_ons = hpi_ons[keepers]
    elif not keepers.all():
        raise RuntimeError('Found HPI frequencies %s above the lowpass '
                           '(or Nyquist) frequency %0.1f'
                           % (hpi_freqs[~keepers].tolist(), highest))
    if info['line_freq'] is not None:
        line_freqs = np.arange(info['line_freq'], info['sfreq'] / 3.,
                               info['line_freq'])
    else:
        line_freqs = np.zeros([0])
    logger.info('Line interference frequencies: %s Hz'
                % ' '.join(['%d' % l for l in line_freqs]))
    # initial transforms
    dev_head_t = info['dev_head_t']['trans']
    head_dev_t = invert_transform(info['dev_head_t'])['trans']
    # determine timing
    n_window = int(round(t_window * info['sfreq']))
    logger.debug('Coordinate transformation:')
    for d in (dev_head_t[0, :3], dev_head_t[1, :3], dev_head_t[2, :3],
              dev_head_t[:3, 3] * 1000.):
        logger.debug('{0:8.4f} {1:8.4f} {2:8.4f}'.format(*d))
    slope = np.arange(n_window).astype(np.float64)[:, np.newaxis]
    slope -= np.mean(slope)
    rads = slope / info['sfreq']
    rads *= 2 * np.pi
    f_t = hpi_freqs[np.newaxis, :] * rads
    l_t = line_freqs[np.newaxis, :] * rads
    model = [np.sin(f_t), np.cos(f_t)]  # hpi freqs
    model += [np.sin(l_t), np.cos(l_t)]  # line freqs
    model += [slope, np.ones(slope.shape)]
    model = np.concatenate(model, axis=1)
    inv_model = linalg.pinv(model)
    # Set up highpass at half lowest cHPI freq
    hp_n = 2 ** (int(np.ceil(np.log2(n_window))) + 1)
    freqs = fftpack.rfftfreq(hp_n, 1. / info['sfreq'])
    hp_ind = np.where(freqs >= hpi_freqs.min())[0][0] - 2
    hp_window = np.concatenate(
        [[0], np.repeat(np.hanning(hp_ind - 1)[:(hp_ind - 1) // 2],
                        2)])[np.newaxis]

    # Set up magnetic dipole fits
    picks_meg = pick_types(info, meg=True, eeg=False, exclude=exclude)
    if len(exclude) > 0:
        if exclude == 'bads':
            msg = info['bads']
        else:
            msg = exclude
        logger.debug('Static bad channels (%d): %s'
                     % (len(msg), u' '.join(msg)))
    if add_hpi_stim_pick:
        if hpi_pick is None:
            raise RuntimeError('Could not find HPI status channel')
        picks = np.concatenate([picks_meg, [hpi_pick]])
    else:
        picks = picks_meg
    megchs = [ch for ci, ch in enumerate(info['chs']) if ci in picks_meg]
    templates = _read_coil_defs(elekta_defs=True, verbose=False)
    coils = _create_meg_coils(megchs, 'accurate', coilset=templates)
    if method == 'forward':
        coils = _concatenate_coils(coils)
    else:  # == 'multipole'
        coils = _prep_mf_coils(info)
    scale = make_ad_hoc_cov(info, verbose=False)
    scale = _get_whitener_data(info, scale, picks_meg, verbose=False)
    orig_dev_head_quat = np.concatenate([rot_to_quat(dev_head_t[:3, :3]),
                                         dev_head_t[:3, 3]])
    dists = cdist(coil_head_rrs, coil_head_rrs)
    hpi = dict(dists=dists, scale=scale, picks=picks, model=model,
               inv_model=inv_model, coil_head_rrs=coil_head_rrs,
               coils=coils, on=hpi_ons, n_window=n_window, method=method,
               freqs=hpi_freqs, line_freqs=line_freqs,
               hp_ind=hp_ind, hp_n=hp_n, hp_window=hp_window)
    last = dict(quat=orig_dev_head_quat, coil_head_rrs=coil_head_rrs,
                coil_dev_rrs=apply_trans(head_dev_t, coil_head_rrs),
                sin_fit=None, fit_time=-t_step_min)
    return hpi, last
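Both cHPI helpers build the same regression basis: sines and cosines at the cHPI and line frequencies plus a slope and an intercept, solved with a pseudoinverse. A toy version of that fit on a single synthetic channel (the sampling rate, window length and frequencies below are made up, not taken from any MEG recording):

import numpy as np
from scipy import linalg

sfreq = 1000.0
n_window = 400
hpi_freqs = np.array([83., 143.])     # made-up cHPI frequencies, Hz
line_freqs = np.array([50., 100.])    # made-up mains harmonics, Hz

# Zero-mean time axis, as in the examples (the slope column also detrends)
slope = np.arange(n_window, dtype=np.float64)[:, np.newaxis]
slope -= slope.mean()
rads = 2 * np.pi * slope / sfreq

f_t = hpi_freqs[np.newaxis, :] * rads
l_t = line_freqs[np.newaxis, :] * rads
model = np.concatenate([np.sin(f_t), np.cos(f_t),
                        np.sin(l_t), np.cos(l_t),
                        slope, np.ones_like(slope)], axis=1)
inv_model = linalg.pinv(model)

# Fake channel: one cHPI tone plus mains interference, drift and noise
t = np.arange(n_window) / sfreq
data = (2.0 * np.sin(2 * np.pi * 83. * t + 0.4)
        + 0.5 * np.sin(2 * np.pi * 50. * t)
        + 0.01 * t + 0.05 * np.random.randn(n_window))

coeffs = inv_model @ data             # least-squares amplitudes
sin_amp, cos_amp = coeffs[0], coeffs[2]
print("recovered 83 Hz amplitude:", np.hypot(sin_amp, cos_amp))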
Example #52
0
def fourier_detect(x, times, rate):
    fr = rfftfreq(len(times), 1/rate)
    y = rfft(x)
    y[fr > int(1/0.05)] = 0
    x_smoothed = irfft(y)
    return times[argrelmax(x_smoothed)[0]]
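The fourier_detect helper above zeroes every rfft coefficient above roughly 20 Hz (int(1/0.05) actually evaluates to 19 because of floating-point rounding) and reports local maxima of the smoothed trace as event times. A self-contained variant with explicit imports and a synthetic 2 Hz oscillation; the 5 Hz cutoff here is only a demo choice:

import numpy as np
from scipy import fftpack
from scipy.signal import argrelmax

def fourier_detect_demo(x, times, rate, cutoff_hz=20.0):
    """Low-pass the trace in the rfft domain, then return the peak times."""
    fr = fftpack.rfftfreq(len(times), 1.0 / rate)
    y = fftpack.rfft(x)
    y[fr > cutoff_hz] = 0            # zero all coefficients above the cutoff
    x_smoothed = fftpack.irfft(y)
    return times[argrelmax(x_smoothed)[0]]

# A noisy 2 Hz oscillation sampled at 200 Hz: peaks should appear roughly
# every 0.5 s once the noise has been smoothed away
rate = 200.0
times = np.arange(0, 3.0, 1.0 / rate)
x = np.sin(2 * np.pi * 2.0 * times) + 0.3 * np.random.randn(times.size)
print(fourier_detect_demo(x, times, rate, cutoff_hz=5.0))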
Example #53
0
median_sdss_redshift = np.median(Gal_dat.spec_z[Gal_dat.spec_z > 0.0])
print('Median SDSS redshift', median_sdss_redshift)

R = z_est()

for k in range(len(Gal_dat)):
    if Gal_dat.slit_type[k] == 'g' and Gal_dat.good_spectra[k] == 'y':
        if sdss_check:
            if Gal_dat.spec_z[k] != 0.0: skipgal = False
            else: skipgal = True
        else: skipgal = False
        if not skipgal:
            F1 = fftpack.rfft(Flux_science[k])
            cut = F1.copy()
            W = fftpack.rfftfreq(spectra[keys[k]]['wave2'].size,d=spectra[keys[k]]['wave2'][1001]-spectra[keys[k]]['wave2'][1000])
            cut[np.where(W>0.15)] = 0
            Flux_science2 = fftpack.irfft(cut)
            Flux_sc = Flux_science2 - signal.medfilt(Flux_science2,171)
            d.set('pan to 1150.0 '+str(Gal_dat.FINAL_SLIT_Y[k])+' physical')
            d.set('regions command {box(2000 '+str(Gal_dat.FINAL_SLIT_Y[k])+' 4500 '+str(Gal_dat.SLIT_WIDTH[k])+') #color=green highlite=1}')
            redshift_est[k],cor[k],ztest,corr_val,qualityval['Clear'][k] = R.redshift_estimate(early_type_wave,early_type_flux,spectra[keys[k]]['wave2'],Flux_science2,gal_prior=None)
            try:
                HSN[k],KSN[k],GSN[k] = sncalc(redshift_est[k],spectra[keys[k]]['wave2'],Flux_sc)
            except ValueError:
                HSN[k],KSN[k],GSN[k] = 0.0,0.0,0.0
            SNavg[k] = np.average(np.array([HSN[k],KSN[k],GSN[k]]))
            SNHKmin[k] = np.min(np.array([HSN[k],KSN[k]]))

    else:
        redshift_est[k] = 0.0
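Example #53 smooths each science spectrum by zeroing rfft coefficients above 0.15 cycles per wavelength unit and then subtracts a broad median-filtered continuum before measuring line S/N. The same two-step cleanup on a synthetic spectrum (the 0.15 cutoff and 171-pixel median window are copied from the snippet, not tuned):

import numpy as np
from scipy import fftpack, signal

# Synthetic "spectrum": smooth continuum + narrow absorption line + noise
wave = np.arange(4000., 7000., 1.0)            # 1 A per pixel
flux = (1.0 + 0.0001 * (wave - 5500.)
        - 0.4 * np.exp(-0.5 * ((wave - 5893.) / 3.) ** 2)
        + 0.05 * np.random.randn(wave.size))

# 1) Low-pass in the rfft domain, cutoff in cycles per Angstrom
F1 = fftpack.rfft(flux)
W = fftpack.rfftfreq(wave.size, d=wave[1] - wave[0])
cut = F1.copy()
cut[W > 0.15] = 0
flux_smooth = fftpack.irfft(cut)

# 2) Continuum removal with a wide median filter
flux_sc = flux_smooth - signal.medfilt(flux_smooth, 171)
print(flux_sc.min(), flux_sc.max())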
Example #54
0
def kfftn(dk_shape, ndim, boxsize):
    ''' return frequency grid '''
    k=[sfft.fftfreq(dk_shape[i], 2.*np.pi/boxsize ) for i in \
      range(ndim-1) ] + [sfft.rfftfreq(dk_shape[-1],2.*np.pi/boxsize) ]
    return k
Example #55
0
def krfftn(dk_shape, ndim, boxsize):
    ''' return the k grid used for rfftn, where the last axis is done 
        using rfft '''
    k=[sfft.fftfreq(dk_shape[i], 2.*np.pi/boxsize ) for i in \
      range(ndim-1) ] + [sfft.rfftfreq(dk_shape[-1],2.*np.pi/boxsize) ]
    return k
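Examples #54 and #55 are the same helper: per-axis frequency arrays for a real-to-complex transform of a periodic box, built directly from dk_shape (the shape of the transformed array). Whether that gives the expected lengths depends on which library sfft refers to, so here is an alternative sketch that builds the grid from the real-space shape with numpy.fft and checks it against the rfftn output shape:

import numpy as np

def k_grid_for_rfftn(real_shape, boxsize):
    """Per-axis angular wavenumbers k = 2*pi*m/L matching np.fft.rfftn output."""
    spacing = boxsize / np.asarray(real_shape, dtype=float)   # grid spacing per axis
    k = [2. * np.pi * np.fft.fftfreq(n, d=dx)
         for n, dx in zip(real_shape[:-1], spacing[:-1])]
    k.append(2. * np.pi * np.fft.rfftfreq(real_shape[-1], d=spacing[-1]))
    return k

# Consistency check against the transform's output shape
shape, L = (8, 8, 8), 100.0
field = np.random.randn(*shape)
dk = np.fft.rfftn(field)
kx, ky, kz = k_grid_for_rfftn(shape, L)
assert (len(kx), len(ky), len(kz)) == dk.shape
print(kz)        # runs from 0 up to the Nyquist wavenumber along the half axis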
Example #56
0
	def __init__(self, totalTime, dt, dtIntegration, stokes, beta, signalToNoise=0.0, seed=0, modulationType=0):
		"""Summary
		
		Parameters
		----------
		totalTime : TYPE
		    Description
		dt : TYPE
		    Description
		dtIntegration : TYPE
		    Description
		seed : int, optional
		    Description
		
		Returns
		-------
		TYPE : Description
		"""
		
		self.totalTime = totalTime
		self.dt = dt
		self.dtIntegration = dtIntegration
		self.seed = seed
		self.signalToNoise = signalToNoise
		self.modulationType = modulationType

		if (self.seed != 0):
			np.random.seed(self.seed)

# Read seeing power spectrum
		self.powerLites = np.loadtxt('powerSpectrumSeeing.dat')
		self.powerLites[:,1] = 10.0**self.powerLites[:,1]

# Number of samples of the original sample
		self.nSteps = int(totalTime / dt)
		self.times = np.arange(self.nSteps) * self.dt

# Frequency axis
		self.freq = fft.rfftfreq(self.nSteps, d=dt)

# Betas and Stokes parameters
		self.beta = beta
		self.stokes = stokes		

# Generate Gaussian noise with unit variance and multiply by the square root of the power spectrum
# to generate the noise with the appropriate power spectrum
		noise = np.random.randn(self.nSteps)
		noiseFFT = myFFT(noise)

		self.powerSeeing = np.interp(np.abs(self.freq), self.powerLites[:,0], self.powerLites[:,1])
		self.powerSeeing[0] = 0.0
		
		self.seeingFFT = np.sqrt(self.powerSeeing) * noiseFFT
		self.seeingFFT /= np.sqrt(myTotalPower(self.seeingFFT))
		self.seeing = myIFFT(self.seeingFFT)

# Make sure that the total power is unity
		print('Total variance = ', np.sum(self.seeing**2), myTotalPower(self.seeingFFT))

# Compute the signal and its power spectrum
		self.signal = [None] * 4
		for i in range(4):
			self.signal[i] = self.stokes[i]*(1.0 + self.beta[i] * self.seeing)

# Generate modulation using a lambda/4 and lambda/2 polarimeter with random angles
		# self.modulation = [np.ones(self.nSteps), 2.0*np.random.rand(self.nSteps)-1.0, 2.0*np.random.rand(self.nSteps)-1.0, 2.0*np.random.rand(self.nSteps)-1.0]
		if (self.modulationType == 0):
			self.alphaModulation = 0.5*np.pi*np.random.rand(self.nSteps)
			self.betaModulation = 0.5*np.pi*np.random.rand(self.nSteps)
		else:
			temp = np.load('alphaBetaSamples.npz')
			self.alphaModulation = temp['arr_0'][0:self.nSteps]
			self.betaModulation = temp['arr_1'][0:self.nSteps]

		self.modulation = [np.ones(self.nSteps), \
			np.cos(2.0*self.alphaModulation) * np.cos(2.0*(self.alphaModulation-2.0*self.betaModulation)),\
			np.sin(2.0*self.alphaModulation) * np.cos(2.0*(self.alphaModulation-2.0*self.betaModulation)),\
			np.sin(2.0*(2.0*self.betaModulation-self.alphaModulation))]

		self.integrationTime = self.dtIntegration
		self.lengthSample = int(self.dtIntegration / self.dt)
		self.nSamples = int(self.dt / self.dtIntegration * self.nSteps)

		self.signalIntegrated = [None] * 2
		for i in range(2):
			temp = self.signal[0] * self.modulation[0]
			sign = (-1.0)**i
			for j in range(1,4):
				temp += sign * self.signal[j] * self.modulation[j]
			self.signalIntegrated[i] = bin_ndarray(temp, (self.nSamples,), operation='sum') 
			self.signalIntegrated[i] += np.mean(self.signalIntegrated[i]) / self.signalToNoise * np.random.randn(self.nSamples)

		self.tIntegrated = np.arange(self.nSamples) * self.dtIntegration

# Generate modulation matrix
		self.sparseM = [None] * 4
		self.sparseMStar = [None] * 4

		for state in range(4):

			sparseData = []
			sparseRow = []
			sparseCol = []
			loop = 0
			for i in range(self.nSamples):
				for j in range(self.lengthSample):
					sparseData.append(self.modulation[state][loop])
					sparseRow.append(i)
					sparseCol.append(loop)

					loop += 1

			self.sparseM[state] = sp.coo_matrix((sparseData, (sparseRow, sparseCol)), shape=(self.nSamples, self.nSteps))
			self.sparseMStar[state] = self.sparseM[state].transpose(copy=True)

		self.factor = 2*np.ones(self.nSteps)
		self.factor[0] = 1.0
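The constructor above colours white Gaussian noise by the square root of a tabulated seeing power spectrum in the Fourier domain and normalises it to unit total power, through the helpers myFFT, myIFFT and myTotalPower that are not shown. A minimal self-contained version of that colouring step with numpy's real FFT, using an assumed 1/f power law in place of powerSpectrumSeeing.dat:

import numpy as np

n_steps, dt = 4096, 1e-3
freq = np.fft.rfftfreq(n_steps, d=dt)

# Assumed target power spectrum (stand-in for powerSpectrumSeeing.dat): ~1/f
power = np.zeros_like(freq)
power[1:] = 1.0 / freq[1:]
power[0] = 0.0                          # no DC component, as in the example

# Colour white noise: multiply its spectrum by sqrt(power), then normalise
noise_fft = np.fft.rfft(np.random.randn(n_steps))
seeing_fft = np.sqrt(power) * noise_fft
seeing = np.fft.irfft(seeing_fft, n=n_steps)
seeing /= np.sqrt(np.sum(seeing ** 2))  # unit sum of squares, in the spirit of
                                        # the example's myTotalPower normalisation
print("total variance:", np.sum(seeing ** 2))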
Example #57
0
    def lowpassSweptSineGeneration(self):
        """ Function to illustrate the steps taken to generate a low pass swept sine.

        Instead of using the Signal Generator, preform the inverse filtering manually
        so that the steps may be illustrated.
        """
        from scipy.signal import butter, lfilter, filtfilt

        self.logger.debug("Entering lowpassSweptSineGeneration")

        T = 64 * 10e-3  # 640 ms
        sample_rate = 44100.0
        f_1 = sample_rate / 2.0
        fft_size = 2 ** 18

        # Generate time vector
        t = arange(0, T, 1 / sample_rate)

        # Generate the signal from 0 to Nyquist frequency
        a = pi * f_1 / T

        s = sin(a * t ** 2)

        plot(1000 * t, s)
        xlabel("Time (ms)")
        ylabel(r"$s(t)$")
        xlim(0, 125)
        ylim(-1.1, 1.1)
        subplots_adjust(left=0.15, right=0.97, top=0.97, bottom=0.10)
        savefig("Analysis/Images/swept_sine.eps")
        cla()

        # Determine the spectrum
        S = fft(s, fft_size)
        # Inverse of the magnitude spectrum
        iaS = abs(S) ** -1
        liaS = log(iaS)
        liaS -= min(liaS)

        plot(fftfreq(fft_size, 1 / sample_rate)[:fft_size // 2], liaS[:fft_size // 2])
        xlabel("Frequency (Hz)")
        ylabel(r"ln$| S(\omega) | ^ {-1}$")
        xlim(0, sample_rate / 2)
        subplots_adjust(left=0.15, right=0.97, top=0.97, bottom=0.10)
        savefig("Analysis/Images/inverse_log_spectrum.eps")
        cla()
        # c, similiar to the cepstrum, is the inverse of the logarithmic inverse
        # magnitude spectrum
        c = ifft(log(iaS))

        # Window c to produce m
        m = r_[c[0], 2 * c[1:len(S) // 2 - 1], c[len(S) // 2], zeros(len(S) // 2)]
        plot(m)
        xlabel("samples")
        ylabel(r"$m\left[n\right]$")
        gca().get_yaxis().set_ticks([])
        subplots_adjust(left=0.10, right=0.97, top=0.97, bottom=0.10)
        ylim(-0.05, 0.05)
        xlim(0, 1000)
        savefig("Analysis/Images/minimum_phase.eps")
        cla()
        # Determine the spectrum of the windowed 'cepstrum'
        M = fft(m, fft_size)

        # Determine the minimum phase inverse filter
        iSmp = exp(M)
        plot(fftfreq(fft_size, 1 / sample_rate)[:fft_size // 2], iSmp[:fft_size // 2])
        xlim(0, sample_rate / 2.0)
        ylim(0.010, 0.030)
        ylabel(r"$X_{mp}^{-1}\left[k\right]$")
        xlabel("Frequency (Hz)")
        subplots_adjust(left=0.10, right=0.97, top=0.97, bottom=0.10)
        gca().get_yaxis().set_ticks([])
        xlim(0, sample_rate / 2.0)
        savefig("Analysis/Images/inverse_minimum_phase.eps")
        cla()

        # Determine the minimum phase spectrum
        Smp = S * iSmp

        # Determine the minimum phase signal
        smp = ifft(Smp)

        # smp will have fft_size samples, which could be very long
        # reduce to length of the signal specified
        smp = smp[:len(t)]

        # Low pass filter the signal to the upper frequency
        [b, a] = butter(8, 0.9, btype="low")
        smp = lfilter(b, a, smp)

        SMP = abs(rfft(smp, 2 ** 14))
        SMP -= max(SMP)
        S = abs(rfft(s, 2 ** 14))
        S -= max(S)
        S += 13

        plot(rfftfreq(len(SMP), 1 / sample_rate), SMP)
        plot(rfftfreq(len(S), 1 / sample_rate), S)

        title("SMP")
        show()
        # Normalize so that the maximum value is 1
        smp /= max(abs(smp))

        plot(smp)
        show()
        signal = smp
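The heart of the routine above is the cepstral construction of a minimum-phase inverse filter: take the log of the inverse magnitude spectrum, fold its cepstrum onto the causal side, exponentiate the resulting spectrum, and apply it to the sweep. A compact numpy-only sketch of just that construction (a simplification, not the class's exact code):

import numpy as np

fs = 44100.0
T = 0.125
t = np.arange(0, T, 1 / fs)
s = np.sin(np.pi * (fs / 2) / T * t ** 2)     # linear sweep from 0 to Nyquist

nfft = 2 ** 15
S = np.fft.fft(s, nfft)

# Log of the inverse magnitude spectrum (small floor avoids log(0))
log_inv_mag = -np.log(np.abs(S) + 1e-12)

# Real cepstrum of that log spectrum, folded to make it minimum phase:
# keep c[0] and c[nfft//2], double the strictly causal part, zero the rest
c = np.fft.ifft(log_inv_mag).real
m = np.concatenate(([c[0]], 2 * c[1:nfft // 2], [c[nfft // 2]],
                    np.zeros(nfft // 2 - 1)))

# Minimum-phase inverse filter spectrum, applied back to the sweep
inv_mp = np.exp(np.fft.fft(m))
smp = np.fft.ifft(S * inv_mp).real[:t.size]   # inverse-filtered (whitened) sweep
band = slice(nfft // 8, 3 * nfft // 8)        # interior of the swept band
print(smp.shape, np.abs(S * inv_mp)[band].mean())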
Example #58
0
    def windowEffects(self):
        """ Function used to generate figures for the window function section. """
        self.logger.debug("Entering windowEffects")
        
        # Declare system variables
        fs = 44100.0
        
        c = 344.0
        l = 2.0
        mic_l = l / 2.0
        N = 2 ** 18

        [bl, al] = butter(1, 10000 / fs / 2.0, 'low')

        T = 0.1
        t = arange(0, T, 1 / fs)
        s = r_[sin(2 * pi * t * (fs / 2 / (2 * T))), zeros(int(fs * (1 - T)) + 1)]  # Impulse
        s = r_[1, rand(int(fs))]

        h = r_[1, zeros(int(fs))]  # perfect reflector
        h = lfilter(bl, al, h)

        y = r_[s, zeros(int(fs * (l + mic_l) / c))]
        y += r_[zeros(int(fs * (l + mic_l) / c)), ifft(fft(s) * fft(h))]

        cepstrum = ifft(log(abs(fft(y, 2 ** 18)) ** 2))

        t = arange(0, len(cepstrum) / fs, 1 / fs)
        t *= 1000

        plot(t, cepstrum)

        win = r_[hanning(100)[:50], ones(400), hanning(100)[50:]]
        win *= max(abs(cepstrum[100:500]))
        t = arange(0, len(win) / fs, 1 / fs)
        t += 0.0058
        t *= 1000
        plot(t, win, c="black", ls="--")
        xlim(0, 25)
        ylim(-0.1, 0.3)
        grid(True)

        xlabel("quenfrency (ms)")

        savefig("Analysis/Images/lifting_the_impulse_response.eps")
        cla()

        t = arange(0, 1, 1 / fs)
        x = cos(2 * pi * 1000 * t)
        N = 2 ** 18
        n = 1000

        plot(rfftfreq(N / 2 + 1, 1 / fs), rfft(x[:n], N))
        xlim(0, 2000)

        ax = gca()

        ax.set_xticks([1000])
        ax.set_xticklabels(["$f_{0}$"])
        ax.set_yticks([0])

        grid(True)
        axhline(y=0)
        xlabel("Frequency (Hz)")
        ylabel(r"$X\left(f\right)$")
        savefig("Analysis/Images/windowed_sinusoidal_signal.eps")

        cla()

        n = 10

        W = abs(fft(ones(n), N)) ** 2

        plot(fftfreq(N), W)

        enbw = 100
        enbw_rect = r_[0, max(W) * ones(enbw), 0]

        plot(linspace(-0.05, 0.05, enbw + 2), enbw_rect, c="black", ls="--")
        ax = gca()

        ax.set_xticks([0])
        ax.set_yticks([0])
        ax.set_yticklabels("")

        xlabel("Frequency (Hz)")
        ylabel(r"$\left|W\left(f\right)\right|^{2}$")

        axvline(x=0)

        ylim(0, 110)

        annotate("peak power gain\n" + r"at $\left|W\left(0\right)\right|^{2}$", xy=(0.05, max(W)),
                xytext=(0.15, max(W) - 2), arrowprops=dict(fc="black", width=1, headwidth=5),
                verticalalignment='top', fontsize=10)
        annotate("Equivalent\nNoise Bandwidth", xy=(-0.05, max(W) / 2), xytext=(-0.15, max(W) / 2),
                arrowprops=dict(fc="black", width=1, headwidth=5), horizontalalignment="right",
                verticalalignment="center", fontsize=10)
        annotate("", xy=(0.05, max(W) / 2), xytext=(0.15, max(W) / 2),
                arrowprops=dict(fc="black", width=1, headwidth=5), horizontalalignment="right",
                fontsize=10)

        savefig("Analysis/Images/equivalent_noise_bandwidth.eps")

        cla()

        tukey = lambda N, a: r_[hanning(int(a * N))[:int(a * N / 2.0)], ones(int(N * (1 - a))), hanning(int(a * N))[int(a * N / 2.0):]]

        N = 220

        plot(ones(N)[N // 2:], c="black", ls="-", label="Rectangle")
        #plot([0, 0], [0, 1], c="black", ls="-")
        plot([N / 2, N / 2], [0, 1], c="black", ls="-")

        plot(tukey(N, 0.25)[N // 2:], c="black", ls="--", label=r"Tukey $\alpha = 0.25$")
        plot(tukey(N, 0.50)[N // 2:], c="black", ls="-.", label=r"Tukey $\alpha = 0.50$")
        plot(tukey(N, 0.75)[N // 2:], c="black", ls=":", label=r"Tukey $\alpha = 0.75$")
        plot(hanning(N)[N // 2:], c="gray", label="Hanning")

        legend()
        leg = gca().get_legend()
        setp(leg.get_texts(), fontsize="small")

        xlim(0, N + 10)
        ylim(0, 1.05)

        ax = gca()
        ax.set_xticks([N / 2])
        ax.set_xticklabels([r"N/2"])

        savefig("Analysis/Images/window_shapes.eps")

        cla()

        # n = 100
        # N = 2 ** 18

        # R = 20 * log10(rfft(ones(n), N))
        # R -= max(R)

        # T25 = 20 * log10(rfft(tukey(n, 0.25), N))
        # T25 -= max(T25)

        # T50 = 20 * log10(rfft(tukey(n, 0.50), N))
        # T50 -= max(T50)

        # T75 = 20 * log10(rfft(tukey(n, 0.75), N))
        # T75 -= max(T75)

        # H = 20 * log10(rfft(hanning(n), N))
        # H -= max(H)

        # semilogx(rfftfreq(N / 2 + 1) * n, R, label="Rectangle", c="black", ls="-")
        # semilogx(rfftfreq(N / 2 + 1) * n, T25, label=r"Tukey $\alpha = 0.25$", c="black", ls="--")
        # semilogx(rfftfreq(N / 2 + 1) * n, T50, label=r"Tukey $\alpha = 0.50$", c="black", ls="-.")
        # semilogx(rfftfreq(N / 2 + 1) * n, T75, label=r"Tukey $\alpha = 0.75$", c="black", ls=":")
        # semilogx(rfftfreq(N / 2 + 1) * n, H, label="Hanning", c="gray", ls="-")

        # xlim(0.1, 15)
        # ylim(-60, 0)

        # legend(loc="lower left")
        # leg = gca().get_legend()
        # setp(leg.get_texts(), fontsize="small")
        # show()

        cla()

        n = 220
        N = 2 ** 18

        fig, (ax1, ax2, ax3, ax4) = subplots(4, 1, sharex=True, sharey=True)
        ax1.semilogx(rfftfreq(N / 2 + 1, 1 / fs), rfft(ones(n), N), label="rect")
        ax1.text(800, 50, "Rectangle Window", horizontalalignment="center", verticalalignment="center")
        ax1.grid(True)

        ax2.semilogx(rfftfreq(N / 2 + 1, 1 / fs), rfft(tukey(n, 0.25), N), label="a = 0.25")
        ax2.text(800, 50, r"Tukey Window, $\alpha = 0.25$", horizontalalignment="center", verticalalignment="center")
        ax2.grid(True)

        ax3.semilogx(rfftfreq(N / 2 + 1, 1 / fs), rfft(tukey(n, 0.50), N), label="a = 0.50")
        ax3.text(800, 50, r"Tukey Window, $\alpha = 0.50$", horizontalalignment="center", verticalalignment="center")
        ax3.grid(True)

        ax4.semilogx(rfftfreq(N / 2 + 1, 1 / fs), rfft(tukey(n, 0.75), N), label="a = 0.75")
        ax4.text(800, 50, r"Tukey Window, $\alpha = 0.75$", horizontalalignment="center", verticalalignment="center")
        ax4.grid(True)

        axes = gca()

        axes.set_xticks([100, 125, 160, 200, 250, 315, 400,
                        500, 630, 800, 1000, 1250, 1600, 2000,
                        2500, 3150, 4000, 5000])
        axes.set_xticklabels([100, 125, 160, 200, 250, 315, 400,
                        500, 630, 800, 1000, 1250, 1600, 2000,
                        2500, 3150, 4000, 5000], rotation="vertical")
        axes.set_yticks([0])
        axes.set_yticklabels([0])

        ylim(-100, 100)
        xlim(100, 5000)

        fig.subplots_adjust(bottom=0.15, top=0.98, right=0.98, left=0.05)
        xlabel("Frequency (Hz)")

        savefig("Analysis/Images/window_frequency_response.eps")
Example #59
0
def hanning_real_plot(data, rate):
    fourier = rfft(data * np.hanning(len(data)))
    abs_fourier = abs(fourier)
    freqs = rfftfreq(len(fourier), 1.0 / rate)
    semilogy(freqs, abs_fourier)
Example #60
0
def real_plot(data, rate):
    fourier = rfft(data)
    abs_fourier = abs(fourier)
    freqs = rfftfreq(len(fourier), 1.0 / rate)
    semilogy(freqs, abs_fourier)
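Examples #59 and #60 take abs() of the rfft output directly. If rfft and rfftfreq come from scipy.fftpack, whose output interleaves real and imaginary parts, that plot shows |Re| and |Im| as separate points at each duplicated rfftfreq value rather than a true magnitude spectrum. A sketch that first repacks the interleaved output into a complex half-spectrum (so the magnitude matches numpy.fft.rfft) could look like this:

import numpy as np
from scipy import fftpack

def fftpack_to_complex(y):
    """Repack scipy.fftpack.rfft output [R0, R1, I1, ...] as a complex half spectrum."""
    n = len(y)
    spec = np.zeros(n // 2 + 1, dtype=complex)
    spec[0] = y[0]
    if n % 2 == 0:
        spec[1:-1] = y[1:-1:2] + 1j * y[2:-1:2]
        spec[-1] = y[-1]                 # Nyquist bin is purely real
    else:
        spec[1:] = y[1::2] + 1j * y[2::2]
    return spec

rate = 1000.0
data = np.sin(2 * np.pi * 50.0 * np.arange(1000) / rate)

interleaved = fftpack.rfft(data)
spectrum = fftpack_to_complex(interleaved)
freqs = np.fft.rfftfreq(len(data), 1.0 / rate)    # one value per complex bin

assert np.allclose(spectrum, np.fft.rfft(data))
print(freqs[np.abs(spectrum).argmax()])           # ~50 Hz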