Example #1
def plot_psd_resampled():
    global t, ch1, ch2, sps, sampl, title
    
    plt.clf()
    nfft=2048 #int(2**np.ceil(np.log2(sampl)));

    ax1=plt.subplot(211)
    (Pxx, freqs)=plt.psd(ch1[::5], NFFT=nfft, Fs=sps/5, window=mlab.window_hanning, color='red')
    plt.title('PSD (NFFT='+str(nfft)+', Fs='+str(sps/5)+'Hz, hann) from '+title)
    plt.xlabel('')

    ax2=plt.subplot(212)
    (Pxx, freqs)=plt.psd(ch2[::5], NFFT=nfft, Fs=sps/5, window=mlab.window_hanning, color='green')
    plt.ylabel('')
    ax2.set_ylim(ax1.get_ylim())

    fig=plt.gcf();
    f_dpi=fig.get_dpi()
    #print "dpi: %i" % (f_dpi)
    
    #f_size=fig.get_size_inches()
    #print "figure size in Inches", f_size
    #print "or pixels %i x %i pixel" % (f_dpi*f_size[0], f_dpi*f_size[1])
    #fig.set_size_inches([f_size[0]*2, f_size[1]]);
    
    fig.set_size_inches(1200.0/f_dpi, 500.0/f_dpi);
    
    #f_size=fig.get_size_inches()
    #print "resized to %i x %i pixel" % (f_dpi*f_size[0], f_dpi*f_size[1])

    
    plt.savefig(os.path.join(_LOCATION_CHARTS,"last_psd_res.png"), dpi=96, bbox_inches='tight')
    plt.clf()
    print "plotting psd_resampled finished"
Example #2
def fftplt1(x,Mch,dtrnd=True,demean=True):
	fs=1

	if demean:
		x = x - x.mean()

	if dtrnd:
		Pout = plt.psd(x, NFFT=Mch, detrend=pylab.detrend_linear, noverlap=Mch/2, Fs=fs)
	else:
		Pout = plt.psd(x, NFFT=Mch, detrend=pylab.detrend_none, noverlap=Mch/2, Fs=fs)

	xdtrnd = pylab.detrend_linear(x)
	xauto = mcmath.acorr_mlab(xdtrnd,2)
	rhoxauto = (xauto[1] + np.sqrt(abs(xauto[2])))/2
	R = mcmath.redspec(rhoxauto,np.arange(Mch/2),Mch)

	P = Pout[0][:R.size]
	F = Pout[1][:R.size]
	Psum = P.sum()
	Rsum = R.sum()
	PRratio = Psum/Rsum
	Rcmp = R*PRratio

	plt.figure()
	plt.plot(F,P)
	plt.plot(F,Rcmp)

	return (F,P,Rcmp)
Example #3
 def inspect_source_psd(self, ic):
     data = self._get_ica_data()
     source = self.ica._transform_epochs(data, concatenate=True)[ic]
     sfreq = data.info['sfreq']
     plt.figure()
     plt.psd(source, Fs=sfreq, NFFT=128, noverlap=0, pad_to=None)
     plt.show()
Example #4
def fftplt3(x,Mch,pval=0.1,dtrnd=True,demean=True,titlestr='',Dt=0.01):
	"""Differs from fftplt3 solely by plotting the power spectral density
	in log-form, 10*log10(P)."""
	fs=1
	titl_plc = (1.,1.)

	if demean:
		x = x - x.mean()

	if dtrnd:
		Pout = plt.psd(x, NFFT=Mch, detrend=pylab.detrend_linear, noverlap=Mch/2, Fs=fs)
	else:
		Pout = plt.psd(x, NFFT=Mch, detrend=pylab.detrend_none, noverlap=Mch/2, Fs=fs)

	xdtrnd = pylab.detrend_linear(x)
	xauto = mcmath.acorr_mlab(xdtrnd,2)
	rhoxauto = (xauto[1] + np.sqrt(abs(xauto[2])))/2
	R = mcmath.redspec(rhoxauto,np.arange(Mch/2),Mch)

	P = Pout[0][:R.size]
	F = Pout[1][:R.size]
	Psum = P.sum()
	Rsum = R.sum()
	PRratio = Psum/Rsum
	Rcmp = R*PRratio

	dof = (x.size / (Mch/2.)) * 1.2
	Fval = stats.f.isf(pval,dof,dof)

	tst = P / Rcmp
	pass1 = np.where(tst > Fval)[0]
	maxs = mcmath.extrema_find(P,'max',t=F,dt=Dt)
	max_ind = np.intersect1d(maxs,pass1)
	Fmaxs = F[max_ind]
	Fmaxs2 = np.append(Fmaxs,0.1)
	Fmaxs2.sort()
	Tmaxs = 1/(Fmaxs2)
	Tmaxs = np.round(Tmaxs,2)

	ax = plt.gca()
	ax.plot(F,10*np.log10(Rcmp))

	ax.set_xticks(Fmaxs2)
	ax.set_xticklabels(Tmaxs)
	my_fmt_xdate(ax,rot=90,hal='center')
	multiline(Fmaxs,c='red',ls='--')

	xtl = ax.get_xticklabels()
	ytl = ax.get_yticklabels()
	plt.setp(xtl,'size',10)
	plt.setp(ytl,'size',9)

	ppct = int((1 - pval)*100)
	titl_str = '%s FFT (chunksize: %d) :: peak CL: %d%%' % (titlestr,Mch,ppct)
	plt.text(titl_plc[0],titl_plc[1],titl_str,ha='right',va='bottom',size=12,transform=ax.transAxes)
	plt.xlabel('Peak period (yrs)',size=11)


	return (F,P,Rcmp)
Example #5
def do_check_spectrum(hostData, DUTData, samplingRate, fLow, fHigh, margainLow, margainHigh):
    # reduce FFT resolution to have averaging effects
    N = 512 if (len(hostData) > 512) else len(hostData)
    iLow = N * fLow / samplingRate + 1  # 1 for DC
    if iLow > (N / 2 - 1):
        iLow = N / 2 - 1
    iHigh = N * fHigh / samplingRate + 1  # 1 for DC
    if iHigh > (N / 2 + 1):
        iHigh = N / 2 + 1
    print fLow, iLow, fHigh, iHigh, samplingRate

    Phh, freqs = plt.psd(
        hostData,
        NFFT=N,
        Fs=samplingRate,
        Fc=0,
        detrend=plt.mlab.detrend_none,
        window=plt.mlab.window_hanning,
        noverlap=0,
        pad_to=None,
        sides="onesided",
        scale_by_freq=False,
    )
    Pdd, freqs = plt.psd(
        DUTData,
        NFFT=N,
        Fs=samplingRate,
        Fc=0,
        detrend=plt.mlab.detrend_none,
        window=plt.mlab.window_hanning,
        noverlap=0,
        pad_to=None,
        sides="onesided",
        scale_by_freq=False,
    )
    print len(Phh), len(Pdd)
    print "Phh", abs(Phh[iLow:iHigh])
    print "Pdd", abs(Pdd[iLow:iHigh])
    amplitudeRatio = np.sqrt(abs(Pdd[iLow:iHigh] / Phh[iLow:iHigh]))
    ratioMean = np.mean(amplitudeRatio)
    amplitudeRatio = amplitudeRatio / ratioMean
    print "Normialized ratio", amplitudeRatio
    print "ratio mean for normalization", ratioMean
    positiveMax = abs(max(amplitudeRatio))
    negativeMin = abs(min(amplitudeRatio))
    passFail = (
        True if (positiveMax < (margainHigh / 100.0 + 1.0)) and ((1.0 - negativeMin) < margainLow / 100.0) else False
    )
    RatioResult = np.zeros(len(amplitudeRatio), dtype=np.int16)
    for i in range(len(amplitudeRatio)):
        RatioResult[i] = amplitudeRatio[i] * 1024  # make fixed point
    print "positiveMax", positiveMax, "negativeMin", negativeMin
    return (passFail, negativeMin, positiveMax, RatioResult)
def problem_5():
    """
    This function uses pyplot.psd to plot the spectrum for each of the time
    series generated above.

    Inputs:
        None

    Outputs:
        This will automatically generate a plot for each of the 4 data-sets.
    """

    plt.figure()
    plt.psd(gdp)
    plt.title('GDP Spectrum')
    plt.show()

    plt.figure()
    plt.psd(cpi)
    plt.title('CPI Spectrum')
    plt.show()

    plt.figure()
    plt.psd(cons)
    plt.title('Consumption Spectrum')
    plt.show()

    plt.figure()
    plt.psd(inv)
    plt.title('Investment Spectrum')
    plt.show()
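problem_5 relies on the pyplot.psd defaults (NFFT=256, Fs=2, Hann window, no overlap), so its frequency axis is in normalized units. A small sketch, with a random-walk stand-in for the gdp series, of how passing the actual sampling rate only rescales the axis (Fs=4 here assumes quarterly data, purely for illustration):

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
gdp_like = np.cumsum(rng.standard_normal(400))   # stand-in for the gdp series

plt.figure()
plt.psd(gdp_like)              # default Fs=2: frequency axis runs from 0 to 1
plt.psd(gdp_like, Fs=4)        # quarterly sampling: axis becomes cycles per year
plt.title('GDP-like Spectrum')
plt.show()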
def chickling_csd_2ch(shotno, date=time.strftime("%Y%m%d")):

	fname, data = file_finder(shotno,date)
	data = quickextract_data(fname)

	#reshape the array of x points (20M for 1s) into a 2d array each with 40k segments.
	phasediff_co2 = np.unwrap(data[0]['phasediff_co2'])
	phasediff_hene = np.unwrap(data[0]['phasediff_hene'])
	fs = data[0]['samplerate']
	
	plt.figure("2 Channels | Blue = PSD Scene | Orange = PSD Reference | Green = CSD | shot " + str(shotno) +  " Date " + str(date))
	plt.psd(phasediff_co2, Fs=fs)
	plt.psd(phasediff_hene,Fs=fs)
	plt.csd(phasediff_co2, phasediff_hene, Fs=fs)
	plt.show()
Example #8
def main(argv):
    name = raw_input("Enter file prefix: ")

    infileprefix = "out_%s" % name
    outfilename = 'pyout_%s.csv' % name

    FFT_SIZE = 64

    freq_to_power = {}
    for filename in sorted(listdir("out/")):
        if filename.startswith(infileprefix):
            Fc = int(
                sub(r'[_\.csv]', '',
                    findall(r'_[0-9]+\.csv', filename)[0]))
            print(Fc)
            with open(path.join("out", filename), "rb") as file:
                samples = bytes2iq(bytearray(file.read()))
                mean = np.mean(samples)
                samples = samples - mean

                Ps, fs = plt.psd(samples, NFFT=FFT_SIZE, Fs=20, Fc=Fc)

                for i in range(len(Ps)):
                    if (fs[i] % 1.0 == 0.0):
                        freq_to_power[fs[i]] = np.log(Ps[i])

    with open(path.join('out', outfilename), 'a') as out:
        for freq in freq_to_power:
            out.write('%s,%s\n' % (freq, freq_to_power[freq]))
Example #9
def calculate_amplitude(st):
    """
    Calculate a noise proxy for the first trace in that station.
    """
    res = plt.psd(st[0].data,Fs=st[0].stats.sampling_rate,NFFT=4096,noverlap=4096/2)
    amp_sum = np.sum(res[0])
    return amp_sum
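plt.psd draws into the current axes even though calculate_amplitude only needs the numbers; matplotlib.mlab.psd computes the same Welch estimate without creating a plot. A sketch of that variant, with synthetic data standing in for the obspy trace:

import numpy as np
from matplotlib import mlab

def calculate_amplitude_noplot(data, sampling_rate):
    # same parameters as above, but no figure side effects
    pxx, freqs = mlab.psd(data, NFFT=4096, Fs=sampling_rate, noverlap=4096 // 2)
    return np.sum(pxx)

print(calculate_amplitude_noplot(np.random.randn(100000), 100.0))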
Example #10
def plt_walking_psd(data, sig, n_peaks=6, nFFT=256, delta=10, show_peaks=False):
    plt.figure()
    for si in list(set(data.subj)):
        di = data[data.subj == si]
        di = di[di.act == 4]
        x = di[sig][:]
        a,b,l = plt.psd(x, nFFT, 52., color='k', return_line=True)
        # grab data from the plot
        pwr, fq = l[0].get_ydata(), l[0].get_xdata()
        #p = peak_detection(pwr, n_peaks, 0, .02, int(pk_dist*52))
        #print pwr, fq  
        mx, mn = peakdet(pwr, delta=delta)
        mx, mn = mx.astype(int), mn.astype(int)

 
        if show_peaks:
            #for pi in p:
            #print pi
            #plt.plot(fq[pi[0]], pi[1], 'ro')
            for p in mx:
                plt.plot(fq[p[0]], p[1], 'go')
            for p in mn:
                plt.plot(fq[p[0]], p[1], 'ro')

    plt.tight_layout()
    plt.show()
Example #11
def load_audio_mfcc_plus(path, category, fileid):
    print(fileid)
    audio_file = mp.AudioFileClip(path, fps=16000);
    audio = audio_file.to_soundarray()
    audio = (audio[:, 0] + audio[:, 1]) / 2
    mfcc_structure = psf.mfcc(audio, samplerate=16000, winlen=0.576, winstep=0.576, nfft=16384, numcep=26, nfilt=52)

    
    mfcc_structure = np.asarray(mfcc_structure) 
    
    #plt.show()
    r = int(len(mfcc_structure[:,0]))
    for i in range(0, r):
        a = audio[i * 9216 : (i + 1) * 9216]
        m = mfcc_structure[i,:]
    
        zero_crossings       = ((a[:-1] * a[1:]) < 0).sum() # Source: https://stackoverflow.com/questions/30272538/python-code-for-counting-number-of-zero-crossings-in-an-array
        zero_crossings       = zero_crossings / (10 ** 3)
        maximum_amplitude    = np.max(plt.psd(a)[0])
        spectral_centroid    = librosa.feature.spectral_centroid(y=a, n_fft=16384, sr=16000)
        spectral_centroid    = np.resize(spectral_centroid, (1, 11))
        spectral_centroid    = spectral_centroid / (10 ** 3)
    
        m = np.append(m, zero_crossings)
        m = np.append(m, maximum_amplitude)
        m = np.append(m, spectral_centroid)
        m = utils.normalize(m)
        spect_list_mfcc_plus.append(m)
        category_list_mfcc_plus.append(category)
    audio_file.close()
Example #12
 def readPSD(self, width: int):
     samples = self.read(width)
     return psd(samples,
                NFFT=1024,
                Fs=self.radio.sample_rate / 1e6,
                Fc=self.radio.center_freq / 1e6,
                return_line=True)
Example #13
def _plot_fft(tdms, **kargs):
	ti = kargs['ti']
	fft_len = kargs['fft_len']
	fft_scale = kargs['fft_scale']
	p, f = plt.psd(tdms.wav.__getslice__(*ti),
			fft_len,
			tdms.fs)
	if fft_scale == 'db':
		plt.ylabel(r'Amplitude $\frac{mV^2}{Hz}$ ($dB$)')
	elif fft_scale == 'linear':
		plt.cla()
		plt.plot(f, p)
		plt.ylabel(r'Amplitude $\frac{mV^2}{Hz}$')
	plt.xlabel(u'Frequency ($Hz$)')
	return
	fft = np.fft.fft(tdms.wav.__getslice__(*ti), fft_len)[:fft_len/2]

	if fft_unit == 'pow':
		fft = fft * fft.conjugate() / fft_len
	elif fft_unit == 'amp':
		fft = np.abs(fft / fft_len)
		plt.ylabel('Amplitude mV/Hz')

	plt.plot(np.fft.fftfreq(fft_len, 1/tdms.fs)[:fft_len/2], fft)
	plt.grid()
def compute_and_plot_psd(data, sfreq, NFFT=512, show=True):
    """ Computes the power spectral density and produces a plot.

    Parameters
    ----------
    data : ndarray (n_times)
        The signal.
    sfreq : float
        Sampling frequency.
    NFFT : int (power of 2)
        Number of bins for each block of FFT.
    show : bool
        Display or hide plot. (Default is True)

    Returns
    -------
    power : ndarray
        The power spectral density of the signal.
    freqs : ndarray
        Frequencies.
    """

    if show is False:
        pl.ioff()
    power, freqs = pl.psd(data, Fs=sfreq, NFFT=NFFT)

    return power, freqs
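A usage sketch for the helper above, assuming pl is matplotlib.pyplot (the excerpt does not show its imports); the test signal is synthetic:

import numpy as np
import matplotlib.pyplot as pl

sfreq = 256.0
t = np.arange(0, 10, 1 / sfreq)
sig = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(t.size)

power, freqs = compute_and_plot_psd(sig, sfreq, NFFT=512, show=False)
print(freqs[np.argmax(power)])   # dominant frequency, expected near 10 Hz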
Example #15
    def calc_heart_rate_freq(self, signal, fs):

        # signal = self.detrend(signal, 8)
        b, a = sig.butter(3, .05, btype='low')  # currently high pass

        # Remove DC Baseline
        signal = self.detrend(signal, 4)

        # Filter low values
        signal = sig.lfilter(b, a, signal)

        # signal = self.detrend(signal, 4)

        #Take the PSD of the signal
        #Calculate HR based on find peak
        Pxx, Freqs = plt.psd(signal, NFFT=len(signal), Fs=fs)

        plt.show()

        # Filter the Pxx
        # Pxx = sig.lfilter(b,a,Pxx)

        slice_from = 3

        peaks, _ = sig.find_peaks(Pxx[slice_from:], distance=20)

        heart_freq = Freqs[slice_from:][peaks[0]]

        return heart_freq * 60
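A self-contained sketch of the same idea (PSD peak converted to beats per minute) on a synthetic pulse-like signal; it skips the class's detrend/filter helpers and uses argmax instead of scipy's find_peaks, so it is an illustration rather than the method above:

import numpy as np
import matplotlib.pyplot as plt

fs = 50.0                                   # assumed PPG/camera sample rate
t = np.arange(0, 30, 1 / fs)
heart_hz = 1.2                              # about 72 bpm
pulse = np.sin(2 * np.pi * heart_hz * t) + 0.3 * np.random.randn(t.size)

Pxx, Freqs = plt.psd(pulse, NFFT=len(pulse), Fs=fs)
plt.close()

slice_from = 3                              # skip the DC-adjacent bins, as above
peak = np.argmax(Pxx[slice_from:])
print(Freqs[slice_from:][peak] * 60)        # estimated heart rate in bpm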
Example #16
    def psd(self):
        if self.is_on_cuda:
            self.cpu()

            plt.psd(self[0],
                    NFFT=16384,
                    Fs=self.fs_in_fiber,
                    window=np.hamming(16384))
            plt.show()
            self.cuda()
        else:
            plt.psd(self[0],
                    NFFT=16384,
                    Fs=self.fs_in_fiber,
                    window=np.hamming(16384))
            plt.show()
Example #17
def analyze_validation_data(station_id, validation_date, data_type):
     duration=0.5
     decimation_level=5
     decimation_frequency=1/decimation_level   
     vd=ValidationData()

     vd.populate(station_id,validation_date,data_type)
     for wdx in np.arange(48):
          selected_data=vd.data[int(wdx*1800):int((wdx+1)*1800)]
          validation_data=signal.detrend(signal.decimate(selected_data,decimation_level,ftype='iir', axis=-1, zero_phase=True))
          Pxx, freqs = plt.psd(validation_data, NFFT=1024, Fs=decimation_frequency, detrend='mean',scale_by_freq=True)
          if wdx==0:
              X_val=Pxx[0:100]/np.max(Pxx[0:100])    
          else:
              X_val=np.vstack((X_val,Pxx[0:100]/np.max(Pxx[0:100])))

     X_validation = preprocessing.scale(X_val,0)     
#     X_validation = X_val    

     prediction=clf.predict(X_validation)

     prediction_proba=clf.predict_proba(X_validation)

     vd.plot_prediction(prediction, prediction_proba, Fs=decimation_frequency,title=str(data_type)+'_'+str(validation_date.date()), x_label='UTC (hh:mm)', y_label='Frequency (Hz)')
     classes = {0: 'noisy', 1: 'clean'}
     colors = {0: 'red', 1: 'blue'}
     fig, axes = plt.subplots(8,6)
     fig.subplots_adjust(hspace=1)     
     for ax, wdx in zip(axes.flatten(), np.arange(48)):
          ax.plot(freqs[0:100], X_validation[wdx], colors[prediction[wdx]])
          ax.set(title=','.join((classes[prediction[wdx]],str(wdx))).upper())
     fig.set_size_inches(37, 10) 
     plt.savefig(validation_date.strftime('%Y%m%d')+'_windows.png') 
Example #18
def do_check_spectrum_playback(hostData, samplingRate, fLow, fHigh, margainLow, margainHigh):
    # reduce FFT resolution to have averaging effects
    N = 512 if (len(hostData) > 512) else len(hostData)
    iLow = N * fLow / samplingRate + 1 # 1 for DC
    if iLow > (N / 2 - 1):
        iLow = (N / 2 - 1)
    iHigh = N * fHigh / samplingRate + 1 # 1 for DC
    if iHigh > (N / 2 + 1):
        iHigh = N / 2 + 1
    print fLow, iLow, fHigh, iHigh, samplingRate

    Phh, freqs = plt.psd(hostData, NFFT=N, Fs=samplingRate, Fc=0, detrend=plt.mlab.detrend_none,\
        window=plt.mlab.window_hanning, noverlap=0, pad_to=None, sides='onesided',\
        scale_by_freq=False)
    print len(Phh)
    print "Phh", abs(Phh[iLow:iHigh])
    spectrum = np.sqrt(abs(Phh[iLow:iHigh]))
    spectrumMean = np.mean(spectrum)
    spectrum = spectrum / spectrumMean
    print "Mean ", spectrumMean
    print "Normalized spectrum", spectrum
    positiveMax = abs(max(spectrum))
    negativeMin = abs(min(spectrum))
    passFail = True if (positiveMax < (margainHigh / 100.0 + 1.0)) and\
        ((1.0 - negativeMin) < margainLow / 100.0) else False
    spectrumResult = np.zeros(len(spectrum), dtype=np.int16)
    for i in range(len(spectrum)):
        spectrumResult[i] = spectrum[i] * 1024 # make fixed point
    print "positiveMax", positiveMax, "negativeMin", negativeMin
    return (passFail, negativeMin, positiveMax, spectrumResult)
Example #19
def doPSD(sdr, centerFrequency):
    """ Odcita vzorky, ktore potom spracuje a vykresli do grafu.
        Funkcia konci ulozenim grafu do .png formatu a ukoncenim kernel modulu."""

    global hodnoty, frekvencie
    # configuring device
    sdr.sample_rate = 2.4e6
    sdr.center_freq = centerFrequency
    sdr.gain = 7.1  # Supported gain values: [-9.9; -4; 7.1; 17.9; 19.2]

    samples = sdr.read_samples(number_of_FFT *
                               256)  # == 262,144 complex numbers

    # Using matplotlib.pyplot to estimate and plot the Power Spectral Density
    result = plt.psd(samples,
                     NFFT=number_of_FFT,
                     Fs=sdr.sample_rate,
                     Fc=sdr.center_freq)

    hodnoty = result[0]  # 'hodnoty' holds the power values (absolute magnitude)
    frekvencie = result[
        1]  # 'frekvencie' holds the frequencies corresponding to the entries of 'hodnoty'

    # Save figure (Line2D object)
    # Makes the plot of ['frekvencie', 'hodnoty'], but the values have already been converted to dB as 10*log_10(hodnoty[_])
    plt.savefig('/var/www/html/obrazky/power_spectral_density.png')
    # Clearing figure
    plt.clf()
Example #20
def compute_and_plot_psd(data, sfreq, NFFT=512, show=True):
    """ Computes the power spectral density and produces a plot.

    Parameters
    ----------
    data : ndarray (n_times)
        The signal.
    sfreq : float
        Sampling frequency.
    NFFT : int (power of 2)
        Number of bins for each block of FFT.
    show : bool
        Display or hide plot. (Default is True)

    Returns
    -------
    power : ndarray
        The power spectral density of the signal.
    freqs : ndarray
        Frequencies.
    """

    if show is False:
        pl.ioff()
    power, freqs = pl.psd(data, Fs=sfreq, NFFT=NFFT)

    return power, freqs
Example #21
def psd_processing(dev_id: str, time_range=['now() - 40m', 'now() - 30m']):
    cli = InfluxData()
    cli.set_client(config.INFLUXDB_HOST,
                   config.INFLUXDB_PORT,
                   config.INFLUXDB_ID,
                   config.INFLUXDB_PASSWORD,
                   database=config.INFLUXDB_DATABASE)
    if time_range[0] == 'now() - 40m':
        rs = cli.query(
            f"select * from acc_data where dev_id='{dev_id}' and time >= {time_range[0]} AND time <= {time_range[1]}"
        )
    else:
        rs = cli.query(
            f"select * from acc_data where dev_id='{dev_id}' and time >= '{time_range[0]}' AND time <= '{time_range[1]}'"
        )
    data = cli.resultSetToDF(rs)
    if type(data['acc_data']) is list:  #  it means empty data
        logging.warning('empty data')
        return dev_id
    if len(data['acc_data']
           ) < EXPECT_DATASET_SIZE * 0.9:  # not enough data size
        logging.warning('not enough data size')
        return dev_id
    data = data['acc_data']  # convert dict_dataframe to dataframe
    x = data['x'] * constants.g
    y = data['y'] * constants.g
    z = data['z'] * constants.g
    offset_x = x - np.mean(x)
    offset_y = y - np.mean(y)
    offset_z = z - np.mean(z)
    Pxx, freqs_x = plt.psd(offset_x, NFFT=200, Fs=100)
    Pyy, freqs_y = plt.psd(offset_y, NFFT=200, Fs=100)
    Pzz, freqs_z = plt.psd(offset_z, NFFT=200, Fs=100)
    row = {
        'dev_id': dev_id,
        'Mean_PSD_x': np.mean(Pxx),
        'Mean_PSD_y': np.mean(Pyy),
        'Mean_PSD_z': np.mean(Pzz),
        'MAX_PSD_x': np.max(Pxx),
        'MAX_PSD_y': np.max(Pyy),
        'MAX_PSD_z': np.max(Pzz),
        'MIN_PSD_x': np.min(Pxx),
        'MIN_PSD_y': np.min(Pyy),
        'MIN_PSD_z': np.min(Pzz)
    }
    return row
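A minimal sketch of the PSD-summary step above on synthetic vibration data, skipping the InfluxDB query; constants.g in the original is presumably scipy.constants.g, and the 12.5 Hz tone is arbitrary:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
fs = 100                                     # Hz, matching Fs=100 above
t = np.arange(0, 60, 1 / fs)
x = 0.02 * np.sin(2 * np.pi * 12.5 * t) + 0.005 * rng.standard_normal(t.size)

offset_x = x - np.mean(x)                    # remove the DC offset first
Pxx, freqs_x = plt.psd(offset_x, NFFT=200, Fs=fs)
plt.close()

print({'Mean_PSD_x': np.mean(Pxx), 'MAX_PSD_x': np.max(Pxx), 'MIN_PSD_x': np.min(Pxx)})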
Example #22
 def readPSD(self, width: int):
     samples = self.read(width)
     return psd(samples,
                NFFT=1024,
                Fs=self.radio.sample_rate / 1e6,
                Fc=self.radio.center_freq / 1e6,
                return_line=True)
     # matplotlib.pyplot.savefig("./scan.png")
Example #23
    def plot_PSD(self, sensor: str):
        """
        Plots Auto Power Spectrum

        :param sensor: The value of the column name from the loaded files where the desired information is. [SENSOR NAME]
        :param sampling_freq: The signal sampling frequency.

        :return: Instance so that it can be linearly written in code.
        """

        plt.psd(self.data_read[sensor], Fs=self.sampling_rate)

        # Setup Plot Parameters.
        plt.title('Auto-Power Spectrum of ' + sensor)
        plt.show()

        return self  # Return Instance so that it can be linearly written in code.
Example #24
 def psd(self):
     if self.is_on_cuda:
         self.cpu()
         plt.figure()
         plt.psd(self.ds_in_fiber[0],
                 NFFT=16384,
                 Fs=self.fs_in_fiber,
                 scale_by_freq=True)
         self.cuda()
         plt.show()
     else:
         plt.figure()
         plt.psd(self.ds_in_fiber[0],
                 NFFT=16384,
                 Fs=self.fs_in_fiber,
                 scale_by_freq=True)
         plt.show()
    def psd_maker(self):
        """
        Adds a power and corresponding frequency column to the data.

        Args:
            data (dataFrame): df with relevant eye data. Assumes data from only ONE feature_1
            feature_1 (dataFrame column): feature from dataframe to filter unique values
            feature_2 (dataFrame column): feature from dataframe to filter unique values
            signal (dataFrame column): signal the PSD calculation will be run on

        Returns:
        
           output_df (dataFrame): original dataFrame with power and frequency columns with the same values 
           within each unique feature_2

        """

        # Establish the output df
        df = pd.DataFrame(
            columns=[self.feature_2, 'power', 'freq', 'log_power'])

        # The actual loop. Meant to run on one person at a time
        for x in self.data[self.feature_1].unique():
            df_1 = self.data[self.data[self.feature_1] == x]
            for y in df_1[self.feature_2].unique():
                # Subset data to just one feature_2 at a time per feature_1
                q_data = df_1[df_1[self.feature_2] == y]

                # Window size should always be bigger than the length of the data, and ideally a power of 2.
                nfft = next_power_of_2(len(q_data))

                # Do the actual psd.
                power, freq = plt.psd(q_data[self.signal],
                                      Fs=110,
                                      NFFT=nfft,
                                      window=signal.get_window('hamming',
                                                               Nx=nfft,
                                                               fftbins=True),
                                      detrend='mean',
                                      pad_to=nfft,
                                      noverlap=nfft / 2)
                plt.close()

                log_power = np.log(power)
                # Build the new row for the output df
                psd_row = pd.DataFrame(
                    [[y, power, freq, log_power]],
                    columns=[self.feature_2, 'power', 'freq', 'log_power'])

                # Add the next row of data
                df = pd.concat([df, psd_row])

        # Remerge w/ original data
        # power and frequency repeat each row w/i a question so if there's a more optimal way to store data do that
        output_df = self.data.merge(right=df, on=self.feature_2)
        self.data = output_df

        return self.data
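The excerpt calls a next_power_of_2 helper that is not shown; a common one-liner with the behavior the comment describes looks like this (a sketch, not necessarily the project's own definition):

def next_power_of_2(n):
    """Smallest power of two greater than or equal to n (and at least 1)."""
    return 1 if n <= 1 else 2 ** (int(n) - 1).bit_length()

print(next_power_of_2(110), next_power_of_2(256), next_power_of_2(300))  # 128 256 512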
Example #26
def main():
    L1norm = np.add(np.abs(data_array[:, 1]), np.abs(data_array[:, 2]),
                    np.abs(data_array[:, 3]))
    b, a = sig.butter(3, 0.2, btype='low')
    c, d = sig.butter(3, 0.5, btype='high')
    lowsignal_filtered = sig.lfilter(b, a, L1norm)
    highsignal_filtered = sig.lfilter(c, d, L1norm)
    plt.subplot(321)
    plt.plot(t, L1norm)
    plt.subplot(322)
    plt.psd(L1norm, NFFT=len(t), Fs=fs)
    plt.subplot(323)
    plt.plot(t, lowsignal_filtered)
    plt.subplot(324)
    plt.psd(lowsignal_filtered, NFFT=len(t), Fs=fs)
    plt.subplot(325)
    plt.plot(t, highsignal_filtered)
    plt.subplot(326)
    plt.psd(highsignal_filtered, NFFT=len(t), Fs=fs)
    plt.show()
    x = detrend(s, 15)
    #  plt.subplot(211)
    # plt.plot(t, x)
    #  plt.subplot(212)
    # plt.psd(x, NFFT=len(t), Fs=fs) #plot the power spectral density
    # plt.show()
    Pxx, freqs = plt.psd(x, NFFT=len(t), Fs=fs)
    index = np.argmax(Pxx)
    maxpower = freqs[index]
    print(maxpower)
Example #27
 def calc_heart_rate_freq(signal, fs):
     b, a = sig.butter(3, .2, btype='low')
     signal = sig.detrend(signal, 4)
     signal = sig.lfilter(b, a, signal)
     Pxx, Freqs = plt.psd(signal, NFFT=len(signal), Fs=fs)
     slice_from = 3
     peaks, _ = sig.find_peaks(Pxx[slice_from:], distance=20)
     heart_freq = Freqs[slice_from:][peaks[0]]
     return heart_freq * 60
Example #28
def plot_psd_resampled():
    global t, ch1, ch2, sps, sampl, title

    plt.clf()
    nfft = 2048  #int(2**np.ceil(np.log2(sampl)));

    ax1 = plt.subplot(211)
    (Pxx, freqs) = plt.psd(ch1[::5],
                           NFFT=nfft,
                           Fs=sps / 5,
                           window=mlab.window_hanning,
                           color='red')
    plt.title('PSD (NFFT=' + str(nfft) + ', Fs=' + str(sps / 5) +
              'Hz, hann) from ' + title)
    plt.xlabel('')

    ax2 = plt.subplot(212)
    (Pxx, freqs) = plt.psd(ch2[::5],
                           NFFT=nfft,
                           Fs=sps / 5,
                           window=mlab.window_hanning,
                           color='green')
    plt.ylabel('')
    ax2.set_ylim(ax1.get_ylim())

    fig = plt.gcf()
    f_dpi = fig.get_dpi()
    #print "dpi: %i" % (f_dpi)

    #f_size=fig.get_size_inches()
    #print "figure size in Inches", f_size
    #print "or pixels %i x %i pixel" % (f_dpi*f_size[0], f_dpi*f_size[1])
    #fig.set_size_inches([f_size[0]*2, f_size[1]]);

    fig.set_size_inches(1200.0 / f_dpi, 500.0 / f_dpi)

    #f_size=fig.get_size_inches()
    #print "resized to %i x %i pixel" % (f_dpi*f_size[0], f_dpi*f_size[1])

    plt.savefig(os.path.join(_LOCATION_CHARTS, "last_psd_res.png"),
                dpi=96,
                bbox_inches='tight')
    plt.clf()
    print "plotting psd_resampled finished"
def pyplot_welch(data, NFFT_length, sampling_frequency, segment_pad_to_length):
    
    import numpy as np
    import matplotlib.pyplot as plt
    
    overlap = int(NFFT_length / 2)  # 50% segment overlap for Welch averaging
    Pxx, freqs = plt.psd(data, NFFT=NFFT_length, Fs=sampling_frequency,
                                pad_to=segment_pad_to_length, noverlap=overlap)
    
    return Pxx, freqs
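A quick usage sketch for the wrapper above; the 60 Hz tone plus noise input is arbitrary:

import numpy as np
import matplotlib.pyplot as plt

fs = 1000.0
t = np.arange(0, 5, 1 / fs)
x = np.sin(2 * np.pi * 60 * t) + np.random.randn(t.size)

Pxx, freqs = pyplot_welch(x, NFFT_length=1024, sampling_frequency=fs,
                          segment_pad_to_length=2048)
print(freqs[np.argmax(Pxx)])   # should sit near 60 Hz
plt.show()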
Example #30
def calculate_amplitude(st):
    """
    Calculate a noise proxy for the first trace in that station.
    """
    res = plt.psd(st[0].data,
                  Fs=st[0].stats.sampling_rate,
                  NFFT=4096,
                  noverlap=4096 / 2)
    amp_sum = np.sum(res[0])
    return amp_sum
Example #31
def plotPowerSpectrum(data, samplingRate, ROICount):

    plt.figure(3)

    for x in range(0, ROICount):

        plt.subplot(221 + x)
        plt.psd(data[x][1], NFFT=240, Fs=samplingRate, noverlap=230)
        plt.xlabel("frequency (Hz)")
        plt.ylabel("Power (Pxx)")

    plt.subplots_adjust(top=0.90,
                        bottom=0.10,
                        left=0.15,
                        right=0.95,
                        hspace=0.35,
                        wspace=0.45)

    plt.show()
def _test_make_TDMA_slot_():
    f_s = 2e9
    t_s = np.arange(2**16) / f_s
    # Test that power scales as expected
    T_symbol = (
        t_s.max() - t_s.min()
    ) / 2000.  # Total number of symbols, need >1000 for 1/N (>=200) to converge to < 10%
    s = ASK_series(t_s, f_s / MHz / 4., T_symbol)
    p = signal_power(s)
    for N in [3, 4, 5]:
        for S in range(N):
            s_slot = _make_TDMA_slot_(
                t_s, s, T_slot=T_symbol * 10, slot_no=S, N_slots=N
            )  # 10 symbols per slot, slots repeat every N*10 symbols
            assert_isclose(p / float(N),
                           signal_power(s_slot),
                           "Slot %d/%d power not scaled as 1/Nslots" % (S, N),
                           rtol=0.1)

    # A visual inspection
    pyplot.figure()
    pyplot.psd(s,
               Fs=f_s,
               NFFT=len(s) // 16,
               scale_by_freq=True,
               label="Continuous ASK signal")
    pyplot.psd(s_slot,
               Fs=f_s,
               NFFT=len(s) // 16,
               scale_by_freq=True,
               label="Slot=%d/%d ASK signal" % (S, N),
               hold=True)
    pyplot.legend()
    pyplot.title("_test_make_TDMA_slot_")

    pyplot.figure()
    pyplot.plot(t_s / 1e-6, s, label="Continuous ASK signal")
    pyplot.plot(t_s / 1e-6, s_slot, label="Slot=%d/%d ASK signal" % (S, N))
    pyplot.ylabel("amplitude")
    pyplot.xlabel("time [microsec]")
    pyplot.legend()
    pyplot.title("_test_make_TDMA_slot_")
Example #33
def plot():
    df = pd.read_csv('filename_var.get()')
    x = df['x']
    y = df['y']

    plt.figure("Time vs Frequency domain")

    plt.subplot(211)  #time domain
    plt.plot(y, x, 'r')
    plt.xlabel('Time [ns]')
    plt.ylabel('Value (DEC)')

    plt.title('Time and Frequency domain', fontsize=14)
    plt.show()
    plt.grid()

    plt.subplot(212)  #frequency domain
    plt.psd(x, 2048, 250e3)  #65536
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('PSD [db/Hz]')
Example #34
def plot_segment(filename, eeg, stage, step):
    plt.figure()
    _psd, f = plt.psd(eeg[step * fs * 30:(step + 1) * fs * 30],
                      Fs=fs,
                      return_line=False)
    PSD_W.append(_psd) if stage == 'W' else PSD_1.append(_psd)
    # plt.axis([0, 140, -90, -30])
    # plt.suptitle('PSD of ' + filename + ' using pyplot.psd()', fontweight ="bold")
    # plt.savefig('1220/PSD_' + filename + '_' + stage + '_' + str(step) + '.png')
    plt.close()
    pass
Example #35
def _filter_meg_label_ts_parallel(p):
    from mne.filter import high_pass_filter
    label_data, label_ind, fs, low_freq_cut_off, do_plot = p
    filter_label_data = np.empty(label_data.shape)
    E = label_data.shape[1]
    for epoch_ind in range(E):
        x = label_data[:, epoch_ind]
        if do_plot:
            plt.psd(x, Fs=fs)
            plt.show()
        x_filter = high_pass_filter(x,
                                    Fs=fs,
                                    Fp=low_freq_cut_off,
                                    n_jobs=1,
                                    verbose=False)
        if do_plot:
            plt.psd(x_filter, Fs=fs)
            plt.show()
        filter_label_data[:, epoch_ind] = x_filter
    return filter_label_data, label_ind
Example #36
def plotPSDMean(samples, area, monkey, color, color_mean):
    """
    plot the PSD mean and the PSD on the same graph for the samples
    """
    psd_lst = []
    f_axis = []
    psd, f = plt.psd(samples[0],
                     NFFT=1024,
                     Fs=SAMPLE_RATE,
                     noverlap=896,
                     color=color,
                     label="area " + str(area))
    for x in f:
        if x < 150:
            f_axis.append(x)
    psd_lst.append(psd[:len(f_axis)])
    i = 0
    for x in tqdm(samples):
        if i == 0:
            i += 1
            continue
        psd, f = plt.psd(x,
                         NFFT=1024,
                         Fs=SAMPLE_RATE,
                         noverlap=896,
                         color=color)
        psd_lst.append(psd[:len(f_axis)])

    plt.show()

    psd_lst = np.array(psd_lst)
    # for p in psd_lst:
    #     plt.plot(f_axis,10 * np.log10(p), color=color)

    psd_mean = psd_lst.mean(axis=0)
    psd_mean = 10 * np.log10(psd_mean)
    # plt.plot(f_axis, psd_mean, color=color_mean, label="Mean area "+str(area))
    # plt.title(monkey + " area " + str(area) + " | PSD mean and PSD")
    # plt.legend()
    # plt.show()
    return psd_lst, psd_mean, f_axis
Example #37
def test_audio_mfcc_plus(path, start, length, FPS):
    i = 0
    v = 0
    for this_start in range(start, start + length, 30):
        j = 0
        test_list_mfcc_plus = []
        result = []
        print(this_start)
        audio, video = load_movie(path, this_start, 30, FPS)
        audio_array = audio.to_soundarray()
        audio_array = (audio_array[:, 0] + audio_array[:, 1]) / 2
        mfcc_structure = psf.mfcc(audio_array, samplerate=16000, winlen=0.576, winstep=0.576, nfft=16384, numcep=26, nfilt=52)
        mfcc_structure = np.asarray(mfcc_structure)
        r = int(len(mfcc_structure[:,0]))
        for k in range(0, r):
            s = mfcc_structure[k,:]
            a = audio_array[k * 9056 : (k + 1) * 9056]
    
            zero_crossings       = ((a[:-1] * a[1:]) < 0).sum() # Source: https://stackoverflow.com/questions/30272538/python-code-for-counting-number-of-zero-crossings-in-an-array
            zero_crossings       = zero_crossings / (10 ** 3)
            maximum_amplitude    = np.max(plt.psd(a)[0])
            spectral_centroid    = librosa.feature.spectral_centroid(y=a, n_fft=16384, sr=16000)
            spectral_centroid    = np.resize(spectral_centroid, (1, 11))
            spectral_centroid    = spectral_centroid / (10 ** 3)
        
            s = np.append(s, zero_crossings)
            s = np.append(s, maximum_amplitude)
            s = np.append(s, spectral_centroid)
            s = utils.normalize(s)
            
            test_list_mfcc_plus.append(s)  
        
        for t in test_list_mfcc_plus:
            t = t.reshape(1, 1, 39, 1)
            result.append(cnn_mfcc_plus.predict(t))
        for res in result:
            m = max(res)
            m = max(m)
            i = i + 1
            j = j + 1
            if(res[0][0] == m):
                print("Segment " + str(i) + " is non-violent.")
                video.save_frame("Output/MFCC+/Non-Violent/Image/frame" + str(i) +".jpeg", t = (j - 1) * 0.566)
                wav.write("Output/MFCC+/Non-Violent/Sound/frame" + str(i) + ".wav", FPS, audio_array[int((j - 1) * FPS * 0.566):int(j * FPS * 0.566)])
            if(res[0][1] == m):
                v = v + 1
                print("Segment " + str(i) + " is violent.")
                video.save_frame("Output/MFCC+/Violent/Image/frame" + str(i) +".jpeg", t = (j - 1) * 0.566)
                wav.write("Output/MFCC+/Violent/Sound/frame" + str(i) + ".wav", FPS, audio_array[int((j - 1) * FPS * 0.566):int(j * FPS * 0.566)])
        video.close()
        audio.close()
    print("Amount of violence: " + str(v / i * 100) + "%")
Example #38
def test_compares_psd():
    """Test PSD estimation on raw for plt.psd and scipy.signal.welch
    """
    raw = io.Raw(raw_fname)

    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info,
                       meg='grad',
                       eeg=False,
                       stim=False,
                       exclude=exclude)[:2]

    tmin, tmax = 0, 10  # use the first 10 s of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
    n_fft = 2048

    # Compute psds with the new implementation using Welch
    psds_welch, freqs_welch = psd_welch(raw,
                                        tmin=tmin,
                                        tmax=tmax,
                                        fmin=fmin,
                                        fmax=fmax,
                                        proj=False,
                                        picks=picks,
                                        n_fft=n_fft,
                                        n_jobs=1)

    # Compute psds with plt.psd
    start, stop = raw.time_as_index([tmin, tmax])
    data, times = raw[picks, start:(stop + 1)]
    from matplotlib.pyplot import psd
    out = [psd(d, Fs=raw.info['sfreq'], NFFT=n_fft) for d in data]
    freqs_mpl = out[0][1]
    psds_mpl = np.array([o[0] for o in out])

    mask = (freqs_mpl >= fmin) & (freqs_mpl <= fmax)
    freqs_mpl = freqs_mpl[mask]
    psds_mpl = psds_mpl[:, mask]

    assert_array_almost_equal(psds_welch, psds_mpl)
    assert_array_almost_equal(freqs_welch, freqs_mpl)

    assert_true(psds_welch.shape == (len(picks), len(freqs_welch)))
    assert_true(psds_mpl.shape == (len(picks), len(freqs_mpl)))

    assert_true(np.sum(freqs_welch < 0) == 0)
    assert_true(np.sum(freqs_mpl < 0) == 0)

    assert_true(np.sum(psds_welch < 0) == 0)
    assert_true(np.sum(psds_mpl < 0) == 0)
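Outside of MNE, the same cross-check can be made directly between matplotlib's estimator and scipy.signal.welch. A standalone sketch; once segment length, window, overlap, detrending, and scaling are matched explicitly, the two should agree to numerical precision:

import numpy as np
from matplotlib import mlab
from scipy import signal

rng = np.random.default_rng(0)
fs, n_fft = 600.0, 2048
x = rng.standard_normal(60000)

# matplotlib: Hann window, no overlap, density scaling by default
pxx_mpl, f_mpl = mlab.psd(x, NFFT=n_fft, Fs=fs, noverlap=0)

# scipy: match the segment length, window, overlap and scaling explicitly
f_sp, pxx_sp = signal.welch(x, fs=fs, window=np.hanning(n_fft), nperseg=n_fft,
                            noverlap=0, detrend=False, scaling='density')

print(np.allclose(f_mpl, f_sp))
print(np.max(np.abs(pxx_mpl - pxx_sp) / pxx_sp))   # maximum relative difference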
Example #39
def plotpsd(data: np.ndarray, fs: int, nmode: str):
    ax = figure().gca()

    Pxx, f = psd(data, NFFT=NFFT, Fs=fs, label="measured")
    ax.set_xscale("log")
    ax.set_xlim((100, None))

    f2 = f[-2]
    a2 = 10 * np.log10(Pxx[-2])

    f1 = 100

    octaves = np.log2(f2 / f1)

    if nmode == "white":
        ax.axhline(a2, linestyle="--", color="black", label="theoretical")
    elif nmode == "pink":
        ax.plot(
            [f1, f2],
            [3 * octaves + a2, a2],
            linestyle="--",
            color="black",
            label="theoretical",
        )
    elif nmode == "blue":
        ax.plot(
            [f1, f2],
            [-3 * octaves + a2, a2],
            linestyle="--",
            color="black",
            label="theoretical",
        )
    elif nmode == "brown":
        ax.plot(
            [f1, f2],
            [6 * octaves + a2, a2],
            linestyle="--",
            color="black",
            label="theoretical",
        )
    elif nmode == "violet":
        ax.plot(
            [f1, f2],
            [-6 * octaves + a2, a2],
            linestyle="--",
            color="black",
            label="theoretical",
        )

    ax.set_title(f"{nmode} noise")
    ax.legend(loc="best")
Example #40
def wdm_test():
    # grid_size = np.array([50, 50, 75, 50, 50, 75, 50, 100, 50, 75, 100, 75, 100])
    # grid = [0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1]
    # start_freq = 193.1e12
    grid_size = np.array([50e9, 50e9, 50e9, 50e9])
    grid = [1, 1, 1, 1]

    modu = []

    for i, g in enumerate(grid_size):
        modu.append(
            QamModulator(Laser(193.1e12 + g * i, 0, False), IQ(),
                         PulseShaping(0.2, 1024), DAC(),
                         SignalParam(sps_in_fiber=14)))

        modu[-1].modulate('cpu')

    wdm = Mux(modu, grid_size, 193.1e12, grid=grid)
    wdm_signal = wdm.mux_signal('cpu')
    print(len(wdm_signal.symbols))
    import matplotlib.pyplot as plt
    plt.psd(wdm_signal.samples_in_fiber[0])
    plt.show()
Example #41
def plot_psd():
    global t, ch1, ch2, sps, sampl, title
    
    plt.clf()
    nfft=2048 #int(2**np.ceil(np.log2(sampl)));

    ax1=plt.subplot(211)
    (Pxx, freqs)=plt.psd(ch1, NFFT=nfft, Fs=sps, window=mlab.window_hanning)
    plt.title('PSD (NFFT='+str(nfft)+', Fs='+str(sps)+'Hz, hann) from '+title)
    plt.xlabel('')

    ax2=plt.subplot(212)
    (Pxx, freqs)=plt.psd(ch2, NFFT=nfft, Fs=sps, window=mlab.window_hanning)
    plt.ylabel('')
    ax2.set_ylim(ax1.get_ylim())

    fig=plt.gcf()
    f_dpi=fig.get_dpi()    
    fig.set_size_inches(1200.0/f_dpi, 500.0/f_dpi);

    plt.savefig(os.path.join(_LOCATION_CHARTS,"last_psd.png"), dpi=96, bbox_inches='tight')
    plt.clf()
    print "plotting psd finished"
Example #42
def plot_psd_OFDM_symbols():  # pragma: no cover
    """Plot the power spectral density of OFDM modulated symbols.

    This function is not used in any unittest, but it is interesting to
    visualize that the modulate method of the OFDM class is working as it
    should.
    """
    from matplotlib import pyplot as plt
    # xxxxxxxxxx OFDM Details xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    fft_size = 64
    cp_size = 12
    num_used_subcarriers = 52
    ofdm_object = ofdm.OFDM(fft_size, cp_size, num_used_subcarriers)
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxx Input generation (not part of OFDM) xxxxxxxxxxxxxxxxxxxxxx
    num_bits = 2500
    # generating 1's and 0's
    ip_bits = np.random.random_integers(0, 1, num_bits)
    # Number of modulated symbols
    # num_mod_symbols = num_bits * 1
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # BPSK modulation
    # bit0 --> -1
    # bit1 --> +1
    ip_mod = 2 * ip_bits - 1

    # OFDM Modulation
    output = ofdm_object.modulate(ip_mod)

    # xxxxxxxxxx Plot the PSD xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # MATLAB code to plot the power spectral density
    # close all
    fsMHz = 20e6
    Pxx, W = plt.psd(output, NFFT=fft_size, Fs=fsMHz)
    # [Pxx,W] = pwelch(output,[],[],4096,20);
    plt.plot(
        W,
        10 * np.log10(Pxx)
    )
    plt.xlabel('frequency, MHz')
    plt.ylabel('power spectral density')
    plt.title('Transmit spectrum OFDM (based on 802.11a)')
    plt.show()
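For readers without the ofdm module used above, a bare-numpy sketch of the same visual check: BPSK mapped onto 52 of 64 subcarriers, a cyclic prefix of 12 samples, then plt.psd. The subcarrier layout here is a simplification for illustration, not necessarily the library's exact mapping:

import numpy as np
import matplotlib.pyplot as plt

fft_size, cp_size, n_used = 64, 12, 52
n_symbols = 200
rng = np.random.default_rng(0)

bits = rng.integers(0, 2, size=(n_symbols, n_used))
bpsk = 2 * bits - 1                                  # bit0 -> -1, bit1 -> +1

# place the used subcarriers around DC (DC left empty), IFFT, prepend the CP
spectrum = np.zeros((n_symbols, fft_size), dtype=complex)
spectrum[:, 1:n_used // 2 + 1] = bpsk[:, :n_used // 2]
spectrum[:, -n_used // 2:] = bpsk[:, n_used // 2:]
time_symbols = np.fft.ifft(spectrum, axis=1)
with_cp = np.hstack([time_symbols[:, -cp_size:], time_symbols])
tx = with_cp.ravel()

plt.psd(tx, NFFT=fft_size, Fs=20e6)                  # 20 MHz sampling, as above
plt.title('Transmit spectrum OFDM (numpy sketch)')
plt.show()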
Example #43
def test_compares_psd():
    """Test PSD estimation on raw for plt.psd and scipy.signal.welch
    """
    raw = io.read_raw_fif(raw_fname)

    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
                       exclude=exclude)[:2]

    tmin, tmax = 0, 10  # use the first 10 s of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
    n_fft = 2048

    # Compute psds with the new implementation using Welch
    psds_welch, freqs_welch = psd_welch(raw, tmin=tmin, tmax=tmax,
                                        fmin=fmin, fmax=fmax,
                                        proj=False, picks=picks,
                                        n_fft=n_fft, n_jobs=1)

    # Compute psds with plt.psd
    start, stop = raw.time_as_index([tmin, tmax])
    data, times = raw[picks, start:(stop + 1)]
    from matplotlib.pyplot import psd
    out = [psd(d, Fs=raw.info['sfreq'], NFFT=n_fft) for d in data]
    freqs_mpl = out[0][1]
    psds_mpl = np.array([o[0] for o in out])

    mask = (freqs_mpl >= fmin) & (freqs_mpl <= fmax)
    freqs_mpl = freqs_mpl[mask]
    psds_mpl = psds_mpl[:, mask]

    assert_array_almost_equal(psds_welch, psds_mpl)
    assert_array_almost_equal(freqs_welch, freqs_mpl)

    assert_true(psds_welch.shape == (len(picks), len(freqs_welch)))
    assert_true(psds_mpl.shape == (len(picks), len(freqs_mpl)))

    assert_true(np.sum(freqs_welch < 0) == 0)
    assert_true(np.sum(freqs_mpl < 0) == 0)

    assert_true(np.sum(psds_welch < 0) == 0)
    assert_true(np.sum(psds_mpl < 0) == 0)
Example #44
def _plot_freq_hist(tdms, **kargs):
	ti = kargs['ti']
	fi = kargs['fi']
	fft_len = kargs['fft_len']
	fft_scale = kargs['fft_scale']
	n_bins = kargs['n_bins']

#	bins = list()
	s, f = plt.psd(tdms.wav.__getslice__(*ti),
			fft_len,
			tdms.fs)
	sfi = None
	ffi = None
	for v, i in zip(f, xrange(len(f))):
		if fi[0] is not None and v > fi[0]:
			if sfi is None:
				sfi = i
		if fi[1] is not None and v > fi[1]:
			ffi = i
			break
	if sfi is None:
		sfi = 0
	if ffi is None:
		ffi = len(f)
	s = s[sfi:ffi]
	plt.cla()
#	s = tdms.wav.__getslice__(*ti)
	t = sum(s)
	m = len(s) / n_bins
	r = len(s) - m * n_bins
	for i in xrange(n_bins):
		step = i * m + (i if i < r else r)
		b = step, step + m + (1 if i < r else 0)
#		bins.append(b, sum(s.__getslice__(*b)))
		plt.bar(b[0] + sfi, sum(s.__getslice__(*b))/t, b[1] - b[0])
	plt.ylabel(r'Amplitude $\frac{mV^2}{Hz}$')
	plt.xlabel(u'Frequency ($Hz$)')
Example #45
def power_spectral_density(data_array, frequency, segment_size=256, window_method=False):
    """Return the power spectral density using matplotlib.pyplot.psd function."""
    return psd(data_array, NFFT=segment_size, Fs = frequency)
Example #46
# Generate source time courses from 2 dipoles and the correspond evoked data

times = np.arange(300, dtype=np.float) / raw.info['sfreq'] - 0.1
rng = np.random.RandomState(42)


def data_fun(times):
    """Function to generate random source time courses"""
    return (1e-9 * np.sin(30. * times) *
            np.exp(- (times - 0.15 + 0.05 * rng.randn(1)) ** 2 / 0.01))

stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times,
                          random_state=42, labels=labels, data_fun=data_fun)

###############################################################################
# Generate noisy evoked data
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
iir_filter = fit_iir_model_raw(raw, order=5, picks=picks, tmin=60, tmax=180)[1]
snr = 6.  # dB
evoked = simulate_evoked(fwd, stc, info, cov, snr, iir_filter=iir_filter)

###############################################################################
# Plot
plot_sparse_source_estimates(fwd['src'], stc, bgcolor=(1, 1, 1),
                             opacity=0.5, high_resolution=True)

plt.figure()
plt.psd(evoked.data[0])

evoked.plot()
Example #47
def do_AGN_Kepler():

    sname = 'Zw 229-15'
    data = fits.open(data_dir + 'kepler_zw229_Q7.fits')[1].data
    jdate = data['time']
    flux = np.array(data['SAP_FLUX'], dtype=float)
    ferr = np.array(data['SAP_FLUX_ERR'], dtype=float)

    keep = np.where(np.logical_and(np.isfinite(jdate), np.isfinite(flux)))[0]
    jdate = jdate[keep]
    jdate -= jdate.min()
    flux = flux[keep]
    ferr = ferr[keep]

    df = flux[1:] - flux[0:-1]  # remove outliers
    keep = np.where(np.abs(df) < 56.0)
    jdate = jdate[keep]
    flux = flux[keep]
    ferr = ferr[keep]

    load_pickle = True
    if load_pickle:
        carma_sample = cPickle.load(open(data_dir + 'zw229.pickle', 'rb'))
        # rerun MLE
        # carma_model = cm.CarmaModel(jdate, flux, ferr, p=carma_sample.p, q=carma_sample.q)
        # mle = carma_model.get_map(carma_sample.p, carma_sample.q)
        # carma_sample.add_map(mle)
    else:
        carma_sample = make_sampler_plots(jdate, flux, ferr, 7, 'zw229_', sname, njobs=1)

    # transform the flux through end matching
    tflux = flux - flux[0]
    slope = (tflux[-1] - tflux[0]) / (jdate[-1] - jdate[0])
    tflux -= slope * jdate

    plt.subplot(111)
    dt = jdate[1] - jdate[0]
    pgram, freq = plt.psd(tflux, 512, 2.0 / dt, detrend=detrend_mean)
    plt.clf()

    ax = plt.subplot(111)
    print 'Getting bounds on PSD...'
    psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                             color='SkyBlue', nsamples=5000)
    psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
    ax.loglog(freq / 2.0, pgram, 'o', color='DarkOrange')
    psd_slope = 3.14
    above_noise = np.where(freq / 2.0 < 1.0)[0]
    psd_norm = np.mean(np.log(pgram[above_noise[1:]]) + 3.14 * np.log(freq[above_noise[1:]] / 2.0))
    psd_plaw = np.exp(psd_norm) / (freq[1:] / 2.0) ** psd_slope
    ax.loglog(freq[1:] / 2.0, psd_plaw, '-', lw=2, color='DarkOrange')
    ax.loglog(frequencies, psd_mid, '--b', lw=2)
    noise_level = 2.0 * dt * np.mean(ferr ** 2)
    ax.loglog(frequencies, np.ones(frequencies.size) * noise_level, color='grey', lw=2)
    ax.set_ylim(bottom=noise_level / 100.0)
    ax.annotate("Measurement Noise Level", (3.0 * ax.get_xlim()[0], noise_level / 2.5))
    ax.set_xlabel('Frequency [1 / day]')
    ax.set_ylabel('Power Spectral Density [flux$^2$ day]')
    plt.title(sname)
    plt.savefig(base_dir + 'plots/zw229_psd.eps')

    plt.clf()
    carma_sample.plot_1dpdf('measerr_scale')
    plt.savefig(base_dir + 'plots/zw229_measerr_scale.eps')
    measerr_scale = carma_sample.get_samples('measerr_scale')
    print "95% credibility interval on Kepler measurement error scale parameter:", np.percentile(measerr_scale, 2.5), \
        np.percentile(measerr_scale, 97.5)

    pfile = open(data_dir + 'zw229.pickle', 'wb')
    cPickle.dump(carma_sample, pfile)
    pfile.close()
Example #48
import numpy as np
import matplotlib.pyplot as plt


data = np.loadtxt('/home/pauln/Dropbox/DocumentsF/' +
'_Research/LabjackDevelopment/2014-02-24_Data/2014_02_24-run01Data.txt',
 skiprows=1)
dt = []
times = data[:, 0]
n = len(times) - 1
dt[0:n] = times[1:n] - times[0:n - 1]
deltaT = np.mean(dt)
print deltaT

tbSignal = data[:, 3]

tb = tbSignal[0:len(times)]
timeSlice = times[0:len(times)]

plt.subplot(211)
plt.plot(timeSlice, tb)
plt.subplot(212)
rawPSD = plt.psd(tb, NFFT=511, Fs=1.0 / deltaT)

plt.show()
Example #49
File: PSD.py Project: Klabbedi/PSD
# coding: utf-8
import console
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as ml

# console.clear()
file_name = 'wc-cent-2.npy'
data = np.load(file_name)

fig_mag = plt.figure()
plt.psd(data[:, 0], NFFT=256, Fs=80, window=ml.window_hanning, detrend = ml.detrend_none, scale_by_freq = True, noverlap = 0, pad_to = None, sides = 'onesided')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power/Frequency (dB/Hz)')
plt.title('PSD X')
plt.show()
Example #50
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # mark bad channels

# Set up pick list: Gradiometers - bad channels
picks = mne.pick_types(raw.info, meg='grad', exclude='bads')

order = 5  # define model order
picks = picks[:1]

# Estimate AR models on raw data
b, a = fit_iir_model_raw(raw, order=order, picks=picks, tmin=60, tmax=180)
d, times = raw[0, 10000:20000]  # look at one channel from now on
d = d.ravel()  # make flat vector
innovation = signal.convolve(d, a, 'valid')
d_ = signal.lfilter(b, a, innovation)  # regenerate the signal
d_ = np.r_[d_[0] * np.ones(order), d_]  # dummy samples to keep signal length

###############################################################################
# Plot the different time series and PSDs
plt.close('all')
plt.figure()
plt.plot(d[:100], label='signal')
plt.plot(d_[:100], label='regenerated signal')
plt.legend()

plt.figure()
plt.psd(d, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(innovation, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(d_, Fs=raw.info['sfreq'], NFFT=2048, linestyle='--')
plt.legend(('Signal', 'Innovation', 'Regenerated signal'))
plt.show()
Example #51
import argparse
import sys

# Calculates sampling rate and plots 4 channel specgram of EEG data.csv passed in as in command line arg

eegData = pd.read_csv(sys.argv[1])
#eegData = pd.read_csv('Filtered_EEG3.csv')
time = (eegData['Timestamp (ms)'][eegData.shape[0] - 1] - eegData['Timestamp (ms)'][0]) / 1000
samples = eegData.shape[0] - 1
samplingRate = samples/time
print('estimated sampling rate: ',  samplingRate)


plt.figure()
plt.subplot(2,2,1)
plt.psd(eegData['Electrode 1'], Fs=samplingRate)
plt.xlim(0,65)
plt.ylim(-60,20)

plt.subplot(2,2,2)
plt.psd(eegData['Electrode 2'], Fs=samplingRate)
plt.xlim(0,65)
plt.ylim(-60,20)
plt.ylabel('')

plt.subplot(2,2,3)
plt.psd(eegData['Electrode 3'], Fs=samplingRate)
plt.xlim(0,65)
plt.ylim(-60,20)
plt.ylabel('')
Example #52

#images' PSD
ax = fig.add_subplot(235)
ax.set_title("PSD")
imgWidth=originalImage.shape[1] #np.matrix has matrix-like dimensions, height first
assert imgWidth % 2 == 0, "Image width must be even"

#frames per second == 25; however the ticmarks won't fit on X axis, using default Fs=2
freq = originalImage.size * 25
psdShape = lambda m: np.reshape(m, m.size)

plt.psd(psdShape(originalImage), NFFT=imgWidth, label="original")
plt.psd(psdShape(encodedImage), NFFT=imgWidth, label="encoded")
plt.psd(psdShape(encodedImage-originalImage), NFFT=imgWidth, label="difference")
#filtered images' PSD
plt.psd(psdShape(encodedLaplace1), NFFT=imgWidth, label='laplace1')
#plt.psd(psdShape(encodedCanny), label='canny')
#plt.psd(psdShape(encodedSobel), label='sobel')
leg = ax.legend(('original', 'encoded', 'difference', 'laplace1'),
           'upper center', shadow=True)

ax = fig.add_subplot(236)
ax.set_title("CannySharpen(Laplace1(enc))")
imshow(encodedCannyLaplace1, interpolation='nearest', 
            cmap=cm.gray,
            origin='upper',
            #vmax=spatial_vmax,
Example #53
def do_simulated_regular():

    # first generate some data assuming a CARMA(5,3) process on a uniform grid
    sigmay = 2.3  # dispersion in lightcurve
    p = 5  # order of AR polynomial
    qpo_width = np.array([1.0/100.0, 1.0/100.0, 1.0/500.0])
    qpo_cent = np.array([1.0/5.0, 1.0/50.0])
    ar_roots = cm.get_ar_roots(qpo_width, qpo_cent)
    ma_coefs = np.zeros(p)
    ma_coefs[0] = 1.0
    ma_coefs[1] = 4.5
    ma_coefs[2] = 1.25
    sigsqr = sigmay ** 2 / cm.carma_variance(1.0, ar_roots, ma_coefs=ma_coefs)

    ny = 1028
    time = np.arange(0.0, ny)
    y0 = cm.carma_process(time, sigsqr, ar_roots, ma_coefs=ma_coefs)

    ysig = np.ones(ny) * np.sqrt(1e-2)
    # ysig = np.ones(ny) * np.sqrt(1e-6)

    y = y0 + ysig * np.random.standard_normal(ny)

    froot = base_dir + 'plots/car5_regular_'

    plt.subplot(111)
    plt.plot(time, y0, 'k-')
    plt.plot(time, y, '.')
    plt.xlim(time.min(), time.max())
    plt.xlabel('Time')
    plt.ylabel('CARMA(5,3) Process')
    plt.savefig(froot + 'tseries.eps')

    ar_coef = np.poly(ar_roots)

    print 'Getting maximum-likelihood estimates...'

    carma_model = cm.CarmaModel(time, y, ysig)
    pmax = 7
    MAP, pqlist, AIC_list = carma_model.choose_order(pmax, njobs=-1)

    # convert lists to a numpy arrays, easier to manipulate
    pqarray = np.array(pqlist)
    pmodels = pqarray[:, 0]
    qmodels = pqarray[:, 1]
    AICc = np.array(AIC_list)

    plt.clf()
    plt.subplot(111)
    for i in xrange(qmodels.max()+1):
        plt.plot(pmodels[qmodels == i], AICc[qmodels == i], 's-', label='q=' + str(i), lw=2)
    plt.legend()
    plt.xlabel('p')
    plt.ylabel('AICc(p,q)')
    plt.xlim(0, pmodels.max() + 1)
    plt.savefig(froot + 'aic.eps')
    plt.close()

    nsamples = 50000
    carma_sample = carma_model.run_mcmc(nsamples)
    carma_sample.add_mle(MAP)

    plt.subplot(111)
    pgram, freq = plt.psd(y)
    plt.clf()

    ax = plt.subplot(111)
    print 'Getting bounds on PSD...'
    psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                             color='SkyBlue', nsamples=5000)
    psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
    ax.loglog(freq / 2.0, pgram, 'o', color='DarkOrange')
    psd = cm.power_spectrum(frequencies, np.sqrt(sigsqr), ar_coef, ma_coefs=ma_coefs)
    ax.loglog(frequencies, psd, 'k', lw=2)
    ax.loglog(frequencies, psd_mle, '--b', lw=2)
    noise_level = 2.0 * np.mean(ysig ** 2)
    ax.loglog(frequencies, np.ones(frequencies.size) * noise_level, color='grey', lw=2)
    ax.set_ylim(bottom=noise_level / 100.0)
    ax.annotate("Measurement Noise Level", (3.0 * ax.get_xlim()[0], noise_level / 2.5))
    ax.set_xlabel('Frequency')
    ax.set_ylabel('Power Spectral Density')

    plt.savefig(froot + 'psd.eps')

    print 'Assessing the fit quality...'
    carma_sample.assess_fit(doShow=False)
    plt.savefig(froot + 'fit_quality.eps')

    pfile = open(data_dir + froot + '.pickle', 'wb')
    cPickle.dump(carma_sample, pfile)
    pfile.close()
Example #54
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab

fs = 1000
t = np.linspace(0, 0.3, 301)
A = np.array([2, 8]).reshape(-1, 1)
f = np.array([150, 140]).reshape(-1, 1)
xn = (A * np.exp(2j * np.pi * f * t)).sum(axis=0) + 5 * np.random.randn(*t.shape)

yticks = np.arange(-50, 30, 10)
xticks = np.arange(-500, 550, 100)
plt.subplots_adjust(hspace=0.45, wspace=0.3)
ax = plt.subplot(1, 2, 1)

plt.psd(xn, NFFT=301, Fs=fs, window=mlab.window_none, pad_to=1024,
        scale_by_freq=True)
plt.title('Periodogram')
plt.yticks(yticks)
plt.xticks(xticks)
plt.grid(True)
plt.xlim(-500, 500)

plt.subplot(1, 2, 2, sharex=ax, sharey=ax)
plt.psd(xn, NFFT=150, Fs=fs, window=mlab.window_none, noverlap=75, pad_to=512,
        scale_by_freq=True)
plt.title('Welch')
plt.xticks(xticks)
plt.yticks(yticks)
plt.ylabel('')
plt.grid(True)
plt.xlim(-500, 500)
import pylab as pyl
import scipy as sy
import scipy.fftpack as syfp

# t2 is the signal array being analysed (not defined in this snippet)
array = t2.copy()
length = len(array)
# Create time data for x axis based on array length
x = sy.linspace(0.00001, length*0.00001, num=length)

# Do FFT analysis of array
FFT = sy.fft(array)
# Get the related frequencies
freqs = syfp.fftfreq(array.size, d=(x[1]-x[0]))

# Create subplot windows and show plot
pyl.subplot(211)
pyl.plot(x, array)
pyl.subplot(212)
pyl.plot(freqs, sy.log10(abs(FFT)), 'x')  # log-magnitude of the spectrum
pyl.show()

import matplotlib.pylab as plt
import matplotlib.mlab as mlb

Fs = 1. / (d[1] - d[0])  # sampling frequency from the spacing of d, the sample times (not defined in this snippet)
plt.psd(array, Fs=Fs, detrend=mlb.detrend_mean)
plt.show()

# quick look at the raw signal and at its magnitude spectrum via a plain FFT
pyl.plot(X, 'b-')  # X: the raw signal (not defined in this snippet)

dataF = np.abs(np.fft.fftshift(np.fft.fft(t2)))
pyl.plot(dataF, 'b-')

Example No. 56
0
import numpy as np
import matplotlib.pyplot as plt

# Fixing random state for reproducibility
np.random.seed(19680801)


dt = 0.01
t = np.arange(0, 10, dt)
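# colored noise: white noise convolved with an exponential impulse response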
nse = np.random.randn(len(t))
r = np.exp(-t/0.05)

cnse = np.convolve(nse, r)*dt
cnse = cnse[:len(t)]
s = 0.1*np.sin(2*np.pi*t) + cnse

plt.subplot(211)
plt.plot(t, s)
plt.subplot(212)
plt.psd(s, 512, 1/dt)

plt.show()

###############################################################################
# Compare this with the equivalent Matlab code to accomplish the same thing::
#
#     dt = 0.01;
#     t = [0:dt:10];
#     nse = randn(size(t));
#     r = exp(-t/0.05);
#     cnse = conv(nse, r)*dt;
#     cnse = cnse(1:length(t));
#     s = 0.1*sin(2*pi*t) + cnse;
#
#     subplot(211)
#     plot(t, s)
#     subplot(212)
#     psd(s, 512, 1/dt)
Example No. 57
0
def plot_joint_psd(tdms_list, ti_list, fft_len=256, fft_scale='db', fi=None,
		bar=False, figure_size=(8,6), output_fn=None, figure_resolution=80):

	plt.figure(figsize=figure_size, dpi=figure_resolution)

	psd_len = fft_len / 2 + 1
#	data_list = np.zeros((len(tdms_list), psd_len))
	data_dict = dict()

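	# compute one PSD per recording and collect the results by group_id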
	for tdms, ti in zip(tdms_list, ti_list):
		# convert the time interval (in seconds) to sample indices
		ti = (int(round(ti[0] * tdms.fs)), int(round(ti[1] * tdms.fs)))
		t = tdms.wav[ti[0]:ti[1]]
		data, f_list = plt.psd(t, fft_len, Fs=tdms.fs)

		if fft_scale == 'db':
			data = 10 * np.log10(data)

		if tdms.group_id in data_dict:
			data_dict[tdms.group_id] = \
					np.concatenate((data_dict[tdms.group_id], [data]))
		else:
			data_dict[tdms.group_id] = np.array([data])

	# restrict the PSDs to the requested frequency interval; None keeps the full range
	f_start_idx = None
	f_stop_idx = None
	if fi is not None:
		fi = (fi[0] if fi[0] is not None else 0,
				fi[1] if fi[1] is not None else tdms_list[0].fs / 2)
		for i, f in enumerate(f_list):
			if f_start_idx is None and f > fi[0]:
				f_start_idx = i - 1
			if f_stop_idx is None and f > fi[1]:
				f_stop_idx = i - 1
		f_list = f_list[f_start_idx:f_stop_idx]

	plt.cla()
	if fft_scale == 'db':
		plt.ylabel(r'Amplitude $\frac{mV^2}{Hz}$ ($dB$)')
	elif fft_scale == 'linear':
		plt.ylabel(r'Amplitude $\frac{mV^2}{Hz}$')
	plt.xlabel(u'Frequency ($Hz$)')

	xticks_list = ([], [])
	for i, (key, data) in enumerate(data_dict.iteritems()):
		data = data[:, f_start_idx:f_stop_idx]
		if bar:
			data_sum = data.sum(1)
			plt.bar(i+0.1, data_sum.mean(), yerr=data_sum.std(),
					error_kw=dict(elinewidth=6, capsize=10, ecolor='black'), color='white')
			xticks_list[0].append(i+0.5)
			xticks_list[1].append(key)
		else:
			plt.errorbar(f_list, data.mean(0), data.std(0), label=str(key), ecolor='b')
	if bar:
		plt.xlim((0, len(data_dict)))
		plt.xticks(*xticks_list)
		print xticks_list
	else:
		plt.legend()
		plt.xlim((min(f_list), max(f_list)))
	if output_fn is not None:
		plt.savefig(output_fn)
	plt.show()
Example No. 58
0
def problem_6():
    """
    This function applies the HP filter defined earlier to the data and then
    repeats the same process as in problem 5, plotting the spectra of both the
    trend and the cycle produced by the HP filter.

    Inputs:
        None

    Outputs:
        This function automatically generates plots of the spectra for the
        filtered data.

    Notes:
        This function uses the function hp_filter found in the file
        hp_filter.py. A minimal usage sketch of the decomposition follows this
        example.
    """
    gdpT, gdpC = hp.hp_filter(gdp)
    cpiT, cpiC = hp.hp_filter(cpi)
    consT, consC = hp.hp_filter(cons)
    invT, invC = hp.hp_filter(inv)

    plt.figure()
    plt.psd(gdpT)
    plt.title('GDP Trend Spectrum')
    plt.show()

    plt.figure()
    plt.psd(cpiT)
    plt.title('CPI Trend Spectrum')
    plt.show()

    plt.figure()
    plt.psd(consT)
    plt.title('Consumption Trend Spectrum')
    plt.show()

    plt.figure()
    plt.psd(invT)
    plt.title('Investment Trend Spectrum')
    plt.show()

    plt.figure()
    plt.psd(gdpC)
    plt.title('GDP Cycle Spectrum')
    plt.show()

    plt.figure()
    plt.psd(cpiC)
    plt.title('CPI Cycle Spectrum')
    plt.show()

    plt.figure()
    plt.psd(consC)
    plt.title('Consumption Cycle Spectrum')
    plt.show()

    plt.figure()
    plt.psd(invC)
    plt.title('Investment Cycle Spectrum')
    plt.show()
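
For readers without the local hp_filter module, the following is a minimal, hedged sketch of the trend/cycle decomposition described in the docstring above. It assumes statsmodels' hpfilter and a synthetic stand-in series; the original example instead loads real macroeconomic series and uses its own hp_filter.py.

import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.filters.hp_filter import hpfilter

# Assumption: a random walk with drift stands in for a quarterly macro series.
np.random.seed(0)
gdp = np.cumsum(np.random.randn(200)) + 0.05 * np.arange(200)

# hpfilter returns (cycle, trend); lamb=1600 is the conventional choice for quarterly data.
cycle, trend = hpfilter(gdp, lamb=1600)

plt.figure()
plt.psd(trend)
plt.title('GDP Trend Spectrum')
plt.show()

plt.figure()
plt.psd(cycle)
plt.title('GDP Cycle Spectrum')
plt.show()

As in the example above, the trend spectrum concentrates power at low frequencies, while the cycle spectrum carries the higher-frequency content removed by the filter.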
Example No. 59
0
def do_XRB():
    sname = 'XTE 1550-564'
    data_file = data_dir + 'LC_B_3.35-12.99keV_1div128s_total.fits'
    data = fits.open(data_file)[1].data
    tsecs = data['TIME']
    flux = data['RATE']
    dt = tsecs[1:] - tsecs[:-1]
    gap = np.where(dt > 1)[0]

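    # keep a contiguous 40000-sample segment between the first two large gaps in the sampling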
    tsecs = tsecs[gap[0]+1:gap[1]][:40000]
    flux = flux[gap[0]+1:gap[1]][:40000]

    tsecs0 = tsecs.copy()
    flux0 = flux.copy()

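    # randomly down-sample the light curve to 4000 epochs, keeping them in time order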
    ndown_sample = 4000
    idx = np.random.permutation(len(flux0))[:ndown_sample]
    idx.sort()
    tsecs = tsecs[idx]
    logflux = np.log(flux[idx])
    ferr = np.sqrt(flux[idx])
    logf_err = ferr / flux[idx]

    # # high-frequency sampling lightcurve
    # high_cutoff = 10000
    # tsecs_high = tsecs[:high_cutoff]
    # logflux_high = np.log(flux[:high_cutoff])
    # ferr_high = np.sqrt(flux[:high_cutoff])
    # logferr_high = ferr_high / flux[:high_cutoff]
    #
    # ndown_sample_high = 1000
    # idx_high = np.random.permutation(len(logflux_high))[:ndown_sample_high]
    # idx_high.sort()
    #
    # # middle-frequency sampling lightcurve
    # tsecs_mid = tsecs[high_cutoff:]
    # logflux_mid = np.log(flux[high_cutoff:])
    # ferr_mid = np.sqrt(flux[high_cutoff:])
    # logf_err_mid = ferr_mid / flux[high_cutoff:]
    # # logf_err = np.sqrt(0.00018002985939372774 / 2.0 / np.median(dt))  # eyeballed from periodogram
    # # logf_err = np.ones(len(tsecs)) * logf_err
    #
    # ndown_sample_mid = 4000 - ndown_sample_high
    # idx_mid = np.random.permutation(len(logflux_mid))[:ndown_sample_mid]
    # idx_mid.sort()
    #
    # tsecs = np.concatenate((tsecs_high[idx_high], tsecs_mid[idx_mid]))
    # logflux = np.concatenate((logflux_high[idx_high], logflux_mid[idx_mid]))
    # logf_err = np.concatenate((logferr_high[idx_high], logf_err_mid[idx_mid]))
    # idx = np.concatenate((idx_high, idx_mid))

    plt.plot(tsecs0, np.log(flux0))
    plt.errorbar(tsecs, logflux, yerr=logf_err)
    print 'Measurement errors are', np.mean(logf_err) / np.std(logflux) * 100, ' % of observed standard deviation.'
    print 'Mean time spacing:', np.mean(tsecs[1:] - tsecs[:-1])
    # print 'Mean time spacing for high-frequency sampling:', np.mean(tsecs_high[idx_high[1:]]-tsecs_high[idx_high[:-1]])
    # print 'Mean time spacing for low-frequency sampling:', np.mean(tsecs_mid[idx_mid[1:]]-tsecs_mid[idx_mid[:-1]])
    plt.show()
    plt.clf()
    plt.plot(tsecs, logflux)
    plt.show()
    plt.hist(logflux, bins=100, normed=True)
    plt.xlabel('log Flux')
    print 'Standard deviation in lightcurve:', np.std(logflux)
    print 'Typical measurement error:', np.mean(logf_err)
    plt.show()
    plt.clf()
    assert np.all(np.isfinite(tsecs))
    assert np.all(np.isfinite(logflux))
    assert np.all(np.isfinite(logf_err))
    dt_idx = tsecs[1:] - tsecs[:-1]
    assert np.all(dt_idx > 0)

    load_pickle = True
    if load_pickle:
        carma_sample = cPickle.load(open(data_dir + 'xte1550_p5q4.pickle', 'rb'))
    else:
        carma_sample = make_sampler_plots(tsecs, logflux, logf_err, 7, 'xte1550_', sname, njobs=7)

    plt.subplot(111)
    pgram, freq = plt.psd(np.log(flux0), 512, 2.0 / np.median(dt), detrend=detrend_mean)
    plt.clf()

    ax = plt.subplot(111)
    print 'Getting bounds on PSD...'
    psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(percentile=95.0, sp=ax, doShow=False,
                                                                             color='SkyBlue', nsamples=5000)
    psd_mle = cm.power_spectrum(frequencies, carma_sample.mle['sigma'], carma_sample.mle['ar_coefs'],
                                ma_coefs=np.atleast_1d(carma_sample.mle['ma_coefs']))
    ax.loglog(freq / 2, pgram, 'o', color='DarkOrange')
    nyquist_freq = np.mean(0.5 / dt_idx)
    nyquist_idx = np.where(frequencies <= nyquist_freq)[0]
    ax.loglog(frequencies, psd_mle, '--b', lw=2)
    # noise_level = 2.0 * np.mean(dt_idx) * np.mean(logf_err ** 2)
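    # noise_level0 is read off the periodogram; the next lines rescale it from the periodogram's
    # Nyquist frequency (0.5 / median native spacing) to the mean Nyquist frequency of the
    # down-sampled light curve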
    noise_level0 = 0.00018002985939372774 / 2.0  # scale the noise level seen in the PSD
    noise_level = noise_level0 * (0.5 / np.median(dt)) / nyquist_freq
    ax.loglog(frequencies[nyquist_idx], np.ones(len(nyquist_idx)) * noise_level, color='grey', lw=2)
    # ax.loglog(frequencies, np.ones(len(frequencies)) * noise_level0)
    ax.set_ylim(bottom=noise_level0 / 10.0)
    ax.annotate("Measurement Noise Level", (3.0 * ax.get_xlim()[0], noise_level / 1.5))
    ax.set_xlabel('Frequency [Hz]')
    ax.set_ylabel('Power Spectral Density [fraction$^2$ Hz$^{-1}$]')
    plt.title(sname)

    plt.savefig(base_dir + 'plots/xte1550_psd.eps')

    # plot the standardized residuals and compare with the standard normal
    plt.clf()
    kfilter, mu = carma_sample.makeKalmanFilter('map')
    kfilter.Filter()
    kmean = np.asarray(kfilter.GetMean())
    kvar = np.asarray(kfilter.GetVar())
    standardized_residuals = (carma_sample.y - mu - kmean) / np.sqrt(kvar)
    plt.hist(standardized_residuals, bins=100, normed=True, color='SkyBlue', histtype='stepfilled')
    plt.xlabel('Standardized Residuals')
    plt.ylabel('Probability Distribution')
    xlim = plt.xlim()
    xvalues = np.linspace(xlim[0], xlim[1], num=100)
    expected_pdf = np.exp(-0.5 * xvalues ** 2) / np.sqrt(2.0 * np.pi)
    plt.plot(xvalues, expected_pdf, 'k', lw=3)
    plt.title(sname)
    plt.savefig(base_dir + 'plots/xte1550_resid_dist.eps')

    # plot the autocorrelation function of the residuals and compare with the 95% confidence intervals for white
    # noise
    plt.clf()
    maxlag = 50
    wnoise_upper = 1.96 / np.sqrt(carma_sample.time.size)
    wnoise_lower = -1.96 / np.sqrt(carma_sample.time.size)
    plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
    lags, acf, not_needed1, not_needed2 = plt.acorr(standardized_residuals, maxlags=maxlag, lw=3)
    plt.xlim(0, maxlag)
    plt.ylim(-0.2, 0.2)
    plt.xlabel('Time Lag')
    plt.ylabel('ACF of Residuals')
    plt.savefig(base_dir + 'plots/xte1550_resid_acf.eps')

    # plot the autocorrelation function of the squared residuals and compare with the 95% confidence intervals for
    # white noise
    plt.clf()
    squared_residuals = standardized_residuals ** 2
    wnoise_upper = 1.96 / np.sqrt(carma_sample.time.size)
    wnoise_lower = -1.96 / np.sqrt(carma_sample.time.size)
    plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
    lags, acf, not_needed1, not_needed2 = plt.acorr(squared_residuals - squared_residuals.mean(), maxlags=maxlag,
                                                    lw=3)
    plt.xlim(0, maxlag)
    plt.ylim(-0.2, 0.2)
    plt.xlabel('Time Lag')
    plt.ylabel('ACF of Sqrd. Resid.')
    plt.savefig(base_dir + 'plots/xte1550_sqrres_acf.eps')

    if not load_pickle:
        pfile = open(data_dir + 'xte1550_nonoise.pickle', 'wb')
        cPickle.dump(carma_sample, pfile)
        pfile.close()