def graphSvmLatency(predictions, output, rawTimeDomainData, fftTimes):
    # Use an n-fold scheme so that n-1 classifiers estimate the output at each point,
    # then plot that value vs. the time-domain data to see when we transition.
    rawTimes = [1000 * float(x) / constants.samplesPerSecond for x in range(len(rawTimeDomainData))]
    figure = pylab.figure(figsize=(20, 12))
    pylab.subplot(211)
    pylab.plot(rawTimes, rawTimeDomainData)
    pylab.plot(fftTimes, [x * 0.1 + 0.1 for x in output], '-o')
    pylab.plot(fftTimes, [x * 0.1 + 0.3 for x in predictions], '-o')
    pylab.grid(True)
    pylab.subplot(212)
    pylab.specgram(
        rawTimeDomainData,
        NFFT=constants.windowSize,
        Fs=constants.samplesPerSecond,
        noverlap=int(constants.samplesPerSecond / constants.transformsPerSecond),
        sides='onesided',
        detrend=pylab.detrend_mean
    )
    pylab.grid(True)
    pylab.show()
def plot_example_spectrograms(example, rate):
    """
    This function creates a figure with spectrogram subplots for the four sleep
    examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1, 2 and 3/4.)
    """
    sleep_stages = ['REM sleep', 'Stage 1 NREM sleep', 'Stage 2 NREM sleep',
                    'Stage 3 and 4 NREM sleep']
    plt.figure()
    ###YOUR CODE HERE
    for i in range(len(example[:, 0])):
        # plot every sleep stage in a separate subplot
        plt.subplot(2, 2, i + 1)
        # plot spectrogram
        plt.specgram(example[i, :], NFFT=512, Fs=rate)
        # add axis labels and title
        plt.xlabel('Time (Seconds)')
        plt.ylabel('Frequency (Hz)')
        plt.title('Spectrogram ' + sleep_stages[i])
        plt.ylim(0, 60)
        plt.xlim(0, 290)
    return
def plot_specgram(in_array):
    pt.figure()
    pylab.specgram(in_array, NFFT=256, noverlap=224)
    pt.xlabel('SNP #')
    pt.ylabel('f')
    pt.suptitle('Spectrogram')
    pt.title('Color is |A|^2')
def plotSpectrogram(timeDomainData):
    import constants
    pylab.specgram(
        timeDomainData,
        NFFT=constants.windowSize,
        Fs=constants.samplesPerSecond,
        noverlap=int(constants.samplesPerSecond / constants.transformsPerSecond),
        sides='onesided',
        detrend=pylab.detrend_mean
    )
    pylab.show()
def plot_example_spectrograms(examples, rate):
    """
    This function creates a figure with spectrogram subplots for the four sleep
    examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1, 2 and 3/4.)
    """
    plt.figure()
    ###YOUR CODE HERE
    # plot the spectrogram for each of the four samples
    for example in examples:
        plt.specgram(example, NFFT=256, Fs=rate)
    return
def plot_example_spectrograms(example, rate):
    """
    This function creates a figure with spectrogram subplots for the four sleep
    examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1, 2 and 3/4.)
    """
    plt.figure()
    ###YOUR CODE HERE
    y_lim = 40
    plt.suptitle('Spectrogram')  # figure-level title; per-stage titles are set below
    bin_space = 512  # 30*rate -- a typical window size is 30 seconds

    plt.subplot(411)
    plt.specgram(example[0] / np.sum(example[0]), NFFT=bin_space, Fs=rate)
    plt.ylim((0, y_lim))
    plt.title('REM')

    plt.subplot(412)
    plt.title('Stage 1 NREM')
    plt.specgram(example[1] / np.sum(example[1]), NFFT=bin_space, Fs=rate)
    plt.ylim((0, y_lim))

    plt.subplot(413)
    plt.title('Stage 2 NREM')
    plt.specgram(example[2] / np.sum(example[2]), NFFT=bin_space, Fs=rate)
    plt.ylim((0, y_lim))

    plt.subplot(414)
    plt.title('Stage 3/4 NREM')
    plt.specgram(example[3] / np.sum(example[3]), NFFT=bin_space, Fs=rate)
    plt.ylim((0, y_lim))

    plt.show()
    return
def graph_spectrogram(self, filename):
    wav_file = f"{filename}"
    try:
        home_folder = os.path.dirname(os.path.realpath(__file__)).replace('/hello', '')
        sound_info, frame_rate = self.get_wav_info(home_folder + wav_file)
        pylab.figure(figsize=(4, 3))
        pylab.specgram(sound_info, Fs=frame_rate)
        output_file = f"{home_folder + filename.replace('media/documents', 'media/documents/output')}.png"
        pylab.savefig(output_file)
        pylab.close()
    except Exception as e:
        print(e)
    return
def graph_spectrogram(wav_file):
    sound_info, frame_rate = get_wav_info(wav_file)
    pylab.figure(num=None, figsize=(19, 12))
    pylab.subplot(111)
    pylab.title('spectrogram of %r' % wav_file)
    spectrum = pylab.specgram(sound_info, Fs=frame_rate, NFFT=8192)
    pylab.savefig(wav_file + '_spectrogram.png')
def plot_example_spectrograms(example, rate):
    """
    This function creates a figure with spectrogram subplots for the four sleep
    examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1, 2 and 3/4.)
    """
    plt.figure()
    ###YOUR CODE HERE
    # rowname is assumed to be a module-level list with the four stage names,
    # e.g. ['REM', 'NREM stage 1', 'NREM stage 2', 'NREM stage 3/4'].
    for i in range(0, len(example)):
        plt.subplot(2, 2, i + 1)
        plt.title(rowname[i])
        Pxx, freqs, bins, im = plt.specgram(example[i], NFFT=512, Fs=rate)
        plt.ylim(0, 70)
        plt.xlabel('Time (Seconds)')
        plt.ylabel('Frequency (Hz)')
    return
def plot_spectrograms(data, rate, subject, condition):
    """
    Creates spectrogram subplots for all 9 channels
    """
    fig = plt.figure()
    # common title
    fname = 'Spectrogram - ' + 'Subject #' + subject + ' ' + condition + ' Dataset'
    fig.suptitle(fname, fontsize=14, fontweight='bold')
    # common ylabel
    fig.text(0.06, 0.5, 'ylabel', ha='center', va='center',
             rotation='vertical', fontsize=14, fontweight='bold')
    # use this to stack EEG, EOG, EMG on top of each other
    sub_order = [1, 4, 7, 10, 2, 5, 3, 6, 9]
    for ch in range(0, len(data)):
        plt.subplot(4, 3, sub_order[ch])
        plt.subplots_adjust(hspace=.6)  # adds space between subplots
        plt.title(channel_name[ch])
        Pxx, freqs, bins, im = plt.specgram(data[ch], NFFT=512, Fs=rate)
        plt.ylim(0, 70)
        plt.xlabel('Time (Seconds)')
        plt.ylabel('Frequency (Hz)')
    # fig.savefig(fname + '.pdf', format='pdf')  # disabled: buggy resolution problem
    return
def plot_hypnogram(eeg, channel, stages, srate, subject, condition):
    """
    This function takes the eeg, the stages and sampling rate and draws a
    hypnogram over the spectrogram of the data.
    """
    fig, ax1 = plt.subplots()  # Needed for the multiple y-axes

    # Use the specgram function to draw the spectrogram as usual
    Pxx, freqs, bins, im = plt.specgram(eeg, NFFT=512, Fs=srate)

    # Label the x and y axes and set the y limits for the spectrogram
    plt.ylim(0, 30)
    plt.xlabel('Time (Seconds)')
    plt.ylabel('Frequency (Hz)')

    ax2 = ax1.twinx()  # Necessary for multiple y-axes

    # Use ax2.plot to draw the hypnogram. Be sure the x values are in seconds.
    # HINT: Use drawstyle='steps' to allow step functions in the plot
    times = np.arange(0, len(stages) * 30, 30)
    ax2.plot(times, stages, drawstyle='steps', color='blue')

    # Label the right y-axis and change the text color to match the plot
    ax2.set_ylabel('NREM Stage', color='b')

    # Set the limits for the y-axis
    plt.ylim(0.0, 7.0)

    # Only display the possible values for the stages
    ax2.set_yticks(np.arange(1, 7))

    # Change the right axis tick color to match the plot
    for t1 in ax2.get_yticklabels():
        t1.set_color('b')

    # Title the plot
    fname = ('Hypnogram - ' + 'Subject #' + subject + ' ' + condition + ' - '
             + str(channel_name[channel]))
    plt.title(fname, fontsize=14, fontweight='bold')

    fig.savefig(fname + '.png', format='png')
    return
def specgram(trace, NFFT=256, noverlap=128):
    # -------------------------------------------------------------------------
    '''
    Returns the spectrogram of the trace
    '''
    from matplotlib.pylab import specgram
    from matplotlib.pylab import detrend_mean

    dt = (trace._time[1] - trace._time[0]).item()
    Fs = 1000. / dt
    return specgram(trace._data, NFFT=NFFT, Fs=Fs,
                    detrend=detrend_mean, noverlap=noverlap)
def _create_histogram(self, example):
    (Pxx, freqs, bins, im) = plt.specgram(example)
    plt.clf()  # specgram plots, so we clear the figure

    img = features.as_img(Pxx)
    patches = features.get_slices(img, self.n_patches)

    patch_counts = [0] * self.patch_types
    for patch in patches:
        patch_type = self.patch_clusterer.predict(patch)[0]
        patch_counts[patch_type] += 1
    return patch_counts
def display_spectrogram_for_one_audio_file(local_audio_file):
    '''
    Display spectrogram for a local audio file using soundfile.

    Parameters
    ----------
    local_audio_file : string
        Local location of the audio file to be displayed.

    Returns
    -------
    None

    Raises
    ------
    DLPyError
        If anything goes wrong, it complains and prints the appropriate message.
    '''
    try:
        import soundfile as sf
        import matplotlib.pylab as plt
    except (ModuleNotFoundError, ImportError):
        raise DLPyError('cannot import soundfile')

    if os.path.isdir(local_audio_file):
        local_audio_file_real = random_file_from_dir(local_audio_file)
    else:
        local_audio_file_real = local_audio_file

    print('File location: {}'.format(local_audio_file_real))
    data, sampling_rate = sf.read(local_audio_file_real)
    plt.specgram(data, Fs=sampling_rate)
    # add axis labels
    plt.ylabel('Frequency [Hz]')
    plt.xlabel('Time [sec]')
def plot_hypnogram(eeg, stages, srate):
    """
    This function takes the eeg, the stages and sampling rate and draws a
    hypnogram over the spectrogram of the data.
    """
    fig, ax1 = plt.subplots()  # Needed for the multiple y-axes

    # Use the specgram function to draw the spectrogram as usual
    y_lim = 40
    plt.specgram(eeg / np.sum(eeg), NFFT=512, Fs=srate)

    # Label the x and y axes and set the y limits for the spectrogram
    ax1.set_ylim((0, y_lim))
    ax1.set_xlim((0, len(eeg) / srate))
    plt.title('Hypnogram')
    ax1.set_xlabel('Time in Seconds')
    ax1.set_ylabel('Frequency in Hz')

    ax2 = ax1.twinx()  # Necessary for multiple y-axes

    # Use ax2.plot to draw the hypnogram. Be sure the x values are in seconds.
    # HINT: Use drawstyle='steps' to allow step functions in the plot
    ax2.plot(np.arange(0, len(stages)) * 30, stages, drawstyle='steps')

    # Label the right y-axis and change the text color to match the plot
    ax2.set_ylabel('NREM Stages', color='b')

    # Set the limits for the y-axis
    ax2.set_ylim(0.5, 3.5)
    ax2.set_xlim((0, len(eeg) / srate))

    # Only display the possible values for the stages
    ax2.set_yticks(np.arange(1, 4))

    # Change the axis tick color to match the plot
    for t1 in ax2.get_yticklabels():
        t1.set_color('b')
def ReadAIFF(file):
    wave = aifc.open(file, 'r')
    nFrames = wave.getnframes()
    wave_str = wave.readframes(nFrames)
    wave.close()
    wave_data = np.frombuffer(wave_str, dtype=np.short).byteswap()

    # wave_data = 1. / nFrames * np.abs(scipy.fft(wave_data))
    wave_data_gram = pylab.specgram(wave_data, NFFT=NFFT, noverlap=noverlap)[0]
    # wave_data_gram = np.log(1 + log_scale * wave_data_gram)
    print(wave_data_gram)

    # flatten and normalize to [0, 1]
    wave_data_gram = wave_data_gram.reshape(shape[0] * shape[1])
    w_max = max(wave_data_gram)
    w_min = min(wave_data_gram)
    wave_data_gram = (wave_data_gram - w_min) / (w_max - w_min)
    return wave_data_gram
def spectrogram(self, path):
    sr, d = wavfile.read(path)
    wavefile = wave.open(path, 'r')
    nchannels = wavefile.getnchannels()
    if nchannels == 1:
        print("channels should be 1: %d" % nchannels)
        channels = [np.array(d[:])]
        s, f, t, im = pylab.specgram(channels[0], Fs=sr)
        im.figure.gca().set_axis_off()
        return im.figure
    else:
        print("channels should be 2: %d" % nchannels)
        channels = [np.array(d[:, 0]), np.array(d[:, 1])]
        s, f, t, im = pylab.specgram(channels[1], Fs=sr)
        im.figure.gca().set_axis_off()
        return im.figure
def plot_hypnogram(eeg, stages, srate, end_time, title):
    """
    This function takes the eeg, the stages and sampling rate and draws a
    hypnogram over the spectrogram of the data.
    """
    # eeg = eeg[0:1 + 3600 * srate]
    fig, ax1 = plt.subplots()  # Needed for the multiple y-axes

    # Use the specgram function to draw the spectrogram as usual
    Pxx, freqs, bins, im = plt.specgram(eeg, NFFT=1380, Fs=srate)

    # Label the x and y axes and set the y limits for the spectrogram
    plt.xlabel('Time [seconds]')
    plt.ylabel('Frequency [Hz]')
    plt.ylim(0, 30)
    # plt.xlim(0, 3600)
    # plt.axes([0, 0, 3600, 30])

    print(eeg)
    print(stages)
    print('')

    ax2 = ax1.twinx()  # Necessary for multiple y-axes

    # Use ax2.plot to draw the hypnogram. Be sure the x values are in seconds.
    # HINT: Use drawstyle='steps' to allow step functions in the plot
    time = np.arange(0, int(len(eeg) / srate), 30)
    ax2.plot(time, stages, drawstyle='steps')
    plt.xlim(0, end_time)

    # Label the right y-axis and change the text color to match the plot
    ax2.set_ylabel('NREM stage', color='b')

    # Set the limits for the y-axis
    plt.ylim(0.5, 3.5)

    # Only display the possible values for the stages
    ax2.set_yticks(np.arange(1, 4))

    # Change the axis tick color to match the plot
    for t1 in ax2.get_yticklabels():
        t1.set_color('b')

    # Title the plot
    plt.title(title)
def f(wav_file):
    sound_info, frame_rate = get_wav_info(wav_file)
    pylab.figure(num=None, figsize=(19, 12))
    pylab.subplot(111)
    pylab.title('spectrogram of %r' % wav_file)
    spectrum = pylab.specgram(sound_info, Fs=frame_rate, NFFT=8192)
    pylab.savefig(wav_file + '_spectrogram.png')

    freqs = spectrum[1]
    t = spectrum[2]
    im = spectrum[3]
    spectrum = spectrum[0].T

    # weighted average frequency (spectral centroid) for each time bin
    avg_freqs = []
    for x in range(0, len(t)):
        weighted = np.dot(spectrum[x], freqs.T)
        avg_freqs.append(weighted / np.sum(spectrum[x]))
    return avg_freqs, t
def plot_example_spectrograms(example, rate):
    """
    This function creates a figure with spectrogram subplots for the four sleep
    examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1, 2 and 3/4.)
    """
    plt.figure()
    plt.specgram(example[0], 1024, rate)
    plt.specgram(example[1], 1024, rate)
    plt.specgram(example[2], 1024, rate)
    plt.specgram(example[3], 1024, rate)
    plt.ylim(0, 50)
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')
    plt.title('Spectrograms for the different stages')
    plt.show()
    return
def plot_example_spectrograms(example, rate):
    """
    This function creates a figure with spectrogram subplots for the four sleep
    examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1, 2 and 3/4.)
    """
    plt.figure()
    ###YOUR CODE HERE
    for idx in range(len(example)):
        plt.subplot(2, 2, idx + 1)
        Pxx, freqs, bins, im = plt.specgram(example[idx], NFFT=1380, Fs=rate)
        print('bin' + str(idx) + ': ' + str(len(bins)))
        plt.xlabel('Time [s]')
        plt.ylabel('Frequency [Hz]')
        # plt.title('Spectrogram ' + str(idx + 1))
    return
def plot_example_spectrograms(example, rate):
    """
    This function creates a figure with spectrogram subplots for the four sleep
    examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1, 2 and 3/4.)
    """
    plt.figure()
    for i in range(4):
        plt.subplot(2, 2, i + 1)
        Pxx, freqs, bins, im = plt.specgram(example[i, :], NFFT=512, Fs=rate)
        if i == 0:
            plt.title('REM example')
        else:
            plt.title('NREM ' + str(i) + ' example')
        plt.ylim((0, 40))
        plt.xlabel('Time (s)')
        plt.ylabel('Frequency (Hz)')
    return bins
def plot_hypnogram(eeg, stages, srate):
    """
    This function takes the eeg, the stages and sampling rate and draws a
    hypnogram over the spectrogram of the data.
    """
    fig, ax1 = plt.subplots()  # Needed for the multiple y-axes

    # Use the specgram function to draw the spectrogram as usual
    psd, frequency, bins, im = plt.specgram(eeg, NFFT=512, Fs=srate)

    # Label the x and y axes and set the y limits for the spectrogram
    plt.ylim(0, 30)
    plt.xlabel('Time (Seconds)')
    plt.ylabel('Frequency (Hz)')

    ax2 = ax1.twinx()  # Necessary for multiple y-axes

    # Use ax2.plot to draw the hypnogram. Be sure the x values are in seconds.
    # HINT: Use drawstyle='steps' to allow step functions in the plot
    times = np.arange(0, len(stages) * 30, 30)
    ax2.plot(times, stages, drawstyle='steps', linewidth=2)

    # Label the right y-axis and change the text color to match the plot
    ax2.set_ylabel('NREM Stage', color='b')

    # Set the limits for the y-axis
    plt.ylim(0.5, 3.5)

    # Set the limits for the x-axis
    plt.xlim(0, 3000)

    # Only display the possible values for the stages
    ax2.set_yticks(np.arange(1, 4))

    # Change the axis tick color to match the plot
    for t1 in ax2.get_yticklabels():
        t1.set_color('b')

    # Title the plot
    plt.title('Hypnogram - Test Data')
def plot_subject_spectrograms(DATA, srate, title):
    """
    This function creates a figure with spectrogram subplots for all 9 channels.
    """
    plt.figure()
    channel_labels = [
        'EEG - channel 1 [C3/A2]',
        'EEG - channel 2 [O2/A1]',
        'EEG - channel 8 [C4/A1]',
        'EEG - channel 9 [O1/A2]',
        'EOG - channel 3 [ROC/A2]',
        'EOG - channel 4 [LOC/A1]',
        'EMG - channel 5 [EMG1]',
        'EMG - channel 6 [EMG2]',
        'EMG - channel 7 [EMG3]',
    ]
    for i in range(0, 9):
        plt.subplot(9, 1, i + 1)
        plt.subplots_adjust(hspace=0)
        Pxx, freqs, bins, im = plt.specgram(DATA[i], NFFT=512, Fs=srate, label='Channel')
        plt.ylim(0, 30)
        plt.xlim(0, len(DATA[i]) / srate)
        if i == 0:
            plt.title(title)
        plt.text(0.5, 0.5, channel_labels[i])
        plt.xlabel("Time (s)")
        plt.ylabel("Frequency (Hz)")
    plt.show()
    return
plt.title("Transformada de Fourier de signal") plt.xlabel("Frecuencia") plt.ylabel("Transoformada de Fourier") plt.xlim(-650, 650) plt.subplot(2, 1, 2) plt.plot(f_sum, np.abs(fourier_sum)) plt.title("Transformada de Fourier de la suma") plt.xlabel("Frecuencia") plt.ylabel("Transformada de Fourier") plt.xlim(-650, 650) plt.savefig("GarciaCamila_Transformadas.pdf") #Se hace el espectrograma de ambas señales- plt.figure() plt.subplot(2, 1, 1) plt.specgram(y_sum, NFFT=256, Fs=2, Fc=0) plt.title("Espectograma de signal") plt.xlabel("Frecuencia") plt.ylabel("Transformada de Fourier") plt.subplot(2, 1, 2) plt.specgram(y_sig, NFFT=256, Fs=2, Fc=0) plt.title("Espectograma de la suma") plt.xlabel("Frecuencia") plt.ylabel("Transformada de Fourier") plt.savefig("GarciaCamila_Espectrogramas.pdf") #Se almacenan los datos de temblor.txt temblor = np.genfromtxt("temblor.txt", skip_header=4) #Grafico de los datos temblor plt.figure()
low_cut = 40.0
high_cut = 1.0
fs = 250

# read 2500 samples (10 s at 250 Hz) from the file
for i in range(0, 2500):
    data_string = f.readline().strip('\n\0')
    channel1_data.append(float(data_string))

filter1_data = bandpass_filter(channel1_data, high_cut, low_cut)
fft1_data = np.abs(fft(filter1_data, 250))
fft1_data = fft1_data[0:50]

t = np.arange(0, 10, 0.004)

plt.subplot(2, 2, 1)
plt.title("EEG data from Hiren")
plt.plot(t, channel1_data)
plt.grid()

plt.subplot(2, 2, 2)
plt.title("EEG filtered signal")
plt.plot(t, filter1_data)
plt.grid()

plt.subplot(2, 2, 3)
plt.plot(fft1_data)
plt.grid()

plt.subplot(2, 2, 4)
Pxx, freq, bins, im = plt.specgram(filter1_data, NFFT=250, Fs=fs)
plt.show()
fl = 60
filename = file
wavefile = wave.open(path + "/" + dir + "/" + filename, 'r')  # open for reading

nchannels = wavefile.getnchannels()
sample_width = wavefile.getsampwidth()
framerate = wavefile.getframerate()
numframes = wavefile.getnframes()

# get wav_data
wav_data = wavefile.readframes(-1)
wav_data = np.frombuffer(wav_data, dtype=np.int16)

Time = np.linspace(0, len(wav_data) / framerate, num=len(wav_data))
pl.figure(1)
pl.title('Signal Wave...')
pl.plot(Time, wav_data)

Fs = framerate
pl.figure(2)
pl.subplots_adjust(left=0, right=1, bottom=0, top=1)
pl.specgram(wav_data, NFFT=1024, Fs=Fs, noverlap=512)
pl.axis('off')
pl.axis('tight')
pl.savefig(out + "/" + dir + '/%s.png' % filename)

count += 1
print(dir + ": " + str(count) + " for this round")

f = open("/home/ubuntu/src/tf_p27/tensorflow/tensorflow/clouder/log.txt", "a")
f.write("%s finished\n" % dir)
f.close()
            suma += data[k] * np.exp(-2j * np.pi * k * (n * 1.0 / N))
        print(suma)
        amplitudes.append(suma)
    return np.asarray(amplitudes)


res = abs(Fourier(N, data1[:, 1]))
# res2 = abs(Fourier(N, data2[:, 1]))
freq = fftfreq(N, d=dt)
sci_res = fft(data1[:, 1])

plt.figure()
plt.plot(freq, res)
plt.xlim(0, 3000)
plt.show()

# Plots of the Fourier transform of both signals
# print(len(res), len(res2), len(fftfreq(N, d=dt)))
# plt.figure()
# plt.plot(res)
# plt.plot(fftfreq(N, d=dt), res2)
# plt.show()

# Spectrogram of the signals
plt.figure()
plt.subplot(2, 1, 1)
plt.specgram(data1[:, 1])
plt.subplot(2, 1, 2)
plt.specgram(data2[:, 1])
plt.show()
plt.plot(freq, sigSum_trans, "r")
plt.title("Transformada Seniales sumadas")
plt.xlabel("Tiempo")
plt.ylabel("Senial")

plt.subplot(1, 2, 2)
plt.plot(freq, sig_trans, "b")
plt.title("Transformada Seniales seguidas")
plt.xlabel("Tiempo")
plt.ylabel("Senial")
plt.savefig("PlotTransFourier2.pdf")

# Using matplotlib.pyplot.specgram
# (see: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.specgram.html),
# make a spectrogram of the two signals.
plt.figure()
plt.specgram(senialSum, NFFT=256, Fs=100)
plt.title("Espectrograma senial sumada")
plt.xlabel("t")
plt.ylabel("Frecuencia")
plt.savefig("specgram1.pdf")

plt.figure()
plt.specgram(senial, NFFT=256, Fs=100)
plt.title("Espectrograma senial seguida")
plt.xlabel("t")
plt.ylabel("Frecuencia")
plt.savefig("specgram2.pdf")

# Load the data from temblor.txt. These are real data from a seismic signal.
temblor = np.genfromtxt("temblor.txt")
tTemb = np.linspace(0, 900.01, len(temblor))
# number of sample points that the sliding window overlaps, must be less than NFFT
noverlap = 50

xstart = 0    # x axis limits in the plot
xend = 21627  # max. length of signal: 21627 sec

# plot
ax1 = plt.subplot(211)
plt.plot(tr.times(), tr.data, linewidth=0.5)
plt.xlabel('time [sec]')
plt.ylabel('velocity [m/s]')

plt.subplot(212, sharex=ax1)
plt.title('spectrogram, window length %s pts' % NFFT)
Pxx, freqs, bins, im = plt.specgram(tr.data, NFFT=NFFT, Fs=tr.stats.sampling_rate,
                                    noverlap=noverlap, cmap=plt.cm.gist_heat)
# Pxx is the segments x freqs array of instantaneous power, freqs is
# the frequency vector, bins are the centers of the time bins in which
# the power is computed, and im is the matplotlib.image.AxesImage instance
plt.ylabel('frequency [Hz]')
plt.xlabel('time [sec]')
plt.ylim(0, 0.2)
plt.xlim(xstart, xend)

plt.show()
# -

# The example above uses the `specgram` function of `matplotlib.pylab`. In case of very long signals or signals with a high sampling rate, this is highly recommended due to the high computational effort. In case of shorter time signals, `ObsPy` also offers a `spectrogram` function. An example can be found [here](https://docs.obspy.org/tutorial/code_snippets/plotting_spectrograms.html).
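#
# As a point of comparison, here is a minimal sketch (not from the original notebook) of the `ObsPy` alternative mentioned above, reusing the same `Trace` object `tr`:

# +
# ObsPy computes and plots the spectrogram itself; log=True uses a logarithmic
# frequency axis. Convenient, but computationally heavier for long or high-rate signals.
tr.spectrogram(log=True, title='spectrogram of %s' % tr.id)
# -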
plt.xlabel("frecuencias") plt.ylabel("f(t)") plt.savefig('Fourier_trans.pdf') #Determinacion de las frecuencias principales pos_maxima = np.argmax(s) a = frecuencias[pos_maxima] print("La frecuencia principal de signal.dat es", a) pos_maxima = np.argmax(s1) b = frecuencias1[pos_maxima] print("La frecuencia principal de signalSuma.dat es", b) #6)Creacion del espectograma plt.figure(figsize=[10, 10]) plt.subplot(1, 2, 1) plt.specgram(f, NFFT=256 * 2, Fs=0.3) plt.ylabel("frecuencias") plt.xlabel("t(s)") plt.title("Espectograma de signal.dat") plt.subplot(1, 2, 2) plt.specgram(f1, NFFT=(256 * 2), Fs=0.3) plt.colorbar() plt.ylabel("frecuencias") plt.xlabel("t(s)") plt.title("Espectograma de signalSuma.dat ") plt.savefig("espectograma.pdf") #7 importacion de los datos de temblor.txt datos2 = np.genfromtxt('temblor.txt', skip_header=4) #Como la frecuencia es 100HZ se asume que los datos se toman cada 0.1 segundos periodo = 0.0
def spectrograms(EEG, nfft):
    # magnitude spectrograms of the two EEG channels
    (spec1, f, _, _) = plt.specgram(EEG[:, 0], NFFT=nfft, Fs=207,
                                    noverlap=nfft // 2, mode='magnitude')
    (spec2, f, _, _) = plt.specgram(EEG[:, 1], NFFT=nfft, Fs=207,
                                    noverlap=nfft // 2, mode='magnitude')
    # keep only frequencies below 40 Hz and stack the log-spectrograms of both channels
    fs_below_40 = f < 40
    return (np.stack((np.log10(spec1[fs_below_40, :]),
                      np.log10(spec2[fs_below_40, :]))).astype(np.float32),
            f[fs_below_40])
plt.figure()
plt.plot(freq, res2, c="black", label="suma")
plt.plot(freq, res, c="red", label="separadas")
plt.legend(loc="upper right")
plt.title("Transformada de Fourier de las senales")
plt.xlabel("Frecuencia")
plt.ylabel("Densidad de frecuencia")
plt.xlim(0, 1000)
plt.savefig("Fourier_senales.pdf")

# Spectrogram of the signals
plt.figure()
plt.title("Espectrogramas de senales")
plt.subplot(2, 1, 1)
plt.specgram(data1[:, 1])
plt.ylabel("Frecuencia(Hz)")
plt.title("Suma senales")
plt.subplot(2, 1, 2)
plt.specgram(data2[:, 1])
plt.xlabel("tiempo(s)")
plt.title("senales individuales")
plt.savefig("espectrogramas1.pdf")

# Load the data and plot the "temblor" signal as a function of time
temblor = np.genfromtxt("temblor(1).txt", skip_header=4)
Num = len(temblor)
dt1 = 0.01
plt.savefig("TransformadasSenales.png") #Spectogram dt = t[1] - t[0] Fs = int( 1.0 / dt ) #Cantidad de samples por unidad de tiempo. En nuestro caso será el array completo porque los datos son de menos de 1 segundo. Referencias: https://matplotlib.org/gallery/images_contours_and_fields/specgram_demo.html#sphx-glr-gallery-images-contours-and-fields-specgram-demo-py FsS = int(1.0 / dt) plt.figure() plt.subplot(2, 1, 1) plt.title("Espectrograma señal sumada") plt.ylabel("Frecuencia (Hz)") plt.xlabel("Tiempo (s)") plt.specgram( fS, Fs=FsS ) #, noverlap=900) # Referencia: https://matplotlib.org/gallery/images_contours_and_fields/specgram_demo.html#sphx-glr-gallery-images-contours-and-fields-specgram-demo-py plt.subplots_adjust(hspace=0.5) plt.savefig("EspectrogramaSenales.png") plt.subplot(2, 1, 2) plt.title("Espectrograma señal") plt.ylabel("Frecuencia (Hz)") plt.xlabel("Tiempo (s)") plt.specgram( f, Fs=Fs ) # Referencia: https://matplotlib.org/gallery/images_contours_and_fields/specgram_demo.html#sphx-glr-gallery-images-contours-and-fields-specgram-demo-py plt.savefig("EspectrogramaSenales.png") #Señal sismica datosTemblor = np.genfromtxt("temblor.txt", skip_header=4) dt = 1 / 100 #En el documento aparece que la frecuencia de sampleo fue 100Hz. Como Hz son s^-1 siginifica que 1/hz nos dara el dt que existe entre los datos.
##########################
# You can put the code that calls the above functions down here
if __name__ == "__main__":
    # plt.close('all')  # Closes old plots.

    # Load the example data
    eeg1, srate, stage1 = pull_lead1_epoch(0, 0, 5)

    # plot power spectral density
    plt.figure()
    Pxx, freqs = p.psd(eeg1, NFFT=512, Fs=srate)  # tighter versus default NFFT=256

    # plot spectrogram
    plt.figure()
    Pxx, freqs, bins, im = plt.specgram(eeg1, NFFT=256, Fs=srate)
    # plot_example_psds(eeg1, srate)

    # plot raw EEG time series and observed stages
    # plt.figure()
    # fig, ax1 = plt.subplots()  # Needed for the multiple y-axes
    # times = np.arange(0, len(eeg1), 1)
    # plt.plot(times, eeg1)
    # plt.plot(times, stage1)
def draw_specgram(signal, name):
    # Draw the spectrogram of the signal with specgram.
    # NOTE: `name` is currently unused.
    pylab.specgram(signal, NFFT=1024, Fs=128)
# file = open(wave, 'wb+')
# file.write(response.read())
# file.close()

wave_file = wav.read('english.wav')
# print(wave_file[1])

# draw waveform graph of the audio
wavefile = wave.open('english.wav', 'r')
params = wavefile.getparams()
nchannels, sample_width, framerate, numframes = params[:4]

sample_rate, data = wave_file
plt.subplot(2, 1, 1)
plt.title('Original')
plt.plot(data)

# create new, quieter data at only 0.2x of the original amplitude
newdata = data * 0.2
newdata = newdata.astype(numpy.int16)
wav.write('silent.wav', sample_rate, newdata)

plt.subplot(2, 1, 2)
plt.title('Quiet')
plt.plot(newdata)
plt.show()

result = pylab.specgram(data, NFFT=1024, Fs=sample_rate, noverlap=900)
pylab.show()
# %%
def plot_spectrogram(data, fs, levels=100, sigma=1, perc_low=1, perc_high=99, nfft=1024, noverlap=512):
    """
    Data
    ------------
    data: Quantity array or Numpy ndarray
        Your time series of voltage values
    fs: Quantity
        Sampling rate in Hz

    Spectrogram parameters
    ------------
    levels: int
        The number of color levels displayed in the contour plot (spectrogram)
    sigma: int
        The standard deviation argument for the gaussian blur
    perc_low, perc_high: int
        Out of the powers displayed in your spectrogram, these are the low and high
        percentiles which mark the low and high ends of your colorbar. E.g., there
        might be a period at the start of the experiment where the voltage time series
        shifts abruptly, which would appear as a vertical 'bar' of high power in the
        spectrogram; setting perc_high to a value lower than 100 makes the colorbar
        ignore these higher values (>perc_high) and display the 'hottest' colors at
        the highest power values other than these (<perc_high), allowing for better
        visualization of the actual data. Similar effects can be accomplished with
        the vmin/vmax args to contourf.
    nfft: int
        The number of data points used in each window of the FFT. Argument is
        directly passed on to matplotlib.specgram(). See the documentation of
        `matplotlib.specgram()` for options.
    noverlap: int
        The number of data points that overlap between FFT windows. Argument is
        directly passed on to matplotlib.specgram(). See the documentation of
        `matplotlib.specgram()` for options.
    """
    plt.rcParams['image.cmap'] = 'jet'

    # time and frequency bins with their power
    spec, freqs, bins, __ = plt.specgram(data, NFFT=nfft, Fs=int(fs), noverlap=noverlap)
    Z = np.flipud(np.log10(spec) * 10)
    Z = ndi.gaussian_filter(Z, sigma)

    extent = 0, np.amax(bins), freqs[0], freqs[-1]
    levels = np.linspace(np.percentile(Z, perc_low), np.percentile(Z, perc_high), levels)

    x1, y1 = np.meshgrid(bins, freqs)
    plt.ylabel('Frequency (Hz)', fontsize=12)
    plt.xlabel('Time (seconds)', fontsize=12)
    plt.suptitle("Spectrogram title", fontsize=15, y=.94)
    plt.contourf(x1, list(reversed(y1)), Z, vmin=None, vmax=None, extent=extent, levels=levels)
    plt.colorbar()
    plt.axis('auto')
    plt.show()
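# Usage sketch (an assumption, not part of the original code): feed plot_spectrogram
# a synthetic linear chirp so the rising frequency ridge is easy to recognize.
# It relies on the same module-level imports the function above already assumes
# (numpy as np, matplotlib.pyplot as plt, scipy.ndimage as ndi).
if __name__ == "__main__":
    fs_demo = 1000.0                                        # sampling rate in Hz (illustrative)
    t_demo = np.arange(0, 10, 1.0 / fs_demo)                # 10 s of samples
    chirp = np.sin(2 * np.pi * (5 + 10 * t_demo) * t_demo)  # frequency ramps upward over time
    plot_spectrogram(chirp, fs_demo, sigma=2, perc_low=5, perc_high=95,
                     nfft=512, noverlap=256)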
A = 1
B = 10.   # Hz
Fs = 100  # Hz
tau = 100 # s
N = tau * Fs
beta = B / Fs / N

n = np.arange(N + 1)
x = np.cos(np.pi * beta * n**2)

plt.plot(x)
plt.show()

plt.figure(2)
pylab.specgram(x, NFFT=1024, Fs=Fs, noverlap=900)  # , cmap=pylab.cm.gist_heat)
plt.show()

input_array = x  # np.random.rand(10000000)

# instantiate PyAudio (1)
p = pyaudio.PyAudio()

# open stream (2), 2 is size in bytes of int16
stream = p.open(format=p.get_format_from_width(2),
                channels=1,
                rate=44100,
                output=True)

# play stream (3), blocking call
# the stream expects 16-bit samples, so scale the float signal and convert to int16
stream.write((input_array * 32767).astype(np.int16).tobytes())
label="T. Signal", linewidth=0.7) plt.ylabel("T. Signal") plt.title('Transformada Fourier de SignalSuma y Signal') plt.xlabel("Frecuencia") plt.grid() plt.legend() plt.savefig("TransformadasFourier.pdf") plt.close() ##espectogramas plt.figure() plt.subplot(2, 1, 1) plt.title('Transformada Fourier de SignalSuma y Signal') plt.specgram(signal_suma, NFFT=256 * 2, Fs=dt_suma) plt.colorbar() plt.grid() plt.subplot(2, 1, 2) plt.specgram(signal, NFFT=256 * 2, Fs=dt) plt.colorbar() plt.grid() plt.savefig("Espectrograma.pdf") plt.close() print("SE UTILIZO UNA IMPLEMENTACION PROPIA PARA REEMPLAZAR FFTFREQ") #####se almacena temblor.txt y se grafica temblor = np.genfromtxt("temblor.txt")[4:] dt_temblor = 1 / 100
def getspectrogram(input):
    # ******************** Parameter settings ********************
    winsize = 512  # frame length set to 512; typically 20-50 ms
    shift = 256    # frame shift set to 256; typically half the frame length
    fh = 600       # maximum pitch frequency
    fl = 60        # minimum pitch frequency

    # Read the audio
    # filename = '0a9f9af7_nohash_0.wav'
    filename = input
    # subprocess.call(['ffmpeg', '-i', 'XXX.mp3', 'XXX.wav'])
    wavefile = None
    try:
        wavefile = wave.open(filename, 'r')  # open for reading
    except wave.Error as e:
        # if you get here it means an error happened; maybe you should warn the user,
        # but doing pass will silently ignore it
        pass
    '''
    if not (wavefile.getname() == 'RIFF' or wavefile.getname() == 'WAVE'):
        return
    '''
    # Read the four pieces of header information from the wav file
    nchannels = wavefile.getnchannels()
    sample_width = wavefile.getsampwidth()
    framerate = wavefile.getframerate()
    numframes = wavefile.getnframes()
    print('nchannels:' + str(nchannels))
    print('sample_width:' + str(sample_width))
    print('framerate:' + str(framerate))
    print('numframes:' + str(numframes))

    # get wav_data
    wav_data = wavefile.readframes(-1)
    wav_data = np.frombuffer(wav_data, dtype=np.int16)
    Time = np.linspace(0, len(wav_data) / framerate, num=len(wav_data))
    pl.figure(1)
    pl.title('Signal Wave...')
    pl.plot(Time, wav_data)
    # pl.show()

    # framerate is 16000; now the spectrogram
    Fs = framerate
    pl.figure(2)
    pl.subplots_adjust(left=0, right=1, bottom=0, top=1)
    pl.specgram(wav_data, NFFT=1024, Fs=Fs, noverlap=512)
    pl.axis('off')
    pl.axis('tight')
    # pl.savefig("0a9f9af7_nohash_0.png")
    pl.savefig('/Users/gongman/Desktop/eightgram/%s.png' % input)
    pl.show()


# a = '0a9f9af7_nohash_0.wav'
# getspectrogram(a)
for motorList in os.listdir(source_mode_path):
    source_mode_list_path = os.path.join(source_mode_path, motorList)
    for csvList in os.listdir(source_mode_list_path):
        csvPath = os.path.join(source_mode_list_path, csvList)
        for i in range(5):
            str_data = np.loadtxt(open(csvPath, 'rb'), delimiter="\n", skiprows=0)
            part_data = str_data[i * framerate:i * framerate + framerate]
            matrices, _, _, _ = plt.specgram(part_data, NFFT=nfft, Fs=framerate,
                                             noverlap=noverlap, mode='psd')
            filepath, tempfilename = os.path.split(csvPath)
            filename, extension = os.path.splitext(tempfilename)
            front = tempfilename[0:6]
            k = tempfilename[6:9]
            # int_k = int(k) + i * 20
            # needs to follow the original second figure, with indices 6, 7, 8, 9, 10
            int_k = (int(k) - 1) * 5 + (i + 1)
            fin_k = str(int_k)
            fin_k = fin_k.zfill(3)
# ---- Save Processed Sound Recording ----
f = wave.open(r"improved.wav", "wb")
# 1 channel with a sample width of 2 and frame rate of 2*fs, which is standard
f.setnchannels(1)
f.setsampwidth(2)
f.setframerate(2 * fs)
# convert to 16-bit samples to match the 2-byte sample width
timeFilteredSoundWave = timeFilteredSoundWave.astype('int16')
f.writeframes(timeFilteredSoundWave.tobytes())

# --- EXTRA ----
# Here, the spectrogram is presented to see the variation of frequencies with respect
# to the time domain signal. This representation is very useful to analyse the profile
# of the sound and the distribution of frequencies over time. Thus, frequency over
# time is represented.
plt.figure(4)
plt.subplot(311)
plt.plot(t, soundwave)
plt.xlabel('Time (s)')
plt.xlim([0, len(t) / fs])
plt.ylabel('Magnitude')

plt.subplot(312)
powerSpectrum, frequenciesFound, time, imageAxis = plt.specgram(soundwave, Fs=fs)
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')

plt.subplot(313)
powerSpectrum, frequenciesFound, time, imageAxis = plt.specgram(timeFilteredSoundWave, Fs=fs)
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.show()
plt.figure(figsize=(10, 5))
plt.subplot(2, 2, 1)
plt.plot(frecuencias, transformada)
plt.xlabel("frecuencias")
plt.ylabel("transformada")
plt.title("transformada signal con fft")
plt.subplot(2, 2, 2)
plt.plot(frecuencias2, transformada2)
plt.xlabel("frecuencias")
plt.ylabel("transformada")
plt.title("transformada signalsuma con fft")
plt.savefig("Fourier_trans.png")

plt.figure(figsize=(10, 5))
plt.subplot(2, 2, 1)
plt.specgram(signal_y, NFFT=256, Fs=1 / dtiempo)  # note: Fs is 1/dt
plt.xlabel("tiempo")
plt.ylabel("frecuencias")
plt.title("espectograma de la señal signal")
plt.subplot(2, 2, 2)
plt.specgram(suma_y, NFFT=256, Fs=1 / dtiempo2)
plt.title("espectograma de la señal signalsuma")
plt.xlabel("tiempo")
plt.ylabel("frecuencias")
plt.savefig("espectograma2señales.png")

temblor = np.genfromtxt("temblor.txt", skip_header=4)
plt.figure()
plt.plot(temblor)
plt.xlabel("tiempo")
plt.ylabel("señal sismica")