def wav_to_spectrogram(audio_path, save_path, spectrogram_dimensions=(64, 64), noverlap=16, cmap="gray_r"):
    """ Creates a spectrogram of a wav file.

    :param audio_path: path of the wav file
    :param save_path: path where the spectrogram image is saved
    :param spectrogram_dimensions: size of the spectrogram in pixels. Defaults to (64, 64)
    :param noverlap: See http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.spectrogram.html
    :param cmap: the color scheme to use for the spectrogram. Defaults to 'gray_r'
    :return:
    """
    sample_rate, samples = wav.read(audio_path)
    plt.specgram(samples, cmap=cmap, noverlap=noverlap)
    plt.axis("off")
    plt.tight_layout()
    plt.savefig(save_path, bbox_inches="tight", pad_inches=0)

    # TODO: Because I can't figure out how to create a plot without padding,
    # I am using `.trim()`. It would be better to do this in the plot itself.
    # Also probably better to do the sizing in the plot too.
    with Image(filename=save_path) as i:
        i.trim()
        i.resize(spectrogram_dimensions[0], spectrogram_dimensions[1])
        i.save(filename=save_path)
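# The TODO above asks for a way to drop the figure padding inside matplotlib itself.
# Below is a minimal sketch of that idea (an alternative approach, not the original
# author's solution): create the figure at the target pixel size and let a single
# axes fill it completely, so no Wand trim()/resize() pass is needed afterwards.
# It assumes the same scipy.io.wavfile / pyplot imports used by wav_to_spectrogram.
import matplotlib.pyplot as plt
import scipy.io.wavfile as wav

def wav_to_spectrogram_no_trim(audio_path, save_path, size=(64, 64), noverlap=16, cmap="gray_r"):
    sample_rate, samples = wav.read(audio_path)
    dpi = 100
    # Figure sized so that the saved image is exactly size[0] x size[1] pixels.
    fig = plt.figure(figsize=(size[0] / dpi, size[1] / dpi), dpi=dpi, frameon=False)
    # An axes spanning the whole figure leaves no padding around the spectrogram.
    ax = fig.add_axes([0, 0, 1, 1])
    ax.axis("off")
    ax.specgram(samples, cmap=cmap, noverlap=noverlap)
    fig.savefig(save_path, dpi=dpi)
    plt.close(fig)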
def plot_segment(segment, window_size, step_size):
    """Plots the waveform, power over time, spectrogram with formants, and power over frequency of a Segment."""
    pyplot.figure()
    pyplot.subplot(2, 2, 1)
    pyplot.plot(numpy.linspace(0, segment.duration, segment.samples), segment.signal)
    pyplot.xlim(0, segment.duration)
    pyplot.xlabel('Time (s)')
    pyplot.ylabel('Sound Pressure')
    pyplot.subplot(2, 2, 2)
    steps, power = segment.power(window_size, step_size)
    pyplot.plot(steps, power)
    pyplot.xlim(0, segment.duration)
    pyplot.xlabel('Time (s)')
    pyplot.ylabel('Power (dB)')
    pyplot.subplot(2, 2, 3)
    pyplot.specgram(segment.signal, NFFT=window_size, Fs=segment.sample_rate, noverlap=step_size)
    formants = segment.formants(window_size, step_size, 2)
    pyplot.plot(numpy.linspace(0, segment.duration, len(formants)), formants, 'o')
    pyplot.xlim(0, segment.duration)
    pyplot.xlabel('Time (s)')
    pyplot.ylabel('Frequency (Hz)')
    pyplot.subplot(2, 2, 4)
    frequencies, spectrum = segment.power_spectrum(window_size, step_size)
    pyplot.plot(frequencies / 1000, 10 * numpy.log10(numpy.mean(spectrum, axis=0)))
    pyplot.xlabel('Frequency (kHz)')
    pyplot.ylabel('Power (dB)')
def analize_dat(self, file_path, start, length, window, overlap):
    target_path = self._get_dat_target_path(file_path)
    start_sample = start * self.fs
    end_sample = start_sample + length * self.fs
    signal = []
    with open(target_path) as f:
        for i, line in enumerate(f):
            if len(line.strip()) and line[0] == ';':
                continue
            if i < start_sample:
                continue
            if i >= end_sample:
                break
            vals = self._parse_line(line)
            if len(vals) > 2:
                signal.append(vals[1])
    np_signal = np.array(signal)
    del signal
    plt.specgram(
        np_signal,
        NFFT=window,
        Fs=self.fs,
        window=mlab.window_hanning,
        scale_by_freq=True,
        noverlap=overlap)
    plt.draw()
def spectrum(signal):
    plt.specgram(signal, Fs=44100)
    plt.ylim([0, 22050])
    plt.savefig('spectrum.png')
    plt.clf()
def write_results(self, kiwi_result, individual_calls, filename, audio, rate, segmented_sounds): # sample_name_with_dir = filename.replace(os.path.split(os.path.dirname(filename))[0], '')[1:] self.Log.info('%s: %s' % (filename.replace('/Recordings',''), kiwi_result)) self.DevLog.info('<h2>%s</h2>' % kiwi_result) self.DevLog.info('<h2>%s</h2>' % filename.replace('/Recordings','')) self.DevLog.info('<audio controls><source src="%s" type="audio/wav"></audio>', filename.replace('/var/www/','').replace('/Recordings','')) # Plot spectrogram plt.ioff() plt.specgram(audio, NFFT=2**11, Fs=rate) # and mark on it with vertical lines found audio features for i, (start, end) in enumerate(segmented_sounds): start /= rate end /= rate plt.plot([start, start], [0, 4000], lw=1, c='k', alpha=0.2, ls='dashed') plt.plot([end, end], [0, 4000], lw=1, c='g', alpha=0.4) plt.text(start, 4000, i, fontsize=8) if individual_calls[i] == 1: plt.plot((start + end) / 2, 3500, 'go') elif individual_calls[i] == 2: plt.plot((start + end) / 2, 3500, 'bv') plt.axis('tight') title = plt.title(kiwi_result) title.set_y(1.03) spectrogram_sample_name = filename + '.png' plt.savefig(spectrogram_sample_name) plt.clf() path = spectrogram_sample_name.replace('/var/www/','').replace('/Recordings','') self.DevLog.info('<img src="%s" alt="Spectrogram">', path) self.DevLog.info('<hr>')
def t_hist_specgram(t_hist, time):
    time_step = (time[-1] - time[0]) / len(time)
    plt.figure()
    plt.specgram(t_hist, NFFT=256, Fs=1. / time_step)
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')
    plt.xlim([0, time[-1] - time[0]])
    plt.show()
def plot_me(signal, i, imax, MySampleRate=SampleRate, NFFT=8192, noverlap=1024):
    a = pyplot.subplot(imax, 2, 2 * i + 1)
    pyplot.title("Left %i" % MySampleRate)
    pyplot.specgram(signal[0], NFFT=NFFT, Fs=MySampleRate, noverlap=noverlap)
    a = pyplot.subplot(imax, 2, 2 * (i + 1))
    pyplot.title("Right %i" % MySampleRate)
    pyplot.specgram(signal[1], NFFT=NFFT, Fs=MySampleRate, noverlap=noverlap)
def creat_img(a, str_data, nchannels, sampwidth, framerate, nframes):
    # f = wave.open(r"C:/py/soudn/static/img/m"+a+".wav", "rb")
    # Read the format info: (nchannels, sampwidth, framerate, nframes, comptype, compname)
    # params = f.getparams()
    # nchannels, sampwidth, framerate, nframes = params[:4]
    # str_data = f.readframes(nframes)
    # f.close()
    # Convert the waveform bytes to a numpy array (frombuffer replaces the deprecated fromstring)
    wave_data = np.frombuffer(str_data, dtype=np.short)
    wave_data.shape = -1, nchannels
    wave_data = wave_data.T
    time = np.arange(0, nframes) * (1.0 / framerate)
    wave_data = wave_data / 32768.0
    plt.subplot(211)
    plt.title('Amplitude Fig')
    plt.ylabel('Amplitude')
    plt.plot(time, wave_data[0])
    plt.subplot(212)
    plt.title('Spectrogram Fig')
    plt.xlabel('Time')
    plt.ylabel('Frequency')
    plt.specgram(wave_data[0], NFFT=1024, Fs=framerate, noverlap=400)
    plt.ylim(200, 2500)
    plt.savefig("C:/py/soudn/static/img/m" + a + ".png")
def analyze(self):
    data = self.prepare_numpy_matrix()
    # np.hstack(self.data.signal_data(self.slice))
    # filter_low_high_pass(np.hstack(self.data.signal_data(self.slice)))
    plt.specgram(data, NFFT=self.nfft * self.schema.sampling_rate_hz, Fs=self.schema.sampling_rate_hz)
    plt.axis([0, 30, 0, 35])
    self.make_plot(self.file_name)
def plot_wav_and_spec(wav_path, f0): wav = get_wav(wav_path) fs = wav.getframerate() nf = wav.getnframes() ns = nf/float(fs) wav = fromstring(wav.readframes(-1), 'Int16') fig = pyplot.figure() pyplot.title(wav_path) w = pyplot.subplot(311) w.set_xlim(right=nf) w.plot(wav) pyplot.xlabel("Frames") s = pyplot.subplot(312) pyplot.specgram(wav, Fs=fs) s.set_xlim(right=ns) s.set_ylim(top=8000) if f0: f = pyplot.subplot(313) x_points = [(ns/len(f0))*x for x in range(1, len(f0)+1)] y_points = [x for x in f0] pyplot.plot(x_points, y_points) f.set_xlim(right=ns) pyplot.xlabel("Seconds") pyplot.show()
def plotSpectrogram(self,N=4096,title='Spectrogramme'): plt.clf() plt.specgram(self.signal,N,self.framerate) plt.colorbar() plt.xlabel('Temps (en secondes)') plt.ylabel('Frequence (en Hz)') plt.title(title + ', Fe=' + str(self.framerate) + ' (' + str(N) + ' points)')
def viz_sound(sound, name, npts=1000):
    plt.figure()
    plt.specgram(sound)
    plt.title(name)
    plt.figure()
    plt.plot(sound[:npts])
    plt.title(name)
def test_read_wave():
    f = Sndfile("../fcjf0/sa1.wav", 'r')
    data = f.read_frames(46797)
    data_arr = np.array(data)
    # print data_arr
    pyplot.figure()
    pyplot.specgram(data_arr)
    pyplot.show()
def show_spectrogram(data, date, file_time):
    plt.specgram(data, pad_to=nfft, NFFT=nfft, noverlap=noverlap, Fs=fs)
    plt.title(date + "T" + file_time + "Z")
    plt.ylim(0, 600)
    plt.yticks(np.arange(0, 601, 50.0))
    plt.xlabel("Time (sec)")
    plt.ylabel("Frequency (Hz)")
    plt.show()
    plt.close()
def spectrogram():
    fs, data = sc.io.wavfile.read('/home/zechthurman/Songscape/03. Kali 47 (Original Mix).wav')
    t = data.size / 500
    dt = 0.1
    NFFT = 1024
    Fs = fs  # the sampling frequency of the wav file (int(1.0 / dt) would mislabel both axes)
    plt.specgram(data[:, 1], NFFT=NFFT, Fs=Fs, noverlap=900, cmap=plt.cm.gist_heat)
    plt.show()
def generate_spectrogram(wav_file):
    # Method to generate the spectrogram
    sound_info, frame_rate = generate_sound_data(wav_file)
    plt.subplot(111)
    plt.title("Spectrogram of %r" % sound)
    plt.xlabel("Time in (s)")
    plt.ylabel("Frequency in Hz")
    plt.specgram(sound_info, Fs=frame_rate)
    plt.savefig(sound_source + "/" + sound + "_spectrogram.png")
def plot_specgram(sound_names, raw_sounds):
    i = 1
    for n, f in zip(sound_names, raw_sounds):
        plt.subplot(10, 1, i)
        specgram(np.array(f), Fs=66650)
        plt.title(n.title())
        i += 1
    plt.suptitle("Figure 2: Spectrogram", x=0.5, y=0.915, fontsize=18)
    plt.show()
def wav_specgram(filename):
    import scipy.io.wavfile  # importing the submodule explicitly; plain "import scipy" does not expose scipy.io
    import matplotlib.pyplot as plt
    rate, signal = scipy.io.wavfile.read(filename)
    if signal.ndim > 1:
        signal = signal[:, 0]
    plt.specgram(signal, Fs=rate, xextent=(0, 0.1))
    plt.show()
def plot_specgram(sound_names, raw_sounds):
    i = 1
    fig = plt.figure(figsize=(25, 60), dpi=900)
    for n, f in zip(sound_names, raw_sounds):
        plt.subplot(10, 1, i)
        specgram(np.array(f), Fs=22050)
        plt.title(n.title())
        i += 1
    plt.suptitle('Figure 2: Spectrogram', x=.5, y=.915, fontsize=18)
    plt.show()
def main(path): # Connect to the cs server with the proper credentials. session = FTP() session.connect("cs.appstate.edu") user = raw_input("Type your username.") passwd = getpass.getpass("Type your password.") session.login(user, passwd) session.cwd(path) try: session.mkd("../Spectrograms") except error_perm: pass # Gets the flac files in the passed in directory match = "*.flac" count = 1 # Print the total number of .mp3 files that are in the directory, # and go through them all print "Total number of files: " + str(len(session.nlst(match))) left_index = 1 right_index = 1 for name in session.nlst(match): read = StringIO.StringIO() session.retrbinary("RETR " + name, read.write) data = read.getvalue() bee_rate, bee_data = get_data_from_flac(data) plt.specgram(bee_data, pad_to=nfft, NFFT=nfft, noverlap=noverlap, Fs=fs) plt.title(name) jpeg_temp = tempfile.NamedTemporaryFile(suffix=".jpeg") plt.savefig(jpeg_temp.name) plt.close() spec = open(jpeg_temp.name, 'r') if "left" in name: session.storbinary("STOR ../Spectrograms/%05d_left.jpeg" % left_index, spec) left_index += 1 if "right" in name: session.storbinary("STOR ../Spectrograms/%05d_right.jpeg" % right_index, spec) right_index += 1 spec.close() jpeg_temp.close() print "File number: " + str(count) count += 1 # Close the StringIO read.close() # Close the FTP connection session.quit() print "Done."
def graph_spectrogram(wav_file):
    rate, data = get_wav_info(wav_file)
    nfft = 200      # Length of each window segment
    fs = 8000       # Sampling frequency
    noverlap = 120  # Overlap between windows
    nchannels = data.ndim
    if nchannels == 1:
        pxx, freqs, bins, im = plt.specgram(data, nfft, fs, noverlap=noverlap)
    elif nchannels == 2:
        pxx, freqs, bins, im = plt.specgram(data[:, 0], nfft, fs, noverlap=noverlap)
    return pxx
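# A brief usage sketch for graph_spectrogram above. get_wav_info is not shown in
# this snippet, so the helper below is an assumption that simply wraps
# scipy.io.wavfile.read, and "example.wav" is a placeholder path.
import matplotlib.pyplot as plt
from scipy.io import wavfile

def get_wav_info(wav_file):
    # Assumed helper: return (rate, data) for the wav file.
    rate, data = wavfile.read(wav_file)
    return rate, data

pxx = graph_spectrogram("example.wav")  # placeholder path
print(pxx.shape)  # (freq_bins, time_bins) array of spectral power
plt.show()        # display the spectrogram that plt.specgram drew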
def main(): frame_size = 2048 num_features = 12 filename = None try: filename = sys.argv[1] sample_frequency, x = wavfile.read(filename) if x.ndim > 1: x = x.T[0, :] sample_frequency = float(sample_frequency) num_frames = x.size // frame_size except IndexError: sample_frequency = 44100.0 num_frames = 100 size = frame_size * num_frames dt = 1.0 / sample_frequency if filename is None: t = np.linspace(0, size * dt, size, endpoint=False) signal_frequency = 1000.0 x = np.sin(2 * np.pi * signal_frequency * t) + 0.25 * np.random.rand(size) print('Input signal: %d frames, %d samples at %0.0f Hz' % ( num_frames, size, sample_frequency, )) # 2. split signal into frames and calculate MFCC features for each frame features = np.zeros((num_frames, num_features)) for i in range(num_frames): idx_begin = i * frame_size idx_end = (i + 1) * frame_size frame = x[idx_begin:idx_end] features[i, :] = mfcc(frame, sample_frequency, num_features=num_features) # 3. plot the spectrogram of the signal and the MFCC chart below plt.figure() plt.subplot(211) plt.specgram(x, NFFT=frame_size, Fs=sample_frequency) plt.xlim([0, dt * size]) plt.ylim([0, sample_frequency / 2.0]) plt.xlabel('Time [s]') plt.ylabel('Frequency [Hz]') plt.title('Spectrogram') plt.subplot(212) x_scale = np.linspace(0, dt * size, num_frames, endpoint=False) y_scale = np.arange(0, num_features + 1, 1) plt.xlim([0, dt * size]) plt.ylim([0, num_features]) # features transposed so time is on the X axis plt.pcolormesh(x_scale, y_scale, features.T) plt.xlabel('Time [s]') plt.ylabel('Feature number') plt.title('MFCC chart') plt.show()
def _GenerateSpectrogram(wavefile, timestamp):
    """Generates a spectrogram that works with the recorded sound"""
    metadata_path = SystemState.AudioState.metadata_path
    filename = metadata_path + timestamp + '.png'
    signal = wavefile.readframes(-1)
    signal = numpy.frombuffer(signal, dtype=numpy.int16)  # frombuffer replaces the deprecated fromstring
    framerate = wavefile.getframerate()
    plt.title(time.ctime(float(timestamp)), fontsize=24)
    plt.subplot(111)
    plt.specgram(signal, Fs=framerate, NFFT=128, noverlap=0)
    plt.savefig(filename, dpi=100, format='png')
    plt.close()
def plot_me(signal, MySampleRate, NFFT=8192, noverlap=1024):
    a = plt.subplot(2, 1, 1)
    plt.title("Original signal")
    plt.xlabel("s")
    plt.ylabel("Hz")
    plt.specgram(signal[0], NFFT=NFFT, Fs=MySampleRate, noverlap=noverlap)
    plt.colorbar()
    a = plt.subplot(2, 1, 2)
    plt.title("Processed signal")
    plt.xlabel("s")
    plt.ylabel("Hz")
    plt.specgram(signal[1], NFFT=NFFT, Fs=MySampleRate, noverlap=noverlap)
    plt.colorbar()
def plot(inputs, outputs, SampleRate=44100, NFFT=8192, noverlap=1024): pyplot.figure() if len(inputs) > 0: a = pyplot.subplot(2, len(inputs), 1) pyplot.title("Input L") pyplot.specgram(inputs[0], NFFT=NFFT, Fs=SampleRate, noverlap=noverlap) # pyplot.plot(inputs[0]) if len(inputs) > 1: a = pyplot.subplot(2, 2, 2) pyplot.title("Input R") pyplot.specgram(inputs[1], NFFT=NFFT, Fs=SampleRate, noverlap=noverlap) # pyplot.plot(inputs[1]) if len(outputs) > 0: a = pyplot.subplot(2, len(outputs), len(outputs) + 1) pyplot.title("Output L") pyplot.specgram(outputs[0], NFFT=NFFT, Fs=SampleRate, noverlap=noverlap) # pyplot.plot(outputs[0]) if len(outputs) > 1: a = pyplot.subplot(2, 2, 4) pyplot.title("Output R") pyplot.specgram(outputs[1], NFFT=NFFT, Fs=SampleRate, noverlap=noverlap) # pyplot.plot(outputs[1]) return pyplot
def spectrogram_ridges(chan, gap_thresh=50, min_length=150):
    '''
    Identifies fractures in the signal based on the spectrogram.
    Connects local maxima for each frequency bin of the spectrogram matrix,
    then looks for vertical lines of a given length within the frequency content.
    The goal is to find broadband noises within the signal (fractures).

    Inputs:
        chan - (nparray) input signal array
    optional:
        gap_thresh (int) the maximum number of freq bins that can be skipped
            while still considering the ridge line connected.
        min_length (int) the minimum length of ridge lines to be considered a fracture.
    Outputs:
        fractures (list) indices of the identified fractures within the signal
    '''
    fractures = []
    Pxx, freqs, bins, im = plt.specgram(chan, NFFT=512, Fs=48000, noverlap=0)
    ridge_lines = identify_ridge_lines(Pxx, 0 * np.ones(len(bins)), gap_thresh)
    for x in ridge_lines:
        if len(x[1]) > min_length:
            fractures.append(bins[x[1][0]])
            plt.plot(bins[x[1][-10:]], freqs[len(freqs) - x[0][-10:] - 1], 'b')
    return fractures
def process_image(file_name, enlarge=True, image_height=129, image_width=23):
    '''Retrieves time data from the aiff file and computes the spectrogram for time_data.
    enlarge: gives the option to resize the image to new dimensions WxH by interpolation.'''
    if file_name.endswith('.aiff'):
        f = aifc.open(file_name, 'r')
        str_frames = f.readframes(f.getnframes())
        Fs = f.getframerate()
        time_data = np.frombuffer(str_frames, np.short).byteswap()
        f.close()
        Pxx, freqs, bins, im = plt.specgram(time_data, NFFT=256, Fs=Fs, noverlap=90, cmap=plt.cm.gist_heat)
        Pxx = Pxx[freqs < 250., :]  # Right-whale calls occur under 250 Hz
        # print Pxx.shape
        from scipy.misc import imresize
        from sklearn import preprocessing
        if enlarge:
            # change image size
            Pxx_prep = imresize(np.log10(Pxx), (image_height, image_width), interp='lanczos').astype('float32')
            # Pxx_prep = imresize(Pxx, (image_height, image_width), interp='lanczos').astype('float32')
        else:
            # image size not changed
            Pxx_prep = np.log(Pxx).astype('float32')
        # Pxx_prep = preprocessing.MinMaxScaler().fit_transform(Pxx_prep)  # rescale to 0-1
        Pxx_prep = preprocessing.StandardScaler(copy=True, with_mean=True, with_std=True).fit_transform(Pxx_prep)  # rescale by std
        Pxx_ = (Pxx_prep * 255.0).astype(int)
        # Returning raw values to perform operations. Used to obtain raw data for ipynb
        # Pxx_ = Pxx_prep
        # Pxx_ = Pxx
        return Pxx_
    else:
        print("Error in file: " + file_name + "...\n")
        pass
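# scipy.misc.imresize, used above, was removed in SciPy 1.3. The sketch below is one
# possible replacement based on Pillow; it assumes the pipeline only needs a
# Lanczos-interpolated float32 array of shape (image_height, image_width) and is an
# approximation of imresize's behaviour, not a bit-exact stand-in.
import numpy as np
from PIL import Image

def resize_spectrogram(Pxx, image_height, image_width):
    # Rescale the log-spectrogram to 0-255 (roughly what imresize did internally),
    # then resize with Lanczos interpolation. PIL expects (width, height) order.
    arr = np.log10(Pxx)
    arr = (arr - arr.min()) / (arr.max() - arr.min()) * 255.0
    img = Image.fromarray(arr.astype(np.float32), mode='F')
    img = img.resize((image_width, image_height), resample=Image.LANCZOS)
    return np.asarray(img, dtype=np.float32)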
def compute_specgram(data_loc,train_folder,file_name): '''Retrieves time data from the aiff file and compute the spectogram for time_data''' if not os.path.isfile(data_loc + '/' + train_folder + '/specgrams/' + file_name.split('.')[0] + '.png'): try: plt.figure(figsize=(18.,16.), dpi=50) #900x800 f = aifc.open(os.path.join(data_loc,train_folder, file_name), 'r') str_frames = f.readframes(f.getnframes()) #Fs = f.getframerate() Fs= 4000 time_data = np.fromstring(str_frames, np.short).byteswap() f.close() # Pxx is the segments x freqs array of instantaneous power, freqs is # the frequency vector, bins are the centers of the time bins in which # the power is computed, and im is the matplotlib.image.AxesImage # instance # spectrogram of file Pxx, freqs, bins, im = plt.specgram(time_data,Fs=Fs,noverlap=90,cmap=plt.cm.gist_heat) plt.axis('off') plt.savefig(data_loc + '/'+ train_folder + '/specgrams/'+ file_name.split('.')[0] + '.png', bbox_inches='tight') plt.close() except ValueError: print("Error in file: "+ file_name + "...\n")
def graph_spectrogram(wav_file, wav_folder):
    name_save = wav_file.replace(".wav", ".png")
    name_save_cv2 = wav_file.replace(".wav", "_cv2.png")
    rate, data = get_wav_info(wav_file)
    nfft = 256  # Length of the windowing segments
    fs = 256    # Sampling frequency
    plt.clf()
    pxx, freqs, bins, im = plt.specgram(data, nfft, fs)
    plt.axis('off')
    plt.gray()
    plt.savefig(name_save,
                dpi=50,  # Dots per inch
                bbox_inches='tight',
                pad_inches=0)
    # Export the plot as an image
    fig = plt.gcf()
    fig.canvas.draw()
    # Get the RGB buffer from the figure
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    # tostring_rgb returns the pixels row by row, so the array is (height, width, 3)
    buf = buf.reshape(h, w, 3)
    # canvas.tostring_argb gives a pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
    # buf = np.roll(buf, 2)
    cv2.imwrite(name_save_cv2, buf)
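# An alternative (an assumption, not part of the original script) is to encode the
# rendered figure to PNG in memory and let cv2.imdecode work out the pixel layout,
# which sidesteps the width/height ordering of canvas.tostring_rgb entirely.
# figure_to_cv2_image(plt.gcf()) could then replace the manual buffer handling above.
import io
import cv2
import numpy as np
import matplotlib.pyplot as plt

def figure_to_cv2_image(fig):
    # Render the figure to an in-memory PNG and decode it with OpenCV (BGR image).
    buf = io.BytesIO()
    fig.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
    buf.seek(0)
    png = np.frombuffer(buf.getvalue(), dtype=np.uint8)
    return cv2.imdecode(png, cv2.IMREAD_COLOR)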
def harmonics(): synth = WaveSynth() freq = 1500 num_harmonics = 6 h_all = synth.harmonics(freq, 1, [(n, 1/n) for n in range(1, num_harmonics+1)]) even_harmonics = [(1, 1)] # always include fundamental tone harmonic even_harmonics.extend([(n, 1/n) for n in range(2, num_harmonics*2, 2)]) h_even = synth.harmonics(freq, 1, even_harmonics) h_odd = synth.harmonics(freq, 1, [(n, 1/n) for n in range(1, num_harmonics*2, 2)]) h_all.join(h_even).join(h_odd) import matplotlib.pyplot as plot plot.title("Spectrogram") plot.ylabel("Freq") plot.xlabel("Time") plot.specgram(h_all.get_frame_array(), Fs=synth.samplerate, noverlap=90, cmap=plot.cm.gist_heat) plot.show()
sin2 = 2 * numpy.sin(2 * numpy.pi * 200 * t) # add interval of high pitched signal masks = _get_mask(t, 2, 4, 1.0, 0.0) + \ _get_mask(t, 14, 15, 1.0, 0.0) sin2 = sin2 * masks noise = 0.02 * numpy.random.randn(len(t)) final_signal = sin1 + sin2 + noise return final_signal if __name__ == '__main__': step = 0.001 sampling_freq = 1000 t = numpy.arange(0.0, 20.0, step) y = generate_signal(t) # we can visualize this now # in time ax1 = plt.subplot(211) plt.plot(t, y) # and in frequency plt.subplot(212) plt.specgram(y, NFFT=1024, noverlap=900, Fs=sampling_freq, cmap=plt.cm.gist_heat) plt.show()
plt.ylabel(r'$\mu$V',ha='left',rotation='horizontal') ax = plt.gca() for loc, spine in ax.spines.items(): if loc in ['right', 'top']: spine.set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') plt.axes([.40,.60,.57,.35]) Fs= 500 plotSpectrum(summed_LFP,Fs) plt.title('FFT') plt.axes([.40,.08,.57,.40]) powerSpectrum, freqenciesFound, time, imageAxis = plt.specgram(summed_LFP, 130, Fs) plt.ylim(0, 100) plt.yticks(range(0, 100,10)) plt.title('Spectograma') plt.xlabel('Time') plt.ylabel('Frequency') fig.savefig('neuron-l'+str(ww)+'-f'+str(w)+'.png', dpi=300) #plt.show()
sdr.sample_rate = Fs # Hz sdr.center_freq = center_freq # Hz sdr.gain = 'auto' # Read specified number of complex samples from tuner. # Real and imaginary parts are normalized in the range [-1,1] samples = sdr.read_samples(N) # Clean up the SDR device sdr.close() del (sdr) # Convert samples to a numpy array x1 = np.array(samples).astype("complex64") # Plot spectogram plt.specgram(x1, NFFT=2048, Fs=Fs) plt.title("Samples spectogram (x1)") plt.ylim(-Fs / 2, Fs / 2) plt.savefig("x1_spec.png", bbox_inches='tight', pad_inches=0.5) plt.close() # To mix the data down, generate a digital complex exponential # (with the same length as x1) with phase -F_offset/Fs fc1 = np.exp(-1.0j * 2.0 * np.pi * F_offset / Fs * np.arange(len(x1))) # Multiply x1 and the digital complex expontential (baseband) x2 = x1 * fc1 # Generate plot of shifted signal plt.specgram(x2, NFFT=2048, Fs=Fs) plt.title("Shifted signal (x2)") plt.xlabel("Time (s)")
#pp.yticks(np.arange(-15000,15000+5000,5000),fontsize = 12) pp.ylim(amp_min, amp_max) #pp.tick_params( axis='x', labelbottom='off') #pp.tick_params( axis='y', labelleft='off') pp.legend(fontsize='small', loc=1) #Sonogram pp.subplot(4, 1, 2) #grid(True) nfft_ = int(w[0] * 0.010) Pxx, freqs, bins, im = pp.specgram(x1_part, NFFT=int(w[0] * 0.008), Fs=w[0], noverlap=int(w[0] * 0.005)) pp.xlim(0, step_time + 0.001) pp.yticks(np.arange(0, Fmax, 1000), fontsize=12) pp.ylim(0, Fmax) pp.ylabel('Frequency [Hz]') pp.tick_params(axis='x', labelbottom='off') pp.subplot(4, 1, 3) #grid(True) nfft_ = int(w[0] * 0.010) Pxx, freqs, bins, im = pp.specgram(x1_part, NFFT=int(w[0] * 0.008),
elif test == 2: import matplotlib matplotlib.use('TkAgg') matplotlib.interactive(True) import matplotlib.pyplot as plt nperseg = int(p.SAMPLE_RATE * p.WINDOW_SIZE) noverlap = int(p.SAMPLE_RATE * (p.WINDOW_SIZE - p.WINDOW_SHIFT)) wav_file = Path("../data/aspire/000/fe_03_00047-A-025005-025135.wav") audio, _ = torchaudio.load(wav_file) # pyplot specgram audio = torch.squeeze(audio) fig = plt.figure(0) plt.specgram(audio, Fs=p.SAMPLE_RATE, NFFT=p.NFFT, noverlap=noverlap, cmap='plasma') # implemented transformer - scipy stft transformer = Spectrogram(sample_rate=p.SAMPLE_RATE, window_stride=p.WINDOW_SHIFT, window_size=p.WINDOW_SIZE, nfft=p.NFFT) data, f, t = transformer(audio) mag = data[0] fig = plt.figure(1) plt.pcolormesh(t, f, np.log10(np.expm1(data[0])), cmap='plasma') fig = plt.figure(2) plt.pcolormesh(t, f, data[1], cmap='plasma') #print(max(data[0].view(257*601)), min(data[0].view(257*601))) #print(max(data[1].view(257*601)), min(data[1].view(257*601))) # scipy spectrogram f, t, z = sp.signal.spectrogram(audio, fs=p.SAMPLE_RATE, nperseg=nperseg, noverlap=noverlap,
signal = pois[ex2plot[poi]] # wave plt.figure() plt.plot(np.linspace(0, len(signal) / sr, len(signal)), signal) plt.ylabel('Amplitude') plt.xlabel('Time (s)') plt.title('POI {} in session {}, found in file #{}'.format( poi, session, audio_filename[ex2plot[poi]])) # When no figure is specified the current figure is saved pdf_wave.savefig() plt.close() #spectrogram plt.figure() powerSpectrum, freqenciesFound, time, imageAxis = plt.specgram( signal, Fs=sr) plt.axis(ymin=0, ymax=10000) plt.xlabel('Time') plt.ylabel('Frequency') plt.title('POI {} in session {}, found in file #{}'.format( poi, session, audio_filename[ex2plot[poi]])) # When no figure is specified the current figure is saved pdf_spectrogram.savefig() plt.close() # Save .wav file of snippet sf.write( str(session) + '_' + 'POI' + str(poi) + '.wav', signal, sr) print('Saved figures to PDF') pdf_wave.close()
plt.xlabel('Time [sec]') plt.show() ''' # test finished # test for mel spectrogram signal_in = np.array(pd.to_numeric(df_EFR_85_aenu_retest.iloc[0, 0:4096])) mel_S = librosa.feature.melspectrogram(y=signal_in, sr=9606) plt.figure() librosa.display.specshow(librosa.power_to_db(S, ref=np.max), y_axis='mel', fmax=8000, x_axis='time') plt.specgram(mel_S) plt.colorbar(format='%+2.0f dB') plt.title('Mel spectrogram') plt.tight_layout() plt.show() # df_spectrogram # spectrum fs=9606, nperseg=256, noverlap=128, nfft=9606 ''' df_spectrum_txt(signal_input=df_EFR_85_aenu_retest, store_path='/home/bruce/Dropbox/Project/6.Result/data_spectrogram/EFR/85/', store_name='EFR_85_r') df_spectrum_txt(signal_input=df_EFR_85_aenu_test, store_path='/home/bruce/Dropbox/Project/6.Result/data_spectrogram/EFR/85/', store_name='EFR_85_t')
# and choose a window that minimizes "spectral leakage" # (https://en.wikipedia.org/wiki/Spectral_leakage) window = np.blackman(NFFT) spec_cmap = 'ocean' import matplotlib as mpl mpl.use("Agg") from matplotlib import pyplot as plt # Plot the H1 spectrogram: plt.figure(figsize=(10, 6)) spec_H1, freqs, bins, im = plt.specgram(strain_H1[indxt], NFFT=NFFT, Fs=fs, window=window, noverlap=NOVL, cmap=spec_cmap, xextent=[-deltat, deltat]) plt.xlabel('time (s)') plt.ylabel('Frequency (Hz)') plt.colorbar() plt.axis([-deltat, deltat, 0, 2000]) plt.title('aLIGO H1 strain data near ' + eventname) plt.savefig(sys.argv[1]) # Plot the L1 spectrogram: # plt.figure(figsize=(10,6)) # spec_H1, freqs, bins, im = plt.specgram(strain_L1[indxt], NFFT=NFFT, Fs=fs, window=window, # noverlap=NOVL, cmap=spec_cmap, xextent=[-deltat,deltat])
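# A small self-contained illustration of the windowing point made in the comment
# above; the 300 Hz test tone and the parameters are made up for the demo and are
# not taken from the LIGO script. The same data is drawn with a Blackman window and
# with a rectangular window so the difference in spectral leakage is visible.
import numpy as np
import matplotlib.pyplot as plt

fs = 4096
t = np.arange(0, 4, 1 / fs)
x = np.sin(2 * np.pi * 300 * t) + 0.01 * np.random.randn(t.size)

NFFT_demo = 1024
NOVL_demo = NFFT_demo * 15 // 16

plt.subplot(1, 2, 1)
plt.specgram(x, NFFT=NFFT_demo, Fs=fs, noverlap=NOVL_demo, window=np.blackman(NFFT_demo))
plt.title('Blackman window')
plt.subplot(1, 2, 2)
plt.specgram(x, NFFT=NFFT_demo, Fs=fs, noverlap=NOVL_demo, window=np.ones(NFFT_demo))
plt.title('Rectangular window')
plt.show()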
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 19 21:57:41 2018

@author: helton
"""
import matplotlib.pyplot as plt
import numpy as np

#%% create signal
NFFT = 1024
dt = 0.01
Fs = int(1.0 / dt)
f1 = 2
f2 = 8
t = np.arange(0, 10, dt)
s = np.sin(2 * np.pi * f1 * t) + 0.5 * np.sin(2 * np.pi * f2 * t)

#%% plot values
plt.subplot(311)
plt.plot(t, s)
plt.subplot(312)
plt.psd(s, 512, Fs)
plt.subplot(313)
plt.specgram(s, NFFT, Fs, noverlap=100)
plt.show()
N = 512
hammingWindow = np.hamming(N)
samplingrate = 5000
length = (end - start) / samplingrate

# Hamming window used for the FFT
hammingWindow = np.hamming(N)

# plt.figure(figsize=(7, 2))
# Draw the spectrogram
plt.subplot(2, 1, 2)
pxx, freqs, bins, im = plt.specgram(specdataa, NFFT=N, Fs=samplingrate, noverlap=N - 1,
                                    window=hammingWindow, xextent=(starttime, endtime))
axis([starttime, starttime + length, 0, samplingrate / 2])
xlabel("time [second]")
plt.ylim(0, 1024)
ylabel("frequency [Hz]")
plt.colorbar(orientation='horizontal')
plt.savefig('B39LFP' + sys.argv[1] + '-' + sys.argv[2] + sys.argv[3] + '.png', dpi=300)
# plt.savefig('B39LFP'+ sys.argv[1] +'-'+ sys.argv[2] + sys.argv[3] + '.png',dpi=300)
# plt.savefig('B39'+ sys.argv[1] +'-'+ sys.argv[2] +'ripple-spec.png',dpi=300)
# plt.savefig('B39'+ sys.argv[1] +'-'+ sys.argv[2] +'spec-ripple.png',dpi=300)
for file in os.listdir( directory ): # This loop will carry on going as long as there are more files to process. filename = os.fsdecode(file) if filename.endswith(".wav"): # print(filename) # print(os.path.join(directory, filename)) file_to_process = os.path.join(folder3, filename) # print(file_to_process) if os.path.isfile(file_to_process): print( "From create_spectogram .... We found a wav file filtered.wav ....... " ) samplingFrequency, signalData = wavfile.read(file_to_process) plot.rcParams['figure.figsize'] = [6.5, 5.5] plot.subplot(211) plot.specgram(signalData, Fs=samplingFrequency, cmap='twilight') # plot.xlabel('Time, seconds') # plot.ylabel('Frequency') plot.savefig( '/home/tegwyn/ultrasonic_classifier/images/spectograms/specto.png', bbox_inches='tight') # plot.show() # sys.exit() # Exit with status os.EX_OK # using os._exit() method # The value of os.EX_OK is 0 os._exit(os.EX_OK)
def get_spectrogram(audio_file):
    sample_rate, X = wav.read(audio_file)
    print(sample_rate, X.shape)
    a, b, c, d = plt.specgram(X, Fs=sample_rate, xextent=(0, 30))
    return a, b, c, d, plt
def get_tsp(N, Fs, flg_ud=1, flg_eval=0): if np.log2(N) != int(np.log2(N)): print "TSP length must be power of 2" return 0 elif N<512: print "TSP length is too small" return 0 if flg_ud != 1 and flg_ud != 0: print "TSP up and down flag is invalied" return 0 # TSP parameters N_set = [512, 1024, 2048, 4096, 8192, 16384] stretch_set = [7, 10, 12, 13, 14, 15] if N in N_set: stretch = float(stretch_set[N_set.index(N)]) elif N>16384: stretch = 15.0 M = int((stretch/32.0)*float(N)) t = [float(ind)/float(Fs) for ind in range(0,N)] tsp_spec = np.zeros(N, dtype=complex) itsp_spec = np.zeros(N, dtype=complex) tsp_spec[0] = 1 tsp_spec[N/2] = np.exp(float(flg_ud*2-1)*1j*float(M)*np.pi) itsp_spec[0] = 1.0/tsp_spec[0] itsp_spec[N/2] = 1.0/tsp_spec[N/2] for i in np.arange(1,N/2): tsp_spec[i] = np.exp(float(flg_ud*2-1)*1j*4*float(M)*np.pi*(float(i-1)**2)/(float(N)**2)) itsp_spec[i] = 1.0/tsp_spec[i] tsp_spec[N-i] = np.conjugate(tsp_spec[i]) itsp_spec[N-i] = 1.0/tsp_spec[N-i] tsp_sig = (np.fft.ifft(tsp_spec,N)).real itsp_sig = (np.fft.ifft(itsp_spec,N)).real # Circular shift if flg_ud == 1: tsp_sig = np.roll(tsp_sig, -(N/2-M)) itsp_sig = np.roll(itsp_sig, N/2-M) elif flg_ud == 0: tsp_sig = np.roll(tsp_sig, N/2-M) itsp_sig = np.roll(itsp_sig, -(N/2-M)) # Evaluation if flg_eval: print "Evaluating TSP signal..." imp_eval_spec = np.fft.fft(tsp_sig,N)*np.fft.fft(itsp_sig,N) imp_eval = np.fft.ifft(imp_eval_spec,N) imp_eval_power = 20*np.log10(np.roll(np.abs(imp_eval), N/2)) plt.figure() plt.plot(t, tsp_sig) plt.xlabel("Time [s]") plt.ylabel("Amplitude") plt.figure() plt.plot(t, itsp_sig) plt.xlabel("Time [s]") plt.ylabel("Amplitude") stft_len = 256 stft_overlap = 128 stft_win = np.hamming(stft_len) plt.figure() pxx, stft_freq, stft_bin, stft_t = plt.specgram(tsp_sig, NFFT=stft_len, Fs=Fs, window=stft_win, noverlap=stft_overlap) plt.axis([0, N/Fs, 0, Fs/2]) plt.xlabel("Time [s]") plt.ylabel("Frequency [Hz]") plt.figure() plt.plot(imp_eval_power) plt.ylabel("[dB]") #plt.show() return (tsp_sig, itsp_sig)
    'cycle_ratio': 1.,
    'attack_ratio': 0.,
    'decay_ratio': 0.,
    'ramp_on': False,
    'bkgrd_noise': 0.
}


if __name__ == '__main__':
    fs = 100
    f0 = 1.
    Dur = 5.
    nt = int(Dur * fs)
    f1 = 10
    s = [
        el for el in signalgenerator(which='sweep', tsig=Dur, noisy=0.01, fs=fs,
                                     f0=f0, f1=f1, A=2, A1=-90)()
    ]

    from waves import wavwrite
    wavwrite(s, fs, 'sweep', normalize=True)

    from matplotlib.pyplot import specgram, cm
    specgram(s, NFFT=2**10, cmap=cm.bone_r)

    from pyphs.plots.singleplots import singleplot
    t = [el / float(fs) for el in range(nt)]
    singleplot(t, (s, ))
def plotSpecgram(data, rate, title):
    NFFT = 1024
    Pxx, freqs, bins, im = plt.specgram(data, NFFT, Fs=rate)
    plt.title(title)
    plt.ylabel("Frecuencia [hz]")
    plt.show()
def savepdf(self): fig = plt.figure(figsize=(1000, 1000)) if (self.fname[0].endswith('.wav')): plt.subplot(2, 2, 1) plt.plot(self.data, linewidth=0.5, scalex=True) plt.subplot(2, 2, 2) plt.specgram(self.data, Fs=self.fs, cmap=self.comboBox.currentText()) plt.subplot(2, 2, 3) plt.plot(self.InversedData, linewidth=0.5, scalex=True) plt.subplot(2, 2, 4) plt.specgram(self.InversedData.real, Fs=self.fs, cmap=self.comboBox.currentText()) if not (self.fname[0].endswith('.wav')): index = (len(self.data) - 1) - ((len(self.data) - 1) % 3) spectrogramData = [] for i in range(0, 3): if (self.checkBox[i].isChecked() == True): if i == 0: plt.subplot(3, 2, 1) spectrogramData = list(self.data[index][0:]) plt.plot(spectrogramData, linewidth=0.5, scalex=True) plt.subplot(3, 2, 2) elif i == 1: if (len(self.data) - 1 - index >= 1): plt.subplot(3, 2, 3) spectrogramData = list(self.data[index + 1][0:]) plt.plot(spectrogramData, linewidth=0.5, scalex=True) plt.subplot(3, 2, 4) else: plt.subplot(3, 2, 3) spectrogramData = list(self.data[index - 2][0:]) plt.plot(spectrogramData, linewidth=0.5, scalex=True) plt.subplot(3, 2, 4) else: if (len(self.data) - 1 - index == 2): plt.subplot(3, 2, 5) spectrogramData = list(self.data[index + 2][0:]) plt.plot(spectrogramData, linewidth=0.5, scalex=True) plt.subplot(3, 2, 6) else: plt.subplot(3, 2, 5) spectrogramData = list(self.data[index - 1][0:]) plt.plot(spectrogramData, linewidth=0.5, scalex=True) plt.subplot(3, 2, 6) plt.specgram(spectrogramData, Fs=250) plt.subplots_adjust(bottom=0.1, right=0.9, top=1.0) plt.show() plt.close() fn, _ = QtWidgets.QFileDialog.getSaveFileName( self, "Export PDF", None, "PDF files(.pdf);;AllFiles()") if fn: if QtCore.QFileInfo(fn).suffix() == "": fn += ".pdf" fig.savefig(fn)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys

import numpy as np
import matplotlib.pyplot as plt

if __name__ == "__main__":
    data = np.memmap(sys.argv[1], mode='r', dtype=np.dtype('<h'))
    data = (data << 4) >> 4
    # data = data[350000:1000000]
    dataMix = np.multiply(
        data,
        np.cos((np.arange(data.size) * 2 * np.pi * 120.0e+6 / 240.0e+6) + 0.06287))
    plt.specgram(dataMix, NFFT=1024, Fc=2380e6, Fs=240e+6)
    plt.show()
fft_data = rfft(data)
fd_axis = rate / len(data) * np.arange(0, len(data))
fft_refine = rfft(refine_sig)
fr_axis = rate / len(refine_sig) * np.arange(0, len(refine_sig))

plt.figure('data_fft')
plt.plot(fd_axis, abs(fft_data))
plt.ylabel('Magnitude')
plt.xlabel('Frequency')

plt.figure('refine_fft')
plt.plot(fr_axis, abs(fft_refine))
plt.ylabel('Magnitude')
plt.xlabel('Frequency')

plt.figure('Spectrogram Original')
Pxx, freqs, bins, im = plt.specgram(data, pad_to=512, Fs=rate)

plt.figure('Spectrogram Refine')
Pxx, freqs, bins, im = plt.specgram(refine_sig, pad_to=512, Fs=rate)

plt.show()
import wave
import matplotlib.pyplot as plt
import numpy as np
import os

filepath = "..\\融合wav\\"  # input directory
filename = os.listdir(filepath)
plt.rcParams['font.sans-serif'] = ['KaiTi']  # set the default font (so CJK file names render)
plt.rcParams['axes.unicode_minus'] = False   # keep the minus sign from rendering as a box in saved images
for file in filename:
    f = wave.open(filepath + file, 'rb')
    params = f.getparams()
    nchannels, sampwidth, framerate, nframes = params[:4]
    strData = f.readframes(nframes)                     # read the audio as raw bytes
    waveData = np.frombuffer(strData, dtype=np.int16)   # convert the bytes to int16 samples
    waveData = waveData * 1.0 / (max(abs(waveData)))    # normalize the wave amplitude
    waveData = np.reshape(waveData, [nframes, nchannels]).T
    f.close()
    # plot the spectrogram
    plt.specgram(waveData[0], Fs=framerate, scale_by_freq=True, sides='default')
    plt.title(file + ' spectrogram')
    plt.ylabel('Frequency(Hz)')
    plt.xlabel('Time(s)')
    plt.savefig('..\\语谱图\\' + file + '.png')
    plt.close()
    print(file, "spectrogram saved")
def _spectrum(): global TIME_STAMP, POSITION, PARAMETER # wave wave_tmp, split_tmp = waveform_combo.get(), split_segments.get() wave = FILE_DATA[wave_tmp][:] # obtain waveform data shp = wave.shape wave_array = wave.reshape(shp[0] * shp[1], ) # reshape field data to one dimensional array split_tmp = int(split_tmp) wave_segment = wave_array[POSITION[split_tmp - 1] + PARAMETER: POSITION[split_tmp]] # chosed wave segment time_segment = TIME_STAMP[ POSITION[split_tmp - 1] + PARAMETER:POSITION[split_tmp]] # corresponding time segment fce = Q / (M * 2 * np.pi) * wave_segment * 1e-12 # FFT fs, nfft, noverlap = fs_entry.get(), nfft_entry.get(), noverlap_entry.get( ) # obtain FFT setting fs, nfft, noverlap = int(fs), int(nfft), int(noverlap) # figure ylim_d, ylim_u = ylim_entry_1.get(), ylim_entry_2.get( ) # obtain plot setting clim_d, clim_u = clim_entry_1.get(), clim_entry_2.get() fig_w, fig_h = figsize_entry_1.get(), figsize_entry_2.get() ylim_u, ylim_d, clim_u, clim_d, fig_w, fig_h = int(ylim_u), int(ylim_d), \ int(clim_u), int(clim_d), int(fig_w), int(fig_h) # plot method = method_button['text'] # obtain value of method button plt.figure(figsize=(fig_w, fig_h)) cmap = cmap_combo.get() cmap_index = { 'autumn': plt.autumn, # colormap type selection dictionary 'bone': plt.bone, 'copper': plt.copper, 'cool': plt.cool, 'flag': plt.flag, 'gray': plt.gray, 'hot': plt.hot, 'hsv': plt.hsv, 'inferno': plt.inferno, 'jet': plt.jet, 'magma': plt.magma, 'nipy_spectral': plt.nipy_spectral, 'pink': plt.pink, 'plasma': plt.plasma, 'prism': plt.prism, 'spring': plt.spring, 'summer': plt.summer, 'virdis': plt.viridis, 'winter': plt.winter } cmap_index[cmap]() # choose one cmap type spec_data, spec_freq, spec_time, spec_img = plt.specgram( wave_segment, NFFT=nfft, Fs=fs, noverlap=noverlap) # specgram ax_x, ax_y = plt.gca().get_position().x0, plt.gca().get_position( ).y0 # left_bottom cornor position of current axes fce_fft = [] if method == 'specgram': # specgram method time_tick = [] for time in range(spec_time.shape[0]): time_trans = datetime.datetime.fromtimestamp( time_segment[int((time + 0.5) * (nfft - noverlap))] ) # find real time corresponding to FFT position fce_fft.append(fce[int((time + 0.5) * (nfft - noverlap))]) time_tick.append( time_trans.strftime('%H:%M:%S') + '\n' + time_trans.strftime('%f') + '\n' + time_trans.strftime('%Y.%m.%d')) # create time ticks fce_fft = np.array(fce_fft) # print(fce_fft) plt.plot(spec_time, fce_fft, color="k", linestyle="--", label="electron cyclotron freq.") plt.plot(spec_time, fce_fft / 2, color="k", linestyle="--", label="half electron cyclotron freq.") plt.xticks(list(spec_time), time_tick) # set time ticks plt.locator_params(axis='x', nbins=10) plt.figtext(ax_x - 0.05, ax_y - 0.05, 'Time:\nms:\nDate:') plt.colorbar() plt.clim(clim_d, clim_u) else: # pcolormesh method plt.clf() time_tick = [] fce_fft = [] for time in range(spec_time.shape[0]): time_tick.append(time_segment[int( (time + 0.5) * (nfft - noverlap))]) # find real time corresponding to FFT position fce_fft.append(fce[int((time + 0.5) * (nfft - noverlap))]) fce_fft = np.array(fce_fft) time_tick = np.array(time_tick) time_tick = time_tick * 1e9 # increase precision time_tick_final = time_tick.astype( 'datetime64[ns]' ) + TIME_ERROR # convert float to numpy.datetime64 type plt.pcolormesh(time_tick_final, spec_freq, spec_data, norm=LogNorm()) # pcolormesh # plt.plot(spec_time, fce_fft, color="k", linestyle="--", label="electron cyclotron freq.") # plt.plot(spec_time, fce_fft/2, color="k", linestyle="--", 
label="half electron cyclotron freq.") plt.figtext(ax_x - 0.05, ax_y - 0.02, 'Time:') plt.colorbar() plt.clim(10**clim_d, 10**clim_u) plt.ylim(ylim_d, ylim_u) plt.title(FILE_NAME.upper() + ' ' + wave_tmp[0:2] + ' Spectrum') plt.ylabel('Frequency (Hz)') plt.xlabel('Time (UTC)') plt.show()
Parameters ---------- num_samples : int Number of Samples to Return in an Array ; If -1, then Return All Returns ------- A Portion of the Sample History : (num_samples) ndarray """ if 0 < num_samples < len(self.history): return self.history.take(range(self.history_index, self.history_index + num_samples), mode="wrap") return self.history.take( range(self.history_index, self.history_index + len(self.history)), mode="wrap", ) if __name__ == "__main__": import matplotlib.pyplot as plt thread = RadioThread() thread.start() sleep(1) powerSpectrum, freqenciesFound, time, imageAxis = plt.specgram( thread.get_sample_history(), Fc=100000000, Fs=2000000) plt.xlabel("Time") plt.ylabel("Frequency") plt.show()
def plot(self, i=None, ax=None, getNumEvents=False, getLevels=False, getPlotOpts=False, overlay=False, **kwargs): plotOpts = {'LabelsOff': False, 'NormalizeTrial': False, 'RewardMarker': 3,\ 'TimeOutMarker': 4, 'PlotAllData': False, 'TitleOff': False,\ 'FreqLims': [], 'RemoveLineNoise': False, 'RemoveLineNoiseFreq': 50,\ 'LogPlot': False, 'TFfftWindow': 256, 'TFfftOverlap': 150,\ 'TFfftPoints': 256, 'TFfftStart': 500, 'TFfftFreq': 150,\ "Type": DPT.objects.ExclusiveOptions(["FreqPlot", 'Signal', 'TFfft'], 1)} for (k, v) in plotOpts.items(): plotOpts[k] = kwargs.get(k, v) plot_type = plotOpts['Type'].selected() if getPlotOpts: return plotOpts if getLevels: return ['trial', 'all'] if getNumEvents: if plotOpts['PlotAllData']: # to avoid replotting the same data. return 1, 0 if plot_type == 'FreqPlot' or plot_type == 'Signal' or plot_type == 'TFfft': if i is not None: nidx = i else: nidx = 0 return self.numSets, nidx if ax is None: ax = plt.gca() if not overlay: ax.clear() sRate = self.samplingRate VMPlot.create(self, trial_idx=i, ax=ax, plotOpts=plotOpts, marker_multiplier=30) if i == None or i == 0: rlfp = RPLLFP() self.data = rlfp.data if plot_type == 'Signal': data = self.data[self._data_timestamps] if plotOpts['RemoveLineNoise']: data = removeLineNoise(data, plotOpts['RemoveLineNoiseFreq'], sRate) ax.plot(self.get_data_timestamps_plot(), data) self.plot_markers() elif plot_type == 'FreqPlot': if plotOpts['PlotAllData']: data = self.data else: data = self.data[self._data_timestamps] if plotOpts['RemoveLineNoise']: data = removeLineNoise(data, plotOpts['RemoveLineNoiseFreq'], sRate) datam = np.mean(data) fftProcessed, f = computeFFT(data - datam, sRate) ax.plot(f, fftProcessed) if plotOpts['LogPlot']: ax.set_yscale('log') elif plot_type == 'TFfft': if plotOpts['PlotAllData']: dIdx = self.trialIndices[:, -1] - self.trialIndices[:, 0] mIdx = np.amax(dIdx) spTimeStep = plotOpts['TFfftWindow'] - plotOpts['TFfftOverlap'] spTimeBins = int( round( np.floor(mIdx / spTimeStep) - plotOpts['TFfftOverlap'] / spTimeStep)) nFreqs = (plotOpts['TFfftPoints'] / 2) + 1 ops = np.zeros((int(nFreqs), spTimeBins)) opsCount = np.zeros((int(nFreqs), spTimeBins)) for j in range(self.numSets): tftIdx = self.trialIndices[j, :] data = self.data[int(tftIdx[0]) - 1:int(tftIdx[-1])] if plotOpts['RemoveLineNoise']: data = removeLineNoise(data, plotOpts['RemoveLineNoiseFreq'], sRate) datam = np.mean(data) window = np.hamming(plotOpts['TFfftWindow']) [s, f, t, im] = plt.specgram(data - datam, window=window, NFFT=plotOpts['TFfftPoints'], noverlap=plotOpts['TFfftOverlap'], Fs=sRate) psIdx = range(0, s.shape[1]) ops[:, psIdx] = ops[:, psIdx] + s opsCount[:, psIdx] = opsCount[:, psIdx] + 1 x = np.arange( 0, mIdx - 1, plotOpts['TFfftWindow'] - plotOpts['TFfftOverlap']) x = x[:len(x) - 2] y = np.arange(0, (sRate / 2) + 1, sRate / plotOpts['TFfftPoints']) i = ops / opsCount im = ax.pcolormesh(x, y, i) ax.set_ylim([0, plotOpts['TFfftFreq']]) # Uncomment colorbar line after PanGUI is fixed. 
# plt.colorbar(im, ax = ax) else: tIdx = self.trialIndices[i, :] idx = [ tIdx[0] - ((plotOpts['TFfftStart'] + 500) / 1000 * sRate), tIdx[0] - ((plotOpts['TFfftStart'] + 1) / 1000 * sRate) ] data = self.data[int(idx[0]) - 1:int(idx[-1])] datam = np.mean(data) window = np.hamming(plotOpts['TFfftWindow']) [s, f, t, im] = plt.specgram(data - datam, window=window, NFFT=plotOpts['TFfftPoints'], noverlap=plotOpts['TFfftOverlap'], Fs=sRate) Pmean = np.mean(s, axis=1) Pstd = np.std(s, axis=1, ddof=1) idx = [(tIdx[0] - (plotOpts['TFfftStart'] / 1000 * sRate)), tIdx[1], tIdx[2]] data = self.data[int(idx[0]) - 1:int(idx[-1])] datam = np.mean(data) window = np.hamming(plotOpts['TFfftWindow']) [s, f, t, im] = plt.specgram(data - datam, window=window, NFFT=plotOpts['TFfftPoints'], noverlap=plotOpts['TFfftOverlap'], Fs=sRate) spec_Pnorm = np.zeros(s.shape) for row in range(s.shape[0]): spec_Pnorm[row, :] = (s[row, :] - Pmean[row]) / Pstd[row] spec_T = np.arange( (-plotOpts['TFfftStart'] / 1000), t[-1] - (plotOpts['TFfftStart'] / 1000 + plotOpts['TFfftWindow'] / sRate / 2) + (plotOpts['TFfftWindow'] - plotOpts['TFfftOverlap']) / sRate, (plotOpts['TFfftWindow'] - plotOpts['TFfftOverlap']) / sRate) ax.axvline(0, color='k') ax.axvline((self.timeStamps[i][1] - self.timeStamps[i][0]) * 30000 / 1000, color='k') im = ax.pcolormesh(spec_T, f, spec_Pnorm, vmin=-10, vmax=10) ax.set_ylim([0, plotOpts['TFfftFreq']]) # Uncomment colour bar line after PanGUI is fixed # plt.colorbar(im, ax = ax) if not plotOpts['LabelsOff']: if plot_type == 'FreqPlot': ax.set_xlabel('Frequency (Hz)') ax.set_ylabel('Magnitude') elif plot_type == 'TFfft': ax.set_xlabel('Time (s)') ax.set_ylabel('Frequency (Hz)') else: ax.set_xlabel('Time (ms)') ax.set_ylabel('Voltage (uV)') if not plotOpts['TitleOff']: channel = DPT.levels.get_shortname("channel", os.getcwd())[1:] ax.set_title('channel' + str(channel)) if len(plotOpts['FreqLims']) > 0: if plot_type == 'FreqPlot': ax.xlim(plotOpts['FreqLims']) elif plot_type == 'TFfft': ax.ylim(plotOpts['FreqLims']) return ax
def main(): x1 = sinwave(A1 , f1) N = 4096 # Multiplicar por um numero grande para aumentar o numero "empurrar a virgula" x1Int = np.int16(x1 * 2 ** 13) plt.figure(facecolor='w', figsize=(20, 30)) plt.plot(t[0 : 1010], x1[0 : 1010], 'k') plt.title('Sinal') plt.axis('tight') plt.xlabel(r'$t$ (segundos)') plt.ylabel('Intensidade') plt.savefig("lab2ex1_Sinal.png", bbox_inches='tight', transparent = False) plt.show() # Ouvir o som. Nao e' obrigatorio soundPlay.soundPlay(x1 , fs) filename = 'x1.wav' wavfile.write(filename, fs , x1Int) xfft = np.fft.fft(x1[0 : N]) freq1 = np.fft.fftfreq(len(xfft)) * fs x1mag = np.abs(xfft) / N Xfase = np.angle(xfft) #espectro amplitude plt.figure(facecolor = 'w', figsize=(10 , 20)) plt.stem(freq1, x1mag, 'k', linewidth=3) plt.axis([-1100 , 1100 , 0 , 2]) plt.ylabel('Espectro amplitude', fontsize = 18) plt.xlabel('f(Hz)', fontsize = 18) plt.xticks(fontsize = 22) plt.yticks(fontsize = 22) plt.grid() plt.savefig("lab2ex1_espectroAmplitude.png", bbox_inches='tight', transparent = False) plt.show() #espectro fase plt.figure(facecolor = 'w' , figsize=(30 , 20)) plt.stem(freq1, Xfase, 'k', linewidth = 3) plt.axis([-1100 , 1100 , -np.pi , np.pi]) plt.ylabel('Espectro de fase', fontsize = 18) plt.xlabel('f(Hz)', fontsize = 18) plt.xticks(fontsize = 22) plt.yticks(fontsize = 22) plt.grid() plt.savefig("lab2ex1_espectroFase.png" , bbox_inches = 'tight' , transparent = False) plt.show() plt.figure(facecolor = 'w' , figsize = (30 , 20)) plt.specgram(x1, NFFT=2 * N, Fs = fs, noverlap = 0) plt.xlabel('Tempo(s)' , fontsize = 18) plt.ylabel('f(Hz)' , fontsize = 18) plt.axis([0 , 0.73 , 0 , 1000]) plt.savefig("lab2ex1_espectrograma.png" , bbox_inches = 'tight' , transparent = False) plt.show()
# scale by the number of points so that the magnitude does not depend on the length fourier = fourier / float(n) #calculate the frequency at each point in Hz freqArray = np.arange(0, (n / 2), 1.0) * (rate * 1.0 / n) plt.plot(freqArray / 1000, 10 * np.log10(fourier), color='green') plt.xlabel('Frequency (kHz)') plt.ylabel('Power (dB)') plt.show() plt.figure(2, figsize=(8, 6)) plt.subplot(211) Pxx, freqs, bins, im = plt.specgram(channel1, Fs=rate, NFFT=1024, cmap=plt.get_cmap('autumn_r')) cbar = plt.colorbar(im) plt.xlabel('Time (s)') plt.ylabel('Frequency (Hz)') cbar.set_label('Intensity dB') plt.subplot(212) Pxx, freqs, bins, im = plt.specgram(channel2, Fs=rate, NFFT=1024, cmap=plt.get_cmap('autumn_r')) cbar = plt.colorbar(im) plt.xlabel('Time (s)') plt.ylabel('Frequency (Hz)') cbar.set_label('Intensity (dB)') plt.show()
def _plural(): global FILE_DATA, POSITION, PARAMETER, TIME_STAMP split_tmp = split_segments.get() split_tmp = int(split_tmp) start, end = POSITION[split_tmp - 1] + PARAMETER, POSITION[split_tmp] data_list = list(FILE_DATA) wave_data_indices = [ i for i, s in enumerate(data_list) if '_waveform' in s ] # find all waveforms in current WFC data wave_name = [data_list[j] for j in wave_data_indices] # FFT fs, nfft, noverlap = fs_entry.get(), nfft_entry.get(), noverlap_entry.get() fs, nfft, noverlap = int(fs), int(nfft), int(noverlap) # figure ylim_d, ylim_u = ylim_entry_1.get(), ylim_entry_2.get() clim_d, clim_u = clim_entry_1.get(), clim_entry_2.get() fig_w, fig_h = figsize_entry_1.get(), figsize_entry_2.get() ylim_u, ylim_d, clim_u, clim_d, fig_w, fig_h = int(ylim_u), int(ylim_d), \ int(clim_u), int(clim_d), int(fig_w), int(fig_h) # plot method = method_button['text'] plt.figure(figsize=(fig_w, fig_h)) cmap = cmap_combo.get() cmap_index = { 'autumn': plt.autumn, 'bone': plt.bone, 'copper': plt.copper, 'cool': plt.cool, 'flag': plt.flag, 'gray': plt.gray, 'hot': plt.hot, 'hsv': plt.hsv, 'inferno': plt.inferno, 'jet': plt.jet, 'magma': plt.magma, 'nipy_spectral': plt.nipy_spectral, 'pink': plt.pink, 'plasma': plt.plasma, 'prism': plt.prism, 'spring': plt.spring, 'summer': plt.summer, 'virdis': plt.viridis, 'winter': plt.winter } cmap_index[cmap]() num = len(wave_name) spec_time_method1 = np.zeros(0) time_tick_method1 = [] for index in range(num): # make subplots time_tick = [] wave_component = FILE_DATA[wave_name[index]][:] shp = wave_component.shape wave_component_array = wave_component.reshape(shp[0] * shp[1], ) wave_component_segment = wave_component_array[start:end] # fce = Q / (M * 2 * np.pi) * wave_component_segment * 1e-12 # fce_half = fce / 2 time_component_segment = TIME_STAMP[start:end] plt.subplot(num, 1, index + 1) spec_data_p, spec_freq_p, spec_time_p, spec_img_p = \ plt.specgram(wave_component_segment, NFFT=nfft, Fs=fs, noverlap=noverlap) if method == 'specgram': if index == num - 1: for time in range(spec_time_p.shape[0]): time_trans = datetime.datetime.fromtimestamp( time_component_segment[int( (time + 0.5) * (nfft - noverlap))]) time_tick.append( time_trans.strftime('%H:%M:%S') + '\n' + time_trans.strftime('%f') + '\n' + time_trans.strftime('%Y.%m.%d')) spec_time_method1 = spec_time_p time_tick_method1 = time_tick plt.colorbar() plt.clim(clim_d, clim_u) else: for time in range(spec_time_p.shape[0]): time_tick.append(time_component_segment[int( (time + 0.5) * (nfft - noverlap))]) plt.cla() time_tick = np.array(time_tick) time_tick = time_tick * 1e9 time_tick_final = time_tick.astype('datetime64[ns]') + TIME_ERROR plt.pcolormesh(time_tick_final, spec_freq_p, spec_data_p, norm=LogNorm()) plt.colorbar() plt.clim(10**clim_d, 10**clim_u) plt.title(FILE_NAME.upper() + ' ' + wave_name[index][0:2].upper() + ' Spectrum') plt.ylim(ylim_d, ylim_u) plt.ylabel('Frequency (Hz)') if index < num - 1: plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False) ax_x, ax_y = plt.gca().get_position().x0, plt.gca().get_position().y0 if method == 'specgram': plt.xticks(list(spec_time_method1), time_tick_method1) plt.locator_params(axis='x', nbins=10) plt.figtext(ax_x - 0.05, ax_y - 0.05, 'Time:\nms:\nDate:') else: plt.figtext(ax_x - 0.05, ax_y - 0.02, 'Time:') plt.xlabel('Time (UTC)') plt.show()
    s2 = np.append(s2, sub2)
    # I still wish I understood what these start and stop actually are...
    # Their values are updated here, apparently so that on the next step
    # it is easier to append the right values to s1 and s2.
    start = stop + 1
    stop = start + samplingFrequency

print('Just a second, I am about to plot this...')

# and here it all gets plotted
# Plot the signal
# Why am I colouring the plot that shows the signal? Because I can!
# I found this feature while googling how subplot works.
# By the way, if you are curious what the different plot colours are called,
# there is a list here: https://undoshutdown.blogspot.com/2018/06/matplotlib-python.html
plot.subplot(211, facecolor='ivory')
plot.plot(s1, s2)
plot.xlabel('Measurement number')
plot.ylabel('Amplitude')

# Plot the spectrogram
plot.subplot(212)
# I do not quite get why so many different variables are assigned from one call, but it works...
powerSpectrum, freqenciesFound, time, imageAxis = plot.specgram(s2, Fs=samplingFrequency)
plot.xlabel('Time')
plot.ylabel('Frequency')
plot.show()
def getFPeak( y, fs=1.0, fRange=None, nfft=None ) : """ Find the peak in the spectral power density of a signal, y. USAGE: [fp,t]=getFPeak( y, [fs=1], [fRange=[0 fs/2]], [nfft=sqrt(2*N)] ) where y=signal to process, 1D array fs=sampling frequency of y (Default=1) fRange=Frequency range to look for peaks, in units of Fs (Default=0 to 0.5*fs) nfft=Number of data points in each bin of y output fp=1D array of peak values at each time point t=times corresponding to peak frequencies. Ted Golfinopoulos, 18 July 2012 """ ts=1/fs #Sampling time #If no value for frequency range is specified, take all frequencies to Nyquist if fRange is None : fRange=[0.0, fs*0.5] # fRange=[min(fRange), max(fRange)] #Make sure fRange is sorted as min/max. if (max(fRange)>fs*0.5) : raise IOError('Maximum of frequency range must be < Nyquist frequency') if nfft is None : nfft=sqrt(2*len(y)) nfft=pow(2,ceil(log2(nfft))) #Make sure nfft is a power of 2. #Calculate spectrogram [Pxx, F, T,im]=specgram(y, int(nfft), fs) # print('Length of Pxx={0:d}'.format(len(Pxx))) # print('Length of F={0:d}'.format(len(F))) # print('Length of T={0:d}'.format(len(T))) # print('NFFT={0:f}'.format(float(nfft))) #Find peaks in spectrogram - search in each time bin of Pxx across all frequencies for a peak. #Limit search to specified frequency range. fp=zeros(len(T))-1 #Sentinel values for the peak. # for P in transpose(Pxx) : # print(F[P==max(P)]) # for i in range(0,size(Pxx,1)) : # fp[i]=F[Pxx[:,i]==max(Pxx[:,i])] temp=Pxx[logicAnd(F>fRange[0],F<fRange[1]),:] Fsubset=F[logicAnd(F>fRange[0],F<fRange[1])] # print('Size of reduced Pxx: {0:d}x{1:d} '.format(size(temp,0),size(temp,1))) try : fp=squeeze([ Fsubset[P==max(P)] for P in transpose(Pxx[logicAnd(F>fRange[0],F<fRange[1]),:]) ]) #Pull out frequency at which maximum power occurs for each time bin. except : #Case where frequency bins have multiple entries at same max value. # pdb.set_trace() temp=[ Fsubset[P==max(P)] for P in transpose(Pxx[logicAnd(F>fRange[0],F<fRange[1]),:]) ] #Pull out frequency at which maximum power occurs for each time bin. #Take first element whose value is equal to the maximum value for the relevant frequency bin. fp=[f[0] for f in temp] fp=squeeze(fp) return fp, T
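# A short usage sketch for getFPeak above, assuming it is importable together with
# the pylab-style helpers it relies on (specgram, squeeze, logicAnd, ...). The chirp
# test signal and the parameters are illustrative only.
import numpy as np
import matplotlib.pyplot as plt

fs = 1000.0
t = np.arange(0, 5, 1 / fs)
# Linear chirp: the instantaneous frequency rises from 50 Hz to 200 Hz over 5 s,
# so the tracked peak frequency should rise accordingly.
y = np.sin(2 * np.pi * (50 + 15 * t) * t)

fp, T = getFPeak(y, fs=fs, fRange=[20.0, 400.0], nfft=256)
plt.figure()
plt.plot(T, fp)
plt.xlabel('Time [s]')
plt.ylabel('Peak frequency [Hz]')
plt.show()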
def main(): plt.figure(figsize=(10, 12)) plt.suptitle('Overview of Conversion Process for Ray Traced Energy Histograms') plots_x = 2 plots_y = 4 ax = None speed_of_sound = 340.0 acoustic_impedance = 400.0 room_volume = 10000.0 output_sample_rate = 44100.0 rt60 = 0.4 signal_length = rt60 # Generate and plot sequence sequence = generate_dirac_sequence(speed_of_sound, room_volume, output_sample_rate, signal_length) times = np.arange(len(sequence)) / output_sample_rate ax = plt.subplot(plots_y, plots_x, 1) ax.set_title('1. Poisson Dirac Sequence') ax.set_xlabel('time / s') ax.set_ylabel('amplitude') ax.plot(times, sequence) # Weight sequence by histograms histogram_sr = 1000.0 histogram_steps = histogram_sr * signal_length bands = 8 histogram = gen_histogram(np.linspace(histogram_steps, histogram_steps / 2, bands)) * 0.004 weighted = weight_sequence(histogram, histogram_sr, sequence, output_sample_rate, acoustic_impedance) ax = plt.subplot(plots_y, plots_x, 2) ax.set_title('2. Per-band Weighted Sequences') ax.set_xlabel('time / s') ax.set_ylabel('amplitude') for i in weighted: ax.plot(times, i) # Plot weighted sequences in frequency domain ax = plt.subplot(plots_y, plots_x, 3) ax.set_title('3. Weighted Sequences in the Frequency Domain') ax.set_xscale('log') ax.set_xlabel('frequency / Hz') ax.set_ylabel('modulus / dB') for i in weighted: frequency_domain = np.fft.rfft(i) frequencies = np.fft.rfftfreq(len(i)) * output_sample_rate ax.plot(frequencies, a2db(np.abs(frequency_domain) / len(i))) # Plot filtered sequences in frequency domain minf = 20.0 / output_sample_rate maxf = 20000.0 / output_sample_rate band_edges = band_edge_frequency(np.arange(bands + 1), bands, minf, maxf) lower_edges = band_edges[:-1] upper_edges = band_edges[1:] ax = plt.subplot(plots_y, plots_x, 4) ax.set_title('4. Filtered Sequences in the Frequency Domain') ax.set_xscale('log') ax.set_xlabel('frequency / Hz') ax.set_ylabel('modulus / dB') wf = width_factor(minf, maxf, bands, 1.0) frequencies = np.fft.rfftfreq(len(sequence)) ffts = np.fft.rfft(weighted, axis=1) for i in range(bands): ffts[i] *= compute_bandpass_magnitude(frequencies, lower_edges[i], upper_edges[i], wf, 0) for i in ffts: ax.plot(frequencies * output_sample_rate, a2db(np.abs(i) / len(frequencies))) # Plot filtered sequences in time domain ax = plt.subplot(plots_y, plots_x, 5) ax.set_title('5. Filtered Sequences in the Time Domain') ax.set_xlabel('time / s') ax.set_ylabel('amplitude') iffts = np.fft.irfft(ffts, axis=1) for i in reversed(iffts): ax.plot(times, i) # Plot final output final_output = np.sum(iffts, axis=0) ax = plt.subplot(plots_y, plots_x, 6) ax.set_title('6. Summed Bands in the Time Domain') ax.set_xlabel('time / s') ax.set_ylabel('amplitude') ax.plot(times, final_output) # Plot spectrogram ax = plt.subplot(plots_y, 1, plots_y) ax.set_title('7. Spectrogram of Broadband Signal') ax.set_xlabel('time / s') ax.set_ylabel('frequency / Hz') Pxx, freqs, bins, im = plt.specgram(final_output, NFFT=1024, Fs=output_sample_rate, noverlap=512) plt.tight_layout() plt.subplots_adjust(top=0.9) plt.show() if render: plt.savefig( 'raytrace_process.svg', bbox_inches='tight', dpi=300, format='svg')
if end-start < TIME_SCALE*Fs: l = l+1 print(l) continue for i in range(start,end,TIME_SCALE*Fs): j=j+1 new_start = i new_end = i+TIME_SCALE*Fs if i+TIME_SCALE*Fs > end: new_start = end-TIME_SCALE*Fs new_end = end fig_name = get_fig_name(split_tmp, new_start, new_end, save_dict, save_name) if os.path.exists(fig_name) == True: continue initFigure() spec_data, spec_freq, spec_time, spec_img = plt.specgram(B[new_start:new_end], NFFT=nfft, Fs=Fs, noverlap=noverlap, scale='dB',cmap='jet') time_array, fce, flag = time_setting(new_start,new_end) if flag == 1: continue try: plot_setting(spec_time,time_array,fce) fce_plot(spec_time,fce) except: k = k+1 continue saveFigure(fig_name) print(wfc_name) print(j) print(k)
# import the pyplot and wavfile modules
import matplotlib.pyplot as plot
from scipy.io import wavfile

# Read the wav file (mono)
samplingFrequency, signalData = wavfile.read('test32bit.wav')

# Plot the signal read from wav file
plot.subplot(211)
plot.title('Spectrogram of a wav file')
plot.plot(signalData)
plot.xlabel('Sample')
plot.ylabel('Amplitude')

plot.subplot(212)
plot.specgram(signalData, Fs=samplingFrequency)
plot.xlabel('Time')
plot.ylabel('Frequency')

plot.show()