def clean_signal(sig, samp_rate):

    # Band-pass 0.1-210 Hz, then notch out 60 Hz mains noise and its 120 Hz harmonic.
    band_pass_sig = butter_bandpass_filter(sig, 0.1, 210, samp_rate, order=2)
    band_stop_sig = butter_bandstop_filter(band_pass_sig, 59, 61, samp_rate, order=2)
    clean_sig = butter_bandstop_filter(band_stop_sig, 119, 121, samp_rate, order=2)

    return clean_sig
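clean_signal relies on butter_bandpass_filter and butter_bandstop_filter helpers that are not shown in this example. A minimal SciPy-based sketch of what they might look like (the project's actual filter module may differ, e.g. in filter order or in using filtfilt for zero-phase filtering):

from scipy.signal import butter, lfilter

def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    # Butterworth band-pass with cutoffs normalized to the Nyquist frequency.
    nyq = 0.5 * fs
    b, a = butter(order, [lowcut / nyq, highcut / nyq], btype='bandpass')
    return lfilter(b, a, data)

def butter_bandstop_filter(data, lowcut, highcut, fs, order=5):
    # Same design with the band rejected; used above as 60 Hz / 120 Hz notches.
    nyq = 0.5 * fs
    b, a = butter(order, [lowcut / nyq, highcut / nyq], btype='bandstop')
    return lfilter(b, a, data)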
Example #3
def transform_file(f, file, f_lo, f_hi, save_file_loc, n_channels):
    neural_sig = edfreader.EdfReader(file)
    samp_rate = int(neural_sig.samplefrequency(0))
    buffer_size = samp_rate*100   # process the recording in 100 s chunks
    window_size = samp_rate*2     # 2 s FFT window

    size = int(neural_sig.samples_in_file(1))
    # One row per channel; columns are every 3rd FFT bin between f_lo and f_hi.
    frequency = np.zeros(shape=(n_channels, (f_hi-f_lo)/3))
    cnt = 0
    if size > buffer_size*3:
        for chunk in xrange(0,size-3*buffer_size, buffer_size):
            if not os.path.isfile(save_file_loc + str(f)
                                                + "_" + str((chunk+1)/window_size) + ".p"):

                print ("Processing chunk " + str(chunk/(buffer_size)) + " of "
                + str((size-samp_rate)/(buffer_size)) + " in file " + str(f) + "\n")
                chan_sig = np.zeros(shape=(n_channels, buffer_size))
                for c in xrange(1, 1+n_channels):
                    sig = np.zeros(buffer_size*4)
                    # Read one buffer of context before and after the current
                    # chunk; only the middle buffer is kept after filtering, so
                    # filter edge effects fall outside it.
                    neural_sig.readsignal(c, chunk-buffer_size,
                                          buffer_size*3, sig)

                    # Band-pass 0.1-210 Hz, then notch out 60 Hz mains noise
                    # and its 120 Hz harmonic.
                    band_pass_sig = butter_bandpass_filter(sig, 0.1, 210, samp_rate, order=2)
                    band_stop_sig = butter_bandstop_filter(band_pass_sig, 59, 61, samp_rate, order=2)
                    clean_sig = butter_bandstop_filter(band_stop_sig, 119, 121, samp_rate, order=2)
                    chan_sig[c-1,:] = clean_sig[buffer_size:buffer_size*2]


                # Slide a 2 s FFT window across the filtered 100 s chunk.
                for sub_chunk in xrange(0,buffer_size-window_size,window_size):
                    if not os.path.isfile(save_file_loc + str(f) + "_" + str(cnt) + ".p"):

                        for c in xrange(1, 1+n_channels):
                            # Power spectrum of the window, keeping every 3rd
                            # bin between f_lo and f_hi.
                            frequency[c-1,:] = (np.abs(np.fft.fft(
                                chan_sig[c-1,sub_chunk:sub_chunk+window_size]))**2)[f_lo:f_hi:3]
                        pickle.dump(frequency, open(save_file_loc + str(f)
                                                    + "_" + str(cnt) + ".p", "wb"))
                        #print (save_file_loc + str(f)
                        #                            + "_" + str(cnt) + ".p" + " saved")

                        # y, x = np.mgrid[slice(0, n_channels, 1),
                        # slice(0, 100*3, 3)]
                        # plt.pcolormesh(x,y,frequency, cmap='RdBu', vmin=-1, vmax=1)
                        # plt.axis([x.min(), x.max(), y.min(), y.max()])
                        # plt.colorbar()
                        # #plt.title(track)
                        # plt.xlabel("Frequency")
                        # plt.ylabel("Channel")
                        # plt.show()
                    else:
                        print (save_file_loc + str(f)
                                                    + "_" + str(cnt) + ".p" + " skipped")
                    cnt += 1
            else:
                # Whole 100 s chunk already processed; skip past its windows.
                cnt += buffer_size/window_size
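Each saved .p file holds one (n_channels, (f_hi-f_lo)/3) power-spectrum array for a single 2 s window. A minimal sketch of reading one back; the path below is illustrative, following the save_file_loc + str(f) + "_" + str(cnt) + ".p" pattern used above:

import pickle

# Hypothetical file name; substitute the actual save_file_loc, file index, and window count.
with open("features/12_0.p", "rb") as fh:
    frequency = pickle.load(fh)

# One row per channel, one column per kept FFT bin.
print(frequency.shape)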
Example #4
File: audio.py  Project: sunh20/pyESig2
                            '-i', fname,
                            '-vn',
                            '-f', 'wav',
                            '-c:a', 'pcm_mulaw',
                            '-ar', str(samp_rate*2),
                            '-ac', '1', '-']
                pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**9)

                win_size = samp_rate
                f_rate = 30                     # band-power estimates per second of audio
                offset = samp_rate/f_rate       # hop between successive windows, in samples
                raw_audio = pipe.stdout.read(win_size*2*num_secs)
                audio_array = np.fromstring(raw_audio, dtype="int16")
                #plt.plot(audio_array[::100])
                #plt.show()
                #pdb.set_trace()
                audio_array = butter_bandpass_filter(audio_array,50,3500, samp_rate*2)
                audio_len = len(audio_array)/samp_rate
                power_array = np.zeros((len(audio_array)*30)/samp_rate)

                print "writing " + str(vid)
                # For each second of audio, compute f_rate overlapping band-power
                # estimates, advancing the window by `offset` samples each time.
                for c in range(0,audio_len):
                    for f in range(0,f_rate):
                        signal_window = audio_array[c*win_size + offset*f:(c+1)*win_size + offset*f]
                        # Mean power over FFT bins 100-3500 of the Hamming-windowed segment.
                        power = np.mean((np.abs(np.fft.fft(signal_window*np.hamming(len(signal_window))))[100:3500])**2)
                        power_array[c*f_rate+f] = power
                # Overwrite the last second (30 values) with the value just before it.
                if len(power_array)>30:
                    power_array[-30:]=power_array[-31]
                print "wrote " + str(vid)
                pickle.dump(power_array, open(output_file_loc + sbj_id + "_" + str(day) + "_" + num + ".p", "wb"))
                pipe.kill()
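For reference, the per-window power computation in the inner loop above can be reproduced in isolation. A small sketch on synthetic data; the sample rate and test-tone frequency are illustrative, not values from the project:

import numpy as np

samp_rate = 44100
win_size = samp_rate                                  # one-second window, as above
t = np.arange(win_size) / float(samp_rate)
signal_window = np.sin(2 * np.pi * 1000 * t)          # hypothetical 1 kHz test tone

# Hamming-windowed FFT, then mean power over bins 100-3500, as in the loop above.
spectrum = np.abs(np.fft.fft(signal_window * np.hamming(len(signal_window))))
power = np.mean(spectrum[100:3500] ** 2)
print(power)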