Example #1
def subtract(newdoc, newdoc2, current_user):

    # Get filenames of the raw audio and of the music to be subtracted
    raw_audio_filename = os.path.basename(newdoc.file.url)
    original_audio_filename = os.path.basename(newdoc2.file.url)

    # Create paths to read in audio files
    recording_path = os.path.join(settings.MEDIA_ROOT, raw_audio_filename)
    original_audio_path = os.path.join(settings.MEDIA_ROOT, original_audio_filename)

    rate, data = scipy.io.wavfile.read(recording_path)
    r = data.astype(np.float64)

    rate, data = scipy.io.wavfile.read(original_audio_path)
    o = data.astype(np.float64)

    print(len(o))
    print(len(r))
    print('applying adaptive filter')
    # Apply adaptive filter
    M = 100  # Number of filter taps in the adaptive filter (8000 also works but takes a long time)
    step = 0.1  # Step size
    y, e, w = adf.nlms(o, r, M, step, returnCoeffs=True)

    scaled_e = np.int16(e/np.max(np.abs(e)) * 32767)

    output_filename = os.path.splitext(os.path.basename(newdoc.file.url))[0]
    print(output_filename)

    output_path = os.path.join(settings.MEDIA_ROOT, output_filename + '_SUBTRACTED.wav')
    write(output_path, 44100, scaled_e)

    # Open in binary mode so Django's File wrapper gets the raw wav bytes
    f = open(output_path, 'rb')
    output_file = File(f)

    newdoc3 = Audio(user=current_user, file=output_file)
    newdoc3.save()

    return e
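A note on argument order: adf.nlms(o, r, ...) takes the original music o as the filter input u and the recording r as the desired signal d, so the returned error signal e approximates the recording with the music subtracted; that is what gets normalized to int16 and written out.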
Example #2
def comparison(u, d, filter_length, step_size, type):
    if type == "NLMS":
        y0, e0, w0 = LMSFilter(u, d, filter_length, step_size).normalized()
        y1, e1, w1 = adaptfilt.nlms(u, d, filter_length, step_size)
    elif type == "LMS":
        y0, e0, w0 = LMSFilter(u, d, filter_length, step_size).regular()
        y1, e1, w1 = adaptfilt.lms(u, d, filter_length, step_size)
    else:
        print("You must specify the comparison type: NLMS or LMS")
        return False
    fig1, axs = plt.subplots(3, 2, sharex='all')
    plt.subplots_adjust(hspace=0.35)
    fig1.suptitle(f"Comparison of two implementations of the {type} filter")
    x = list(range(len(d)))
    xt = list(range(filter_length, len(d) + 1))
    axs[0, 0].plot(x, d, 'tab:blue')
    axs[0, 0].set_title('desired signal')
    axs[1, 0].plot(xt, y0, 'tab:purple')
    axs[1, 0].set_title('my output')
    axs[2, 0].plot(xt, e0, 'tab:red')
    axs[2, 0].set_title('my error')
    axs[0, 1].plot(x, d, 'tab:blue')
    axs[0, 1].set_title('desired signal')
    axs[1, 1].plot(xt, y1, 'tab:purple')
    axs[1, 1].set_title('adaptfilt output')
    axs[2, 1].plot(xt, e1, 'tab:red')
    axs[2, 1].set_title('adaptfilt error')
    for ax in axs.flat:
        ax.label_outer()
        ax.grid(True)
    plt.savefig(f"{type}-comparison")
    return True
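A minimal usage sketch for the function above, assuming LMSFilter, adaptfilt, numpy and matplotlib are importable as in the example; the test signals below are synthetic placeholders, not part of the original code:

import numpy as np

np.random.seed(0)
u = np.random.randn(2000)                      # input signal
coeffs = np.concatenate(([0.8], np.zeros(9), [-0.5]))
d = np.convolve(u, coeffs)[:len(u)]            # desired signal: a filtered echo of the input
comparison(u, d, filter_length=50, step_size=0.1, type="NLMS")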
Example #4
def nlms(u, d, M=100):
    """Classical adaptive filtering with the NLMS algorithm."""
    # Apply adaptive filter; M is the number of filter taps
    step = 0.1  # Step size
    y, e, w = adf.nlms(u, d, M, step, returnCoeffs=True)

    return y, e, w
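A short usage sketch for this wrapper, assuming numpy is imported as np and adaptfilt as adf; the echo-path coefficients below are illustrative:

import numpy as np
import adaptfilt as adf

u = np.random.randn(5000)                  # far-end (reference) signal
echo_path = np.concatenate(([0.6], np.zeros(12), [-0.3]))
d = np.convolve(u, echo_path)[:len(u)]     # microphone signal containing the echo
y, e, w = nlms(u, d, M=100)
print(len(e))                              # len(u) - M + 1 residual samples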
Example #5
def adaptiveFiltering(ch0, ch1):
    binSizeTim = np.size(ch0, 0)
    biNum = np.size(ch0, 1)

    M = 5
    step = binSizeTim / 5.0

    N = binSizeTim - M + 1
    ch0out = np.zeros((N, biNum))
    ch1out = np.zeros((N, biNum))

    for bi in range(biNum):
        ch0out[:, bi], ch1out[:, bi], _ = adaptfilt.nlms(
            ch1[:, bi], ch0[:, bi], M, step)

    return ch0out, ch1out
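adaptfilt.nlms returns len(u) - M + 1 output samples, which is why ch0out and ch1out are allocated with N = binSizeTim - M + 1 rows. A quick check of that bookkeeping with synthetic signals (the array sizes below are illustrative):

import numpy as np
import adaptfilt

u = np.random.randn(300)
d = np.random.randn(300)
y, e, w = adaptfilt.nlms(u, d, 5, 0.1)
print(len(y), len(e))  # both 300 - 5 + 1 = 296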
Example #6
d = np.convolve(snd, coeffs)
d = d/20.0
lis = lis/20.0
d = d[:len(lis)]  # Trim the sender's audio to the listener's length so the two can be mixed
d = d + lis - (d*lis)/256.0   # Mix with listener's voice.
d = np.round(d, 0)

# Hear how the mixed signal sounds before proceeding with the filtering.
dsound = d.astype('int16')
wavfile.write(waveout, lfs, dsound)

music = pyglet.resource.media('output.wav')
music.play()
time.sleep(len(dsound)/lfs)
# Apply adaptive filter
y, e, w = adf.nlms(snd[:len(d)], d, tap, step, returnCoeffs=True)

# The filter stores its result in 'e': the mixed signal minus the estimated echo,
# i.e. the listener's voice plus residual error. Ideally the sender would now hear
# only the listener's voice; in practice some echo remains.

e = e.astype('int16')
wavfile.write('adapt.wav', lfs, e)
music = pyglet.resource.media(filtout)
music.play()
time.sleep(len(e)/lfs)

# Calculate and plot the mean square weight error
mswe = adf.mswe(w, coeffs)
plt.figure()
plt.title('Mean squared weight error')
plt.plot(mswe)
Example #7
# Generate received signal d(n) using randomly chosen coefficients
coeffs = np.concatenate(([0.8], np.zeros(8), [-0.7], np.zeros(9),
                         [0.5], np.zeros(11), [-0.3], np.zeros(3),
                         [0.1], np.zeros(20), [-0.05]))

d = np.convolve(u, coeffs)

# Add background noise
v = np.random.randn(len(d)) * np.sqrt(5000)
d += v

# Apply adaptive filter
M = 100  # Number of filter taps in adaptive filter
step = 0.1  # Step size
y, e, w = adf.nlms(u, d, M, step, returnCoeffs=True)

# Calculate mean square weight error
mswe = adf.mswe(w, coeffs)

# Plot speech signals
plt.figure()
plt.title("Speech signals")
plt.plot(u, label="Emily's speech signal, u(n)")
plt.plot(d, label="Speech signal from John, d(n)")
plt.grid()
plt.legend()
plt.xlabel('Samples')

# Plot error signal - note how the measurement noise affects the error
plt.figure()
Example #8
                 color='k',
                 label="music without noise")
plt.fill_between(times, s_data2[:, 0], s_data2[:, 1], label="music with noise")
plt.xlim(times[0], times[-1])
plt.grid()
plt.legend()
plt.xlabel('time')

d = ((data[:, 0]) / 2 + (data[:, 1]) / 2)
u = ((s_data2[:, 0]) / 2 + (s_data2[:, 1]) / 2)
# wavfile.write('rzeczywiscie.wav',samplerate,u.astype('int16'))

# filter
M = 1000
step = 0.1
y, e, w = adf.nlms(u, d, M, step)

dopasowanie2 = slice(0, len(y))
s_times = times[dopasowanie2]

plt.subplot(212)
plt.title("Music after filtration")
plt.fill_between(s_times, y, color='m', label="music after filtration")
plt.xlim(s_times[0], s_times[-1])
plt.grid()
plt.legend()
plt.xlabel('time')
plt.tight_layout(pad=0, h_pad=0.1, w_pad=0)
plt.show()
dec = input('Do you want to see frequency spectrum of signals? [Y/N]')
Example #9
while i < 80:
    music_frames = music.readframes(10000)
    voice_frames = voice.readframes(10000)

    u = np.frombuffer(music_frames, np.int16)  # np.fromstring is deprecated for binary data
    u = np.float64(u)

    print(i)

    d = np.frombuffer(voice_frames, np.int16)
    d = np.float64(d)

    # Apply adaptive filter
    M = 20  # Number of filter taps in adaptive filter
    step = 0.1  # Step size
    y, e, w = adf.nlms(u, d, M, step, returnCoeffs=True)

    full_e = np.concatenate([full_e, e])
    full_d = np.concatenate([full_d, d])
    full_u = np.concatenate([full_u, u])
    i += 1

scaled = np.int16(full_e / np.max(np.abs(full_e)) * 32767)

write('splitaudio.wav', 32000, scaled)

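Note that each nlms call returns len(d) - M + 1 error samples, so concatenating full_e across 10000-frame chunks silently drops M - 1 = 19 samples at every chunk boundary; for this filter length the discontinuity is negligible, but it explains why full_e ends up slightly shorter than full_d.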
Example #10
def subtract(newdoc, newdoc2, current_user):

    # Get filenames of the raw audio and of the music to be subtracted
    raw_audio_filename = os.path.basename(newdoc.file.url)
    original_audio_filename = os.path.basename(newdoc2.file.url)

    # Create paths to read in audio files
    recording_path = os.path.join(settings.MEDIA_ROOT, raw_audio_filename)
    original_audio_path = os.path.join(settings.MEDIA_ROOT, original_audio_filename)

    input_wav = wave.open(recording_path, "r")
    (nchannels, sampwidth, framerate, nframes, comptype, compname) = input_wav.getparams()
    frames = input_wav.readframes(nframes)  # readframes() takes a frame count, not a sample count
    out = struct.unpack_from("%dh" % (nframes * nchannels), frames)  # parenthesize: '%' binds tighter than '*'

    r = np.asarray(out, np.float64)


    input_wav = wave.open(original_audio_path, "r")
    (nchannels, sampwidth, framerate, nframes, comptype, compname) = input_wav.getparams()
    frames = input_wav.readframes(nframes)
    out = struct.unpack_from("%dh" % (nframes * nchannels), frames)

    o = np.asarray(out, np.float64)


    print(len(o))
    print(len(r))
    print('applying adaptive filter')
    # Apply adaptive filter
    M = 100  # Number of filter taps in the adaptive filter (8000 also works but takes a long time)
    step = 0.1  # Step size
    y, e, w = adf.nlms(o, r, M, step, returnCoeffs=True)

    scaled_e = np.int16(e/np.max(np.abs(e)) * 32767)

    output_filename = os.path.splitext(os.path.basename(newdoc.file.url))[0]
    print(output_filename)

    output_path = os.path.join(settings.MEDIA_ROOT, output_filename + '_SUBTRACTED.wav')

    output_wav = wave.open(output_path, "w")
    output_wav.setparams((nchannels, sampwidth, framerate, nframes, comptype, compname))
    output_wav.writeframes(scaled_e.tobytes())  # wave expects raw bytes, not a numpy array
    output_wav.close()  # finalize the header so the file can be re-read below

    f = open(output_path, 'rb')
    output_file = File(f)

    newdoc3 = Audio(user=current_user, file=output_file)
    newdoc3.save()

    return output_path
Example #11
d = np.convolve(u, coeffs)
d = d / 20.0
v = v / 20.0
d = d[:len(v)]  # Trim the sender's audio to the listener's length so the two can be mixed
d = d + v - (d * v) / 256.0  # Mix with listener's voice.
d = np.round(d, 0)

# Hear how the mixed signal sounds before proceeding with the filtering.
dsound = d.astype('int16')
wavfile.write(waveout, lfs, dsound)
winsound.PlaySound(waveout, winsound.SND_ALIAS)

# Apply adaptive filter
y, e, w = adf.nlms(u[:len(d)], d, M, step, returnCoeffs=True)

# The filter stores its result in 'e': the mixed signal minus the estimated echo,
# i.e. the listener's voice plus residual error. Ideally the sender would now hear
# only the listener's voice; in practice some echo remains.

e = e.astype('int16')
wavfile.write(waveout, lfs, e)
winsound.PlaySound(waveout, winsound.SND_ALIAS)

# Calculate and plot the mean square weight error
mswe = adf.mswe(w, coeffs)
plt.figure()
plt.title('Mean squared weight error')
plt.plot(mswe)
plt.grid()
plt.xlabel('Samples')
Example #13
# Perform filtering
M = 60  # Number of taps to estimate
mu1 = 0.0008  # Step size 1 for LMS
mu2 = 0.0004  # Step size 2 for LMS
beta1 = 0.08  # Step size 1 for NLMS and AP
beta2 = 0.04  # Step size 2 for NLMS and AP
K = 3  # Projection order for AP

# LMS
y_lms1, e_lms1, w_lms1 = adf.lms(u, d, M, mu1, returnCoeffs=True)
y_lms2, e_lms2, w_lms2 = adf.lms(u, d, M, mu2, returnCoeffs=True)
mswe_lms1 = adf.mswe(w_lms1, coeffs)
mswe_lms2 = adf.mswe(w_lms2, coeffs)

# NLMS
y_nlms1, e_nlms1, w_nlms1 = adf.nlms(u, d, M, beta1, returnCoeffs=True)
y_nlms2, e_nlms2, w_nlms2 = adf.nlms(u, d, M, beta2, returnCoeffs=True)
mswe_nlms1 = adf.mswe(w_nlms1, coeffs)
mswe_nlms2 = adf.mswe(w_nlms2, coeffs)

# AP
y_ap1, e_ap1, w_ap1 = adf.ap(u, d, M, beta1, K, returnCoeffs=True)
y_ap2, e_ap2, w_ap2 = adf.ap(u, d, M, beta2, K, returnCoeffs=True)
mswe_ap1 = adf.mswe(w_ap1, coeffs)
mswe_ap2 = adf.mswe(w_ap2, coeffs)

# Plot results
plt.figure()
plt.title('Convergence comparison of different adaptive filtering algorithms')
plt.plot(mswe_lms1, 'b', label='LMS with stepsize=%.4f' % mu1)
plt.plot(mswe_lms2, 'b--', label='LMS with stepsize=%.4f' % mu2)
Example #14
    def Removing_MotionArtifact(self, Int_FilterLength, Array_Signal):
        _, _, Array_NoiseReference = self.ConvertInvertFourier(Array_Signal=Array_Signal)
        Flt_StepSize = 0.1

        Array_NoiseEst, Array_NoiseRemoved, Array_FilterCoefs = adaptfilt.nlms(
            u=Array_NoiseReference, d=Array_Signal, M=Int_FilterLength, step=Flt_StepSize)
        return Array_NoiseRemoved
Example #15
def estimate(y, y1, ref1):
    data2 = []
    sas = []
    sdelay = []
    Fs = 62.5               # sampling frequency in Hz
    window = 30             # samples per update window
    order = 10
    boundary = window - 1   # correction for division
    length=len(y)
    wave=thinkdsp.Wave(ys=y,framerate=Fs)
    spectrum=wave.make_spectrum()
    spectrum_heart=wave.make_spectrum()
    spectrum_resp=wave.make_spectrum()

    fft_mag=list(np.absolute(spectrum.hs))
    fft_length= len(fft_mag)

    spectrum_heart.high_pass(cutoff=0.5,factor=0.001)
    spectrum_heart.low_pass(cutoff=4,factor=0.001)
    fft_heart=list(np.absolute(spectrum_heart.hs))
    spectrum_resp.high_pass(cutoff=0.15,factor=0)
    spectrum_resp.low_pass(cutoff=0.4,factor=0)
    fft_resp=list(np.absolute(spectrum_resp.hs))
    

    max_fft_heart=max(fft_heart)
    heart_sample=fft_heart.index(max_fft_heart)
    fund_freq=heart_sample*Fs/length*60
    nT=int(fund_freq/60*Fs)
    max_fft_resp=max(fft_resp)
    resp_sample=fft_resp.index(max_fft_resp)
    rr=resp_sample*Fs/length*60
    
    ## FIRST ADAPTIVE FILTER
    x1, e1, w1 = adf.nlms(ref1, y, order, 1)
    x2, e2, w2 = adf.nlms(ref1, y1, order, 1)
    
    ## calculating fundamental time period of input signal x1
    length=len(x1)
    wave=thinkdsp.Wave(ys=x1,framerate=Fs)
    spectrum=wave.make_spectrum()
    spectrum_heart=wave.make_spectrum()
    spectrum_resp=wave.make_spectrum()

    fft_mag=list(np.absolute(spectrum.hs))
    fft_length= len(fft_mag)

    spectrum_heart.high_pass(cutoff=0.5,factor=0.001)
    spectrum_heart.low_pass(cutoff=4,factor=0.001)
    fft_heart=list(np.absolute(spectrum_heart.hs))
    spectrum_resp.high_pass(cutoff=0.15,factor=0)
    spectrum_resp.low_pass(cutoff=0.4,factor=0)
    fft_resp=list(np.absolute(spectrum_resp.hs))

    max_fft_heart=max(fft_heart)
    heart_sample=fft_heart.index(max_fft_heart)
    fund_freq=heart_sample*Fs/length*60
    nT=int(fund_freq/60*Fs)
    max_fft_resp=max(fft_resp)
    resp_sample=fft_resp.index(max_fft_resp)
    rr=resp_sample*Fs/length*60

    ##Second Filter algorithm

    ex1s = 0.0
    ex2s = 0.0
    ex1x2 = 0.0
    esas = 0.0
    esdx1 = 0.0
    esdx2 = 0.0
    ess = 0.0
    esds = 0.0
    essd = 0.0
    rv = 0.8
    alpha = 0.0
    beta = []
    length=length-length%window
    ## calculating first/initial rv
    for i in range(0,nT):
        sas.append(x1[i]-rv*x2[i])  ##getting values of sas till nT for sdelay
        ex1s=(ex1s*i+x1[i]**2)/(i+1)
        ex2s=(ex2s*i+x2[i]**2)/(i+1)
        ess=(ess*i+sas[i]**2)/(i+1)
        esdx1=(esdx1*i+x1[i]*sas[i])/(i+1)
        esdx2=(esdx2*i+x2[i]*sas[i])/(i+1)
        ex1x2=(ex1x2*i+x1[i]*x2[i])/(i+1)
        if (ex2s * esdx1 - ex1x2 * esdx2):
            rv = (ex1x2 * esdx1 - ex1s * esdx2) / (ex2s * esdx1 - ex1x2 * esdx2)

    ##calculating rv,alpha and updating beta every 20 samples
    for i in range(nT,length-window):
        for j in range(0,window):    
            sas.append(x1[i]-rv*x2[i])
            ex1s=(ex1s*j+x1[i]**2)/(j+1)
            ex2s=(ex2s*j+x2[i]**2)/(j+1)
            ess=(ess*j+sas[i]**2)/(j+1)
            esdx1=(esdx1*j+x1[i]*sas[i-nT])/(j+1)
            esdx2=(esdx2*j+x2[i]*sas[i-nT])/(j+1)
            ex1x2=(ex1x2*j+x1[i]*x2[i])/(j+1)
            i = i + 1  # note: rebinding i here does not affect the outer for-loop's own counter
            
        if (ex2s * esdx1 - ex1x2 * esdx2):
            rv = (ex1x2 * esdx1 - ex1s * esdx2) / (ex2s * esdx1 - ex1x2 * esdx2)
        alpha = (ex1s - rv * ex1x2) / ess
        for j in range(0,window):
            beta.append((rv*alpha)/(1-alpha))
        i = i - 1  # likewise overwritten when the for-loop advances

    venous_ref=[]
    for i in range(0,length-nT):
        venous_ref.append(x1[nT+i]- beta[i]*x2[nT+i])

        
    xf1, ef1, wf1 = adf.nlms(venous_ref, x2[nT:], 2 * order, 1)

    data2= data2 + list(xf1)

    length=len(data2)
    wave=thinkdsp.Wave(ys=data2,framerate=Fs)
    spectrum=wave.make_spectrum()
    spectrum_heart=wave.make_spectrum()
    spectrum_resp=wave.make_spectrum()

    fft_mag=list(np.absolute(spectrum.hs))
    fft_length= len(fft_mag)

    spectrum_heart.high_pass(cutoff=0.7,factor=0.001)
    spectrum_heart.low_pass(cutoff=4,factor=0.001)
    fft_heart=list(np.absolute(spectrum_heart.hs))
    spectrum_resp.high_pass(cutoff=0.15,factor=0)
    spectrum_resp.low_pass(cutoff=0.4,factor=0)
    fft_resp=list(np.absolute(spectrum_resp.hs))

    max_fft_heart=max(fft_heart)
    heart_sample=fft_heart.index(max_fft_heart)
    fund_freq=heart_sample*Fs/length*60
    nT=int(fund_freq/60*Fs)
    max_fft_resp=max(fft_resp)
    resp_sample=fft_resp.index(max_fft_resp)
    rr=resp_sample*Fs/length*60
    print("Heart rate       [normal 60-100]  : ", fund_freq, 'BPM')
    print("Respiration Rate [normal 10-20]   : ", rr, "RPM")
    return fund_freq
Example #16
def adaptive_noise_cancellation(noise, signal, filter_size=64, learning_rate=0.1, normalized=True):
    # Filter the noise reference against the noisy mixture; e is the cleaned signal
    if normalized:
        y, e, w = adaptfilt.nlms(noise, noise + signal, filter_size, learning_rate)
    else:
        y, e, w = adaptfilt.lms(noise, noise + signal, filter_size, learning_rate)
    # e is filter_size - 1 samples shorter than the input; pad the front to restore length
    return numpy.concatenate((e[0:filter_size - 1], e))
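A quick usage sketch, assuming numpy and adaptfilt are imported as in the example; the sine-plus-noise signals are illustrative:

import numpy
import adaptfilt

t = numpy.arange(8000) / 8000.0
signal = numpy.sin(2 * numpy.pi * 5 * t)    # clean target
noise = numpy.random.randn(len(t))          # noise reference
cleaned = adaptive_noise_cancellation(noise, signal)
assert len(cleaned) == len(signal)          # the front padding restores the original length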
Example #17
for value in valuesfile:
    if value == '':
        break
    a = value.split(' ')
    if a[0] == '' or a[1] == '':
        break
    sensor = float(a[0])
    sensor1 = float(a[1])
    ref.append(sensor1 - sensor)
    data.append(sensor)
    data1.append(sensor1)

length = len(data)
print("Length:", length)

x1, e1, w1 = adf.nlms(ref, data, 5, 1)
x2, e2, w2 = adf.nlms(ref, data1, 5, 1)
ex1s = 0.0
ex2s = 0.0
ex1x2 = 0.0
esas = 0.0
esdx1 = 0.0
esdx2 = 0.0
ess = 0.0
esds = 0.0
essd = 0.0
rv = 0.5
alpha = 0.0
beta = []
for i in range(0,20):
    sas.append(data[i]-rv*data1[i])
Example #18
def subtract(newdoc, newdoc2, current_user):


    # Connect to Amazon S3
    conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME) 
    k = Key(bucket)


    # get filenames of raw audio and music to be subtracted
    raw_audio_filename = os.path.basename(newdoc.file.url)
    original_audio_filename = os.path.basename(newdoc2.file.url)

    # Upload raw input to S3
    k.key = raw_audio_filename  # for now, the bucket key is the filename (may collide on duplicates)
    k.set_contents_from_file(newdoc.file, rewind=True)
    k.make_public()

    # Upload original audio to S3
    k.key = original_audio_filename  # same keying scheme as above
    k.set_contents_from_file(newdoc2.file, rewind=True)
    k.make_public()


    # Load the recorded audio back from S3 (URL hard-coded for now)
    url = "https://s3-us-west-2.amazonaws.com/audiofiles1234/Ghosts_echoed_RIR_noise_testfile.wav"
    recorded_audio_file = urllib2.urlopen(url)
    input_wav = wave.open(recorded_audio_file, "r")

    # Read the wav file into a numpy array
    (nchannels, sampwidth, framerate, nframes, comptype, compname) = input_wav.getparams()
    frames = input_wav.readframes(nframes)  # readframes() takes a frame count, not a sample count
    out = struct.unpack_from("%dh" % (nframes * nchannels), frames)  # parenthesize: '%' binds tighter than '*'

    r = np.asarray(out, np.float64)


    # Download the original audio from S3
    url = "https://s3-us-west-2.amazonaws.com/audiofiles1234/GhostsNStuff_mono_4s.wav"
    original_audio_file = urllib2.urlopen(url)
    input_wav = wave.open(original_audio_file, "r")

    # Read the wav file into a numpy array
    (nchannels, sampwidth, framerate, nframes, comptype, compname) = input_wav.getparams()
    frames = input_wav.readframes(nframes)
    out = struct.unpack_from("%dh" % (nframes * nchannels), frames)

    o = np.asarray(out, np.float64)


    # Apply adaptive filter
    M = 100  # Number of filter taps in the adaptive filter (8000 also works but takes a long time)
    step = 0.1  # Step size
    y, e, w = adf.nlms(o, r, M, step, returnCoeffs=True)

    scaled_e = np.int16(e/np.max(np.abs(e)) * 32767)

    # Create the output filename
    output_filename = os.path.splitext(os.path.basename(newdoc.file.url))[0]
    print(output_filename)
    output_filename_s3 = output_filename + '_SUBTRACTED_TEST.wav'

    output_path = 'https://s3-us-west-2.amazonaws.com/audiofiles1234/' + output_filename_s3


    # Write the output wav locally, then upload it to S3
    output_file = open(output_filename_s3, "wb+")  # binary mode: wave writes raw bytes

    output_wav = wave.open(output_file, "w")
    output_wav.setparams((nchannels, sampwidth, framerate, nframes, comptype, compname))
    output_wav.writeframes(scaled_e.tobytes())  # wave expects bytes, not a numpy array
    output_wav.close()  # finalize the wav header before uploading

    k.key = output_filename_s3  # same keying scheme as above
    k.set_contents_from_file(output_file, rewind=True)
    k.make_public()


    return output_path