Example #1
def problem4():
	# read in tada.wav
	rate, tada = wavfile.read('tada.wav')
	
	# upon inspection, we find that tada.wav is a stereo audio file. 
	# we create stereo white noise that lasts 10 seconds
	L_white = sp.int16(sp.random.randint(-32767,32767,rate*10))
	R_white = sp.int16(sp.random.randint(-32767,32767,rate*10))
	white = sp.zeros((len(L_white),2))
	white[:,0] = L_white
	white[:,1] = R_white
	
	# pad tada signal with zeros
	padded_tada = sp.zeros_like(white)
	padded_tada[:len(tada)] = tada
	ptada = padded_tada
	
	# fourier transforms
	ftada = sp.fft(ptada,axis=0)
	fwhite = sp.fft(white,axis=0)
	
	# inverse transform of convolution
	out = sp.ifft((ftada*fwhite),axis=0)
	
	# prepping output and writing file
	out = sp.real(out)
	scaled = sp.int16(out / sp.absolute(out).max() * 32767)
	wavfile.write('my_tada_conv.wav',rate,scaled)
Example #2
def getMSD(d1, d2):
    diff = 0
    d11 = []
    d22 = []
    
    for index in xrange(9):
        chunk = d1[index * 22050: index * 22050 + 22050]
        x = cp.fft(chunk)
        maximum1 = max(abs(x))   # peak spectral magnitude of this one-second chunk
        d11.append(maximum1)

    for index in xrange(9):
        chunk = d2[index * 22050: index * 22050 + 22050]
        x = cp.fft(chunk)
        maximum2 = max(abs(x))
        d22.append(maximum2)
    
    currentMax1 = max(d11)
    currentMax2 = max(d22)
    
    for x in xrange(len(d11)):
#         diff += abs(d11[x]/currentMax1 - d22[x]/currentMax2) * abs(d11[x]/currentMax1 - d22[x]/currentMax2)
        diff += abs(d11[x] - d22[x]) * abs(d11[x] - d22[x])

    diff /= len(d11)
    
    return diff
Example #3
def get_envelope(R,dim=1):
    """
    Returns the analytic signal (complex envelope) of the input signal R,
    obtained by zeroing the negative-frequency half of the spectrum.
    @param R: The input data matrix.
    @param dim: The dimension along which the envelope is to be taken. default: dim=1
    """
    if dim==0:
        R=R.T
    if len(R.shape)==1:
        freqs=scipy.fft(R)
        length=len(R)//2
        freqs[length:]=0
        freqs[1:length]=2*freqs[1:length]

        env=scipy.ifft(freqs)
    else:
        freqs=scipy.fft(R)
        length=R.shape[1]//2
        freqs[:,length:]=0
        freqs[:,1:length]=2*freqs[:,1:length]

        env=scipy.ifft(freqs)
        if dim==0:
            return env.T

    return env
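A minimal usage sketch for get_envelope (an assumption, not from the original source), assuming get_envelope and scipy are imported as above; the magnitude of the analytic signal of an amplitude-modulated tone should roughly track the modulator.

import numpy as np
import scipy

t = np.linspace(0, 1, 2000, endpoint=False)
modulator = 1.0 + 0.5 * np.sin(2 * np.pi * 3 * t)   # slow amplitude variation
carrier = np.sin(2 * np.pi * 100 * t)               # fast carrier
am = modulator * carrier

env = np.abs(get_envelope(am))                       # should roughly follow `modulator`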
Example #4
def BodefromTwoTimeDomainVectors(timevector,output,input,truncfreq=100):
    """This function calculates the Bode response between two time
    domain signals.  The timevector is used to calculate the frequency
    vector, which is then used to truncate the Bode response to reduce
    calculation time and return only useful information.  Input and
    output are time domain vectors.

    The return values are
    freq, magnitude ratio, phase, complex

    The goal of this function is to be useful for small amounts of
    data and/or as part of a routine to calculate a Bode response from
    fixed sine data."""

    N=len(timevector)
    f=makefreqvect(timevector)
    co=thresh_py(f,truncfreq)
    f=f[0:co]
    curin_fft=fft(input,None,0)*2/N
    curout_fft=fft(output,None,0)*2/N
    curin_fft=curin_fft[0:co]
    curout_fft=curout_fft[0:co]
    curGxx=norm2(curin_fft)
    curGyy=norm2(curout_fft)
    curGxy=scipy.multiply(scipy.conj(curin_fft),curout_fft)
    H=scipy.divide(curGxy,curGxx)
    Hmag=abs(H)
    Hphase=mat_atan2(scipy.imag(H),scipy.real(H))*180.0/pi
    return f,Hmag,Hphase,H
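A hedged usage sketch for BodefromTwoTimeDomainVectors; it assumes the helper functions it calls (makefreqvect, thresh_py, norm2, mat_atan2) are importable from the same module, and drives a simple first-order low-pass "plant" with broadband noise.

import numpy as np

fs = 500.0
t = np.arange(0, 20, 1.0 / fs)
u = np.random.randn(t.size)            # broadband input
y = np.zeros_like(u)                   # first-order low-pass plant, tau ~ 0.05 s
alpha = (1.0 / fs) / 0.05
for i in range(1, len(u)):
    y[i] = y[i - 1] + alpha * (u[i] - y[i - 1])

freq, mag, phase, H = BodefromTwoTimeDomainVectors(t, y, u, truncfreq=100)
# mag should roll off at roughly -20 dB/decade above a few Hz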
def log_spectrum_distance(s,shat,winfunc):
    size = min(len(s),len(shat))
    window = winfunc(size)
    s = s[0:size]
    shat = shat[0:size]
    s_amp = sp.absolute(sp.fft(s*window))
    shat_amp = sp.absolute(sp.fft(shat*window))
    return sp.sqrt(sp.mean((sp.log10(s_amp / shat_amp)*10.0)**2.0))
def itakura_saito_spectrum_distance(s,shat,winfunc):
    size = min(len(s),len(shat))
    window = winfunc(size)
    s = s[0:size]
    shat = shat[0:size]
    s_amp = sp.absolute(sp.fft(s*window))
    shat_amp = sp.absolute(sp.fft(shat*window))
    return sp.mean(sp.log(s_amp / shat_amp) + (shat_amp/s_amp) - 1.0)
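A short usage sketch for the two spectral distances above, assuming scipy is imported as sp (these snippets rely on the older top-level sp.fft/sp.hanning API used throughout this page).

import scipy as sp

n = 1024
s = sp.sin(2 * sp.pi * 0.01 * sp.arange(n))              # clean reference signal
shat = s + 0.05 * sp.random.randn(n)                      # noisy estimate of it
lsd = log_spectrum_distance(s, shat, sp.hanning)          # dB-scale RMS spectral error
isd = itakura_saito_spectrum_distance(s, shat, sp.hanning)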
Example #7
def prob5():
	rate, sig = wavfile.read('tada.wav')
	sig = sp.float32(sig)
	noise = sp.float32(sp.random.randint(-32767,32767,sig.shape))
	out = sp.ifft(sp.fft(sig,axis=0)*sp.fft(noise,axis=0),axis=0)  # convolve along the time axis (tada.wav is stereo)
	out = sp.real(out)
	out = sp.int16(out/sp.absolute(out).max() * 32767)
	wavfile.write('white-conv.wav',rate,out)
Example #8
    def fourierTransform(self, fromPos, toPos, only = []):
        self.checkToPos(toPos)
        if len(only) > 0:
            self.allFid[toPos] = np.array([fftshift(fft(self.allFid[fromPos][fidIndex])) for fidIndex in only])
        else:
            self.allFid[toPos] = np.array([fftshift(fft(fid)) for fid in self.allFid[fromPos]])

        self.frequency = np.linspace(-self.sweepWidthTD2/2,self.sweepWidthTD2/2,len(self.allFid[fromPos][0]))
Example #9
def fftconv(x, y):
    """ Convolution of x and y using the FFT convolution theorem. """
    n = int(np.round(2 ** np.ceil(np.log2(len(x))))) + 1
    # multiplication in the frequency domain is convolution in the time domain
    X, Y = fft(x, n), fft(y, n)
    x_y = X * Y

    # Returns the inverse Fourier transform with padding correction
    # (assumes fft and ifft come from the same FFT module, e.g. scipy.fftpack)
    return ifft(x_y)[4:len(x)+4]
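A minimal call sketch for fftconv, assuming numpy as np and the FFT imports above are available.

import numpy as np

x = np.sin(np.linspace(0, 8 * np.pi, 256))   # sine burst
y = np.exp(-np.linspace(0, 5, 32))           # short decaying kernel
smoothed = fftconv(x, y)                     # FFT-based convolution of the two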
def algoChannelSelection(left, right):

    ''' Algorithm which automatically selects the channel with dominant vocals from a stereo flamenco recording
    based on spectral band energies as described in section 2-A-I of

    Kroher, N. & Gomez, E. (2016). Automatic Transcription of Flamenco Singing from Polyphonic Music Recordings.
    ACM / IEEE Transactions on Audio, Speech and Language Processing, 24(5), pp. 901-913.

    :param left: samples of the left audio channel in 44.1kHz
    :param right: samples of the right audio channel in 44.1kHz
    :return: index of the dominant vocal channel (0 = left, 1 = right)
    '''

    # PARAMETERS
    fs = 44100 # sample rate
    wSize = 2048 # window size in samples
    hSize = 2048 # hop size in samples
    fftSize = 2048 # FFT size
    freqGuitLow = 80.0 # lower bound for guitar band
    freqGuitHigh = 400.0 # upper bound for guitar band
    freqVocLow = 500.0 # lower bound for vocal band
    freqVocHigh = 6000.0 # higher bound for vocal band

    # INIT
    window = hanning(wSize)
    numFrames = int(math.floor(float(len(left))/float(wSize)))
    # bin indices corresponding to frequency band limits
    indGuitLow = int(round((freqGuitLow/fs)*fftSize))
    indGuitHigh = int(round((freqGuitHigh/fs)*fftSize))
    indVocLow = int(round((freqVocLow/fs)*fftSize))
    indVocHigh = int(round((freqVocHigh/fs)*fftSize))

    # frame-wise computation of the spectral band ratio
    sbrL = []
    sbrR = []
    for i in range(0,numFrames-100):
        frameL = left[i*hSize:i*hSize+wSize]
        specL = fft(frameL*window) / fftSize
        specL = abs(specL * conj(specL))
        guitMag = sum(specL[indGuitLow:indGuitHigh],0)
        vocMag = sum(specL[indVocLow:indVocHigh],0)
        sbrL.append(20*math.log10(vocMag/guitMag))
        frameR = right[i*hSize:i*hSize+wSize]
        specR = fft(frameR*window) / fftSize
        specR = abs(specR * conj(specR))
        guitMag = sum(specR[indGuitLow:indGuitHigh],0)
        vocMag = sum(specR[indVocLow:indVocHigh],0)
        sbrR.append(20*math.log10(vocMag/guitMag))

    # select channel based on mean SBR
    if mean(sbrL)>=mean(sbrR):
        ind = 0
    else:
        ind = 1

    return ind
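A hedged call sketch for algoChannelSelection, assuming scipy.io.wavfile and a 44.1 kHz stereo recording; 'flamenco_take.wav' is a placeholder filename.

from scipy.io import wavfile

fs, stereo = wavfile.read('flamenco_take.wav')      # expects shape (n_samples, 2), fs == 44100
left = stereo[:, 0].astype(float)
right = stereo[:, 1].astype(float)
vocal_channel = algoChannelSelection(left, right)   # 0 = left channel, 1 = right channel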
Example #11
 def calculation(self, data):
     if(isinstance(data[0], types.ListType)):
         freq = [np.abs(fft(l)/len(l))[1:len(l)/2] for l in data]
         freq = [n.tolist() for n in freq]
     else:
         l = data
         freq = np.abs(fft(l)/len(l))[1:len(l)/2]
         #freq = [[v] for v in freq]
         #freq = freq.tolist()
     return freq  # .tolist()
Example #12
def CalcSpectra(x,y, input=None, output=None):
    """Calculate Gxx, Gyy, and Gxy.  Note that input and output are
    just labels.  x and y are time domain signals."""
    N = max(shape(x))
    x_fft = squeeze(fft(x, None, 0)*2/N)
    y_fft = squeeze(fft(y, None, 0)*2/N)
    Gxx = norm2(x_fft)
    Gyy = norm2(y_fft)
    Gxy = (scipy.conj(x_fft))*y_fft
    return Spectra(input, output, Gxx, Gyy, Gxy)
Example #13
def prob3():
	rate1,sig1 = wavfile.read('chopinw.wav')
	n = sig1.shape[0]
	rate2,sig2 = wavfile.read('balloon.wav')
	m = sig2.shape[0]
	# zero-pad both stereo signals to a common length (sp.append would flatten them)
	sig1 = sp.vstack((sig1,sp.zeros((m,2))))
	sig2 = sp.vstack((sig2,sp.zeros((n,2))))
	f1 = sp.fft(sig1,axis=0)
	f2 = sp.fft(sig2,axis=0)
	out = sp.ifft((f1*f2),axis=0)
	out = sp.real(out)
	scaled = sp.int16(out/sp.absolute(out).max() * 32767)
	wavfile.write('test.wav',rate1,scaled)
Example #14
def makeIR(wav_in,wav_out,fs,duration,noise=0.025):
    """ measures the response of a speaker (+amp+mic) and build an IR """
    # step 1: full duplex playback and recording. Input: provided sweep wav file
    # output: recorded time response
    ecasound_cmd="ecasound -f:16,1,%i -a:1 -i jack,system,capture " + \
    " -o /tmp/capture.wav -a:2 -i %s -o jack,system -t %i"
    ecasound_cmd=ecasound_cmd%(int(fs),wav_in,int(duration))
    # run capture    
    os.system(ecasound_cmd)
    # load input and capture wave files 
    time.sleep(3)
    f=wave.open(wav_in,'rb')
    len1=f.getnframes()
    #nc1=f.getnchannels()
    #bp1=f.getsampwidth()
    data=f.readframes(len1)
    f.close()
    Y1=scipy.float32(scipy.fromstring(data,dtype='int16'))
    f=wave.open('/tmp/capture.wav','rb')
    len2=f.getnframes()
    #nc1=f.getnchannels()
    #bp1=f.getsampwidth()
    data=f.readframes(len2)
    f.close()    
    Y2=scipy.float32(scipy.fromstring(data,dtype='int16'))
    # truncate and normalize wave file 
    #(or we could pad the shortest to the longest... TODO!)
    minlen = min([len1,len2])
    Y2=Y2[0:minlen]
    Y2=Y2/max(abs(Y2))
    Y1=Y1[0:minlen]
    Y1=Y1/max(abs(Y1))
    # compute the frequency response function as the ratio of the two spectra
    FRF=scipy.fft(Y2)/scipy.fft(Y1)
    # compute impulse response as inverse FFT of FRF
    IRraw=scipy.real(scipy.ifft(FRF))
    # get rid of initial lag in IR
    thr=max(abs(IRraw))*noise
    offset=max([0 , min(min(scipy.where(abs(IRraw)>thr)))-5 ])
    IR=IRraw[offset:-1] 
    IRnorm=IR/max(abs(IR))
    # TODO: add post pro options such as low/high pass and decay
    # write output IR
    f = wave.open(wav_out, 'w')
    f.setparams((1, 2, fs, 0, 'NONE', 'not compressed'))
    maxVol=2**15-1.0 #maximum amplitude
    wvData=""
    for i in range(len(IRnorm)):
        wvData+=pack('h', maxVol*IRnorm[i])
    f.writeframes(wvData)
    f.close()
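A hedged call sketch for makeIR; it assumes a working ecasound/JACK playback and capture chain and an excitation sweep already on disk, and both file names are placeholders.

# play a 10-second sweep through the speaker chain and derive a mono impulse response
makeIR('sweep_20_20k.wav', 'speaker_ir.wav', fs=44100, duration=10, noise=0.025)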
Example #15
def pCodePhaseSearch(sample, LOFreq, PRNSpectrumConjugate):

    """ Search in a given LO freq space all the code phases
        at once

    """

    I, Q, _ = generateIQ(sample.size, LOFreq)

    # mix it down with the LO
    sampledMixedI = sample * I
    sampledMixedQ = sample * Q

    # munge them into a single array of complex numbers for the fft
    combinedMixed = sampledMixedI + 1j*sampledMixedQ

    # do the fft
    signalSpectrum = scipy.fft(combinedMixed)

    # circular correlation in the frequency domain
    correlatedSpectrum = signalSpectrum * PRNSpectrumConjugate

    # and back to time domain
    timeDomainReconstructed = np.abs(scipy.ifft(correlatedSpectrum))**2

    return timeDomainReconstructed
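A hedged sketch of how the acquisition search above might be driven, assuming generateIQ (used inside the function), a capture buffer `samples`, and a sampled PRN replica `prnReplica` of the same length are available; all three are assumptions about the surrounding code.

import numpy as np
import scipy

PRNSpectrumConjugate = np.conj(scipy.fft(prnReplica))        # conjugate spectrum of the local code
loFreqs = np.arange(-5000.0, 5000.0, 500.0)                  # candidate LO offsets to scan (Hz)
results = [pCodePhaseSearch(samples, lo, PRNSpectrumConjugate) for lo in loFreqs]
best = np.unravel_index(np.argmax(results), np.shape(results))  # (LO index, code phase) of the peak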
def plotSpectrum(y,Fs,image_name):
    """
    Plots a Single-Sided Amplitude Spectrum of y(t)
    """
    n = len(y) # length of the signal
    subplot(2,1,1)
    
    plot(arange(0,n),y)
    xlabel('Time')
    ylabel('Amplitude')
    subplot(2,1,2)
    k = arange(n)
    T = n/Fs
    frq = k/T # two sides frequency range
    frq = frq[range(n/2)] # one side frequency range

    Y = fft(y)/n # fft computing and normalization
    Y = Y[range(n/2)]

    plot(frq,abs(Y),'r') # plotting the spectrum
    xlabel('Freq (Hz)')
    ylabel('|Y(freq)|')
    print "here"
    #show()
    savefig(image_name,dpi=110)
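A minimal usage sketch for plotSpectrum, assuming the pylab-style star imports the function relies on; the output file name is a placeholder.

Fs = 1000.0
t = arange(0, 1, 1.0 / Fs)                                 # 1 second of samples
sig = sin(2 * pi * 50 * t) + 0.5 * sin(2 * pi * 120 * t)   # two tones at 50 Hz and 120 Hz
plotSpectrum(sig, Fs, 'two_tone_spectrum.png')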
Example #17
    def setFilterParams(self, form='low', freqbw=0.15, order=5, tnum=0, useCached=False):
        if useCached:
            freqbw = self._freqbw
            form = self._form
            order = self._order
        else:
            self._freqbw = freqbw
            self._form = form
            self._order = order

        try:
            fftdata = scipy.fft(self._traceSource.getTrace(tnum))
            fftdata = abs(fftdata[2:(len(fftdata) / 2)])
            maxindx = fftdata.argmax() + 2
            centerfreq = float(maxindx) / float(len(fftdata) + 2)
            # centerfreq in range 0 - 1 now

        except:
#            print "FFT FAILED!!!"
            self.b = None
            self.a = None
            return

        freq = ((centerfreq - (centerfreq * freqbw) / 2), (centerfreq + (centerfreq * freqbw) / 2))

        logging.info('Designing filter for passband: %f-%f' % (freq[0], freq[1]))

        b, a = sp.signal.butter(order, freq, form)
        self.b = b
        self.a = a
Example #18
def compute_spectrum(signal, rate):
    n = len(signal)
    k = np.arange(n)
    T = n/rate
    freq = k/T
    Y = sp.fft(signal)/n
    return freq, Y
    def analyzeSound(self):
        """ highlights N first peaks in frequency diagram
        """
        # reload the data
        data = self.data
        sample_freq = self.sample_freq
        from scipy.fftpack import fftfreq
        freq_vect = fftfreq(data.size) * sample_freq
        
        # find the local maxima
        y0 = abs(fft(data))
#        y1 = abs(fft(data[:, 1]))
        maxi0 = ((diff(sign(diff(y0))) < 0) & (y0[1:-1] > y0.max()/10.)).nonzero()[0] + 1 # local max
        # maxi1 = ((diff(sign(diff(y1))) < 0) & (y1[1:-1] > y1.max()/10.)).nonzero()[0] + 1 # local max
        
        # frequency
        ax = self.main_figure.figure.add_subplot(212)
        ax.plot(freq_vect[maxi0], y0[maxi0], "o")
        # ax.plot(freq_vect[maxi1], y1[maxi1], "o")
        
        # annotate peaks above a cutoff frequency
        fc = 100
        for point in maxi0[(freq_vect[maxi0] > fc).nonzero()][:self.ui.spinBox.value()]:
            plt.annotate("%.2f" % freq_vect[point], (freq_vect[point], y0[point]))
#        for point in maxi1[(freq_vect[maxi0] > fc).nonzero()][:self.ui.spinBox.value()]:
#            plt.annotate("%.2f" % freq_vect[point], (freq_vect[point], y1[point]))
        
        self.ui.main_figure.canvas.draw()
def save_plotSpectrum(y,Fs,image_name):
    """
    Plots a Single-Sided Amplitude Spectrum of y(t)
    """
    fig = Figure(linewidth=0.0)
    fig.set_size_inches(fig_width,fig_length, forward=True)
    Figure.subplots_adjust(fig, left = fig_left, right = fig_right, bottom = fig_bottom, top = fig_top, hspace = fig_hspace)
    n = len(y) # length of the signal

    _subplot = fig.add_subplot(2,1,1)        
    print "Fi"
    _subplot.plot(arange(0,n),y)
    xlabel('Time')
    ylabel('Amplitude')
    _subplot_2=fig.add_subplot(2,1,2)
    k = arange(n)
    T = n/Fs
    frq = k/T # two sides frequency range
    frq = frq[range(n/2)] # one side frequency range

    Y = fft(y)/n # fft computing and normalization
    Y = Y[range(n/2)]

    _subplot_2.plot(frq,abs(Y),'r') # plotting the spectrum
    xlabel('Freq (Hz)')
    ylabel('|Y(freq)|')
    print "here"
    canvas = FigureCanvasAgg(fig)
    if '.eps' in image_name:
        canvas.print_eps(image_name, dpi = 110)
    if '.png' in image_name:
        canvas.print_figure(image_name, dpi = 110)
Example #21
def load_validation_set():
    """
    Output
        a tuple of features: (fft features, mfcc features, mean-std features)
    Description
        extracts three types of features from validation set.
    """
    ffts = dict()
    mfccs = dict()
    mean_stds = dict()

    for i in validation_ids:
        path = './validation/validation.{i}.wav'.format(i=i)

        _, X = read_wav(path)

        # FFT
        fft = np.array(abs(sp.fft(X)[:1000]))
        ffts.update({i: fft})

        # MFCC
        ceps, mspec, spec = mfcc(X)
        num_ceps = len(ceps)
        x = np.mean(ceps[int(num_ceps*1/10):int(num_ceps*9/10)], axis=0)
        mfccs.update({i: x})


        # Mean-Std
        [Fs, x] = audioBasicIO.readAudioFile(path);
        F = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050*Fs, 0.025*Fs);
        mean_std = []
        for f in F:
            mean_std.extend([f.mean(), f.std()])
        mean_stds.update({i: np.array(mean_std)})
    return (ffts, mfccs, mean_stds)
Example #22
def printer(function):
    window[function] = eval("scipy.signal.%s(size)" % function)
    pylab.plot(numpy.abs(window[function]))
    pylab.xlim(0, size - 1)
    pylab.savefig("%s-%03d.png" % (function, speed))
    pylab.close()

    fft[function] = scipy.fft(window[function])
    pylab.loglog(numpy.abs(fft[function])[0 : size / 2 + 1])
    pylab.savefig("%s-fft-%03d.png" % (function, speed))
    pylab.close()

    sinus[function] = scipy.fft(y * window[function])
    pylab.loglog(numpy.abs(sinus[function])[0 : size / 2 + 1])
    pylab.savefig("%s-sinus-%03d.png" % (function, speed))
    pylab.close()
 def _fft(self, data):
     data_length = len(data)
     frequencies = scipy.fft(data) / data_length
     frequencies = frequencies[range(data_length / 2)]
     frequencies[0] = 0
     frequencies = np.abs(frequencies)
     return frequencies
Example #24
    def plot_spectrum(self, ax=None):
        """
        Plots spectral plot of power data
        http://www.itl.nist.gov/div898/handbook/eda/section3/spectrum.htm

        Code borrowed from:
        http://glowingpython.blogspot.com/2011/08/how-to-plot-frequency-spectrum-with.html

        Returns
        -------
        matplotlib.axis
        """ 
        if ax is None:
            ax = plt.gca()
        Fs = 1.0/self.sample_period()
        for power in self.power_series():
            n = len(power.values) # length of the signal
            k = np.arange(n)
            T = n/Fs
            frq = k/T # two sides frequency range
            frq = frq[range(n//2)] # one side frequency range

            Y = fft(power)/n # fft computing and normalization
            Y = Y[range(n//2)]

            ax.plot(frq,abs(Y)) # plotting the spectrum

        ax.set_xlabel('Freq (Hz)')
        ax.set_ylabel('|Y(freq)|')
        return ax
def returnSpectrum(y,Fs):
    """
    Plots a Single-Sided Amplitude Spectrum of y(t)
    y = wavefile in array form
    Fs = sample frequency, sometimes called sampling rate; likely 44100
    """

    n = len(y) # length of the signal
    k = arange(n) # array from [0, 1, 2, ... n-1]
    T = n/float(Fs) # need to make Fs a float so T can be 0.xx decimal

    if y.ndim > 1: # stereo music has more than one dimension for stereo sound
        y = y[:,0] # taking 1st dimension, slice of matrix

    frq = k/T # two sides frequency range
    # take array, divide by num secs
    frq = frq[range(n/2)] # one side frequency range

    Y = fft(y)/n # fft computing and normalization
    Y = Y[range(n/2)]

    zipped =  zip(frq, abs(Y)) # takes 2 arrays of same size & combines

    frequencies = dict(zipped)
    return frequencies
Example #26
def test():
    run_c("fft.c")
    x_re = [float(i) for i in open("x_re")]
    x_im = [float(i) for i in open("x_im")]
    fft_x_re = [float(i) for i in open("fft_x_re")]
    fft_x_im = [float(i) for i in open("fft_x_im")]

    time_complex = [i + (j*1.0j) for i, j in zip(x_re, x_im)]
    numpy_complex = s.fft(time_complex)
    numpy_magnitude = n.abs(numpy_complex)

    chips_complex = [i + (j*1.0j) for i, j in zip(fft_x_re, fft_x_im)]
    chips_magnitude = n.abs(chips_complex)

    f, subplot = pyplot.subplots(3, sharex=True)

    pyplot.subplots_adjust(hspace=1.0)
    subplot[0].plot(x_re, 'g')
    subplot[1].plot(numpy_magnitude, 'r')
    subplot[2].plot(chips_magnitude, 'b')
    pyplot.xlim(0, 1023)
    subplot[0].set_title("Time Domain Signal (64 point sine)")
    subplot[1].set_title("Frequency Spectrum - Numpy")
    subplot[2].set_title("Frequency Spectrum - Chips")
    subplot[0].set_xlabel("Sample")
    subplot[1].set_xlabel("Sample")
    subplot[2].set_xlabel("Sample")
    pyplot.savefig("../docs/source/examples/images/example_5.png")
    pyplot.show()
Example #27
def standard_plot(data, rate):
    sample_length = len(data)
    k = arange(sample_length)
    period = sample_length / rate
    freqs = (k / period)[range(sample_length / 2)] #right-side frequency range
    Y = (fft(data) / sample_length)[range(sample_length / 2)]
    semilogy(freqs, abs(Y)) # plotting the spectrum
Example #28
def get_audio_data():
    global data, stream

    if stream is None:
        pa = pyaudio.PyAudio()
        stream = pa.open(
            format=pyaudio.paInt16,
            channels=1,
            rate=SAMPLING_RATE,
            input=True,
            frames_per_buffer=NUM_SAMPLES
        )

    while True:
        try:
            raw_data  = np.fromstring(stream.read(NUM_SAMPLES), dtype=np.int16)
            signal = raw_data / 32768.0
            fft = sp.fft(signal)
            spectrum = abs(fft)[:NUM_SAMPLES/2]
            power = spectrum**2
            bins = [simps(a) for a in np.split(power, 16)]
            new_data = signal.tolist(), spectrum.tolist(), bins
            with mutex:
                data = new_data
        except:
            with mutex:
                data = None
Example #29
def hanning_standard_plot(data, rate):
    sample_length = len(data)
    k = arange(sample_length)
    period = sample_length / rate
    freqs = (k / period)[range(sample_length / 2)] #right-side frequency range
    Y = (fft(data * np.hanning(sample_length)) / sample_length)[range(sample_length / 2)]
    semilogy(freqs, abs(Y))
Example #30
 def get_amplitude(self, signal, l):
     if self.amplitude.has_key(l):
         return self.amplitude[l]
     else:
         amp = sp.absolute(sp.fft(get_frame(signal, self.winsize, l) * self.window))
         self.amplitude[l] = amp
         return amp
Example #31
Ts = 1.0 / Fs  # sampling interval (Fs is the sampling rate)
t = arange(0, 1, Ts)  # time vector

ff = 30  # frequency of the test signal
y = sin(2 * pi * ff * t)
y = y + .6 * sin(4 * pi * ff * t) + .4 * sin(8 * pi * ff * t) + .2 * sin(
    12 * pi * ff * t)  # summing sines

win = signal.hann(6)  #LP Filter using hanning window
yFilt = signal.convolve(y, win, mode='same') / sum(win)

#compute FFT
yfft = fft(y)
FFT = abs(sp.fft(y))
freqs = sp.fftpack.fftfreq(t.size, t[1] - t[0])
# mask near-zero values so the log plot below is well defined
FFT[abs(FFT) < 0.000001] = 0
FFT = np.ma.masked_equal(FFT, 0)

pitchDetection(y, t, 'Normal')
pitchDetection(yFilt, t, 'Filtered')

pylab.subplot(212)
pylab.plot(abs(freqs.real), sp.log10(FFT), 'x')

subplot(2, 1, 1)
plot(t, y, 'g')
plot(t, yFilt, 'r')
Example #32
     color='g',
     linestyle='dashed')
grid()
subplot(212)
ylabel(r'$\bar{u}^s$')
xlabel(r't')
plt.ylim(-2.1, 2.1)
plt.xlim(xmin=.49)
p1 = plot(dataPlot[4900:, 0], dataPlot[4900:, 3])
#p2 = plot(dataPlot[4900:, 0], np.sin(50*dataPlot[4900:, 0]))
#plt.legend((p1[0], p2[0]), (r'$\bar{u}^s(t)$', r'$-\rho(t)$'), ncol=2)
savefig('esmc_sigma_u_z')

u_z = dataPlot[4900:, 3]
n = len(u_z)
Y = scipy.fft(dataPlot[4900:, 3]) / n
k = arange(n)
T = n * h
frq = k / T
frq = frq[list(range(n // 2))]
Y = Y[list(range(n // 2))]
plot(frq, abs(Y), 'r')
xlabel(r'freq (Hz)')
title(r'Frequency spectrum of $\bar{u}^s$')
savefig('esmc_u_freq.png')
# TODO
# compare with the reference
#ref = getMatrix(SimpleMatrix("result.ref"))
#if (norm(dataPlot - ref[1:,:]) > 1e-12):
#    print("Warning. The result is rather different from the reference file.")
Example #33
def peakdetect_fft(y_axis, x_axis, pad_len = 5):
    """
    Performs a FFT calculation on the data and zero-pads the results to
    increase the time domain resolution after performing the inverse fft and
    send the data to the 'peakdetect' function for peak 
    detection.
    
    Omitting the x_axis is forbidden as it would make the resulting x_axis
    value silly if it was returned as the index 50.234 or similar.
    
    Will find at least 1 less peak than the 'peakdetect_zero_crossing'
    function, but should result in a more precise value of the peak as
    resolution has been increased. Some peaks are lost in an attempt to
    minimize spectral leakage by calculating the fft between two zero
    crossings for n amount of signal periods.
    
    The biggest time eater in this function is the ifft and thereafter it's
    the 'peakdetect' function which takes only half the time of the ifft.
    Speed improvements could include checking if 2**n points could be used for
    fft and ifft or changing 'peakdetect' to 'peakdetect_zero_crossing',
    which is maybe 10 times faster than 'peakdetect'. The pro of 'peakdetect'
    is that it results in one less lost peak. It should also be noted that the
    time used by the ifft function can change greatly depending on the input.
    
    keyword arguments:
    y_axis -- A list containing the signal over which to find peaks
    x_axis -- An x-axis whose values correspond to the y_axis list and is used
        in the return to specify the position of the peaks.
    pad_len -- (optional) The factor by which the time resolution should be
        increased, e.g. 1 doubles the resolution. The amount is rounded up
        to the nearest 2 ** n amount (default: 5)
    
    return -- two lists [max_peaks, min_peaks] containing the positive and
        negative peaks respectively. Each cell of the lists contains a tuple
        of: (position, peak_value) 
        to get the average peak value do: np.mean(max_peaks, 0)[1] on the
        results to unpack one of the lists into x, y coordinates do: 
        x, y = zip(*tab)
    """
    # check input data
    x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)
    zero_indices = zero_crossings(y_axis, window = 11)
    #select a n amount of periods
    last_indice = - 1 - (1 - len(zero_indices) & 1)
    # Calculate the fft between the first and last zero crossing
    # this method could be ignored if the beginning and the end of the signal
    # are discardable as any errors induced from not using whole periods
    # should mainly manifest in the beginning and the end of the signal, but
    # not in the rest of the signal
    fft_data = fft(y_axis[zero_indices[0]:zero_indices[last_indice]])
    padd = lambda x, c: x[:len(x) // 2] + [0] * c + x[len(x) // 2:]
    n = lambda x: int(log(x)/log(2)) + 1
    # pads to 2**n amount of samples
    fft_padded = padd(list(fft_data), 2 ** 
                n(len(fft_data) * pad_len) - len(fft_data))
    
    # There is amplitude decrease directly proportional to the sample increase
    sf = len(fft_padded) / float(len(fft_data))
    # There might be a leakage giving the result an imaginary component
    # Return only the real component
    y_axis_ifft = ifft(fft_padded).real * sf #(pad_len + 1)
    x_axis_ifft = np.linspace(
                x_axis[zero_indices[0]], x_axis[zero_indices[last_indice]],
                len(y_axis_ifft))
    # get the peaks to the interpolated waveform
    max_peaks, min_peaks = peakdetect(y_axis_ifft, x_axis_ifft, 500,
                                    delta = abs(np.diff(y_axis).max() * 2))
    #max_peaks, min_peaks = peakdetect_zero_crossing(y_axis_ifft, x_axis_ifft)
    
    # store one 20th of a period as waveform data
    data_len = int(np.diff(zero_indices).mean()) / 10
    data_len += 1 - data_len & 1
    
    
    fitted_wave = []
    for peaks in [max_peaks, min_peaks]:
        peak_fit_tmp = []
        index = 0
        for peak in peaks:
            index = np.where(x_axis_ifft[index:]==peak[0])[0][0] + index
            x_fit_lim = x_axis_ifft[index - data_len // 2:
                                    index + data_len // 2 + 1]
            y_fit_lim = y_axis_ifft[index - data_len // 2:
                                    index + data_len // 2 + 1]
            
            peak_fit_tmp.append([x_fit_lim, y_fit_lim])
        fitted_wave.append(peak_fit_tmp)
    
    #pylab.plot(range(len(fft_data)), fft_data)
    #pylab.show()
    
    pylab.plot(x_axis, y_axis)
    pylab.hold(True)
    pylab.plot(x_axis_ifft, y_axis_ifft)
    #for max_p in max_peaks:
    #    pylab.plot(max_p[0], max_p[1], 'xr')
    pylab.show()
    return [max_peaks, min_peaks]
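A hedged usage sketch for peakdetect_fft, assuming it sits in the same module as the helpers it calls (_datacheck_peakdetect, zero_crossings, peakdetect) and that pylab is available for its plotting.

import numpy as np

x = np.linspace(0, 4 * np.pi, 600)
y = np.sin(x) + 0.05 * np.random.randn(600)          # noisy sine with a few periods
max_peaks, min_peaks = peakdetect_fft(y, x, pad_len=5)
peak_positions = [p[0] for p in max_peaks]           # x positions of the detected maxima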
Example #34
        rad = "E:/genres/genres/trainset/" + g + "." + str(n).zfill(
            5) + ".fft" + ".npy"
        # load the file
        fft_features = np.load(rad)
        X.append(fft_features)
        # genre_list.index(g) returns the index of the matching genre
        Y.append(genre_list.index(g))

# the assembled training set
X = np.array(X)
# the class labels corresponding to the training set
Y = np.array(Y)

# Next, we use sklearn to build and train our two classifiers
# ------train logistic classifier--------------
model = LogisticRegression()
# requires numpy.array-type arguments
model.fit(X, Y)

print('Starting read wavfile...')
# prepare test data-------------------
sample_rate, test = wavfile.read(
    "E:/genres/genres/heibao-wudizirong-remix.wav")
print(sample_rate, test, len(test))
testdata_fft_features = abs(fft(test))[:1000]
# model.predict(testdata_fft_features) returns the prediction as an array: array([class])
type_index = model.predict([testdata_fft_features])

print(type_index, type(type_index))
print(genre_list[type_index[0]])
Example #35
        g2=g2+((np.power(-1,p)/p)*((np.cos(p*np.pi*2*t))-1)*(np.sin(p*np.pi*t)))
    g2=g2*(2/np.pi)
    
    for p in range(1,H3):
        g3=g3+((np.power(-1,p)/p)*((np.cos(p*np.pi*2*t))-1)*(np.sin(p*np.pi*t)))
    g3=g3*(2/np.pi)

    
    return (g,g2,g3,t)



# Exercise 1, Part 1

start11fft = time.clock()    
sc.fft(s)
end11fft = time.clock()

czas11fft=(end11fft-start11fft)*1000

print('Zad1_1_FFT') 
print(czas11fft)

print()

start11dft = time.clock() 
dft_zad_11=dft(s)
end11dft = time.clock()

czas11dft=(end11dft-start11dft)
Example #36
        Ts = 1.0 / Fs
        xs = numpy.arange(0, 1, Ts)

        signal = numpy.zeros(np.shape(xs))
        for i in range(len(F)):
            omega = 2 * np.pi * F[i]
            signal = signal + w[i] * numpy.cos(omega * xs)

        start_time = time.time()
        FourierSeries(signal)
        end_time = time.time()
        print("Elapsed time naive algo  %g seconds" % (end_time - start_time))

        start_time = time.time()
        fft(signal)
        end_time = time.time()
        print("Elapsed time of fft algo  %g seconds" % (end_time - start_time))

    if mode == 7:
        x = np.array([1, 1, 1, 1, 1])
        h = np.array([1, 2, 3, 4, 0])
        r = np.convolve(x, h)
        print('Linear convolution')
        print(r)
        f1 = fft(x, 5)
        f2 = fft(h, 5)
        s = ifft(f1 * f2)
        print('Circular convolution')
        print(abs(s))
        f1 = fft(x, 9)
Example #37
            z = x
            break
        t += x
        c += 1
    return [t, "0X{}".format(z), validate.crc(t).upper()]


lockFlag = False
loScore = 0
while True:
    while stream.get_read_available() < SAMPLES:
        sleep(0.005)
    audioData = fromstring(stream.read(stream.get_read_available()),
                           dtype=short)[-SAMPLES:]
    normalizedData = audioData / 32768.0
    intensity = abs(fft(normalizedData))[:SAMPLES / 2]
    frequencies = linspace(0.0, float(RATE) / 2, num=SAMPLES / 2)
    for tone in DETECT:
        try:
            if max(intensity[(frequencies < tone+BANDWIDTH) &
                             (frequencies > tone-BANDWIDTH )]) >\
               max(intensity[(frequencies < tone-100) &
                             (frequencies > tone-200)]) + SENSITIVITY:
                b = int(tone == hi)
                msg += str(b)
                if not b:
                    loScore += 1
                    if not lockFlag:
                        print "Tone detected! Attempting to demodulate.."
                        lockFlag = True
                else:
Example #38
from numpy import sin, linspace, pi, loadtxt, fix, delete, savetxt
from pylab import plot, show, title, xlabel, ylabel, subplot, figure
from scipy import fft, arange
import sys

Fs = 1000
# sampling rate
Fn = Fs / 2
y = loadtxt(fname=str(sys.argv[1]))
L = len(y)
Ts = arange(0, L) / float(Fs)
# print(Ts)
# t = arange(0,L-1,Ts)/Fs # time vector
Femg = fft(y) * 2 / L
FreqVector = linspace(0, 1, fix(L / 2)) * Fn
indexV = arange(1, len(FreqVector))

subplot(2, 1, 1)

plot(Ts, y)
xlabel('Time')
ylabel('Amplitude')
subplot(2, 1, 2)
Femg = abs(Femg[indexV])
# print (len(Femg))
FreqVector = delete(FreqVector, len(FreqVector) - 1)
# print((FreqVector))

result = [i for i in FreqVector]
# print (result)
result2 = Femg[0:len(result)]
Example #39
test_data = ['2303']
train_data = ['2382']

# create the train set
Xtrain_aggregated = []
Ytrain_aggregated = []

for i in range(len(train_data)):
    print(i)
    X, Y = data[train_data[i]]
    for p in range(int((len(X) - window_size) / stride / k)):
        Xtrain = np.empty([k, d, 2])
        Ytrain = np.zeros([k, m])
        for j in range(k):
            s = j * stride + p * k * stride  # start from one second to give us some wiggle room for larger segments
            X_fft = fft(X[s:s + window_size])
            Xtrain[j, :, 0] = X_fft[0:d].real
            Xtrain[j, :, 1] = X_fft[0:d].imag
            # label stuff that's on in the center of the window
            for label in Y[s + d / 2]:
                if (label.data[1]) >= m:
                    continue
                else:
                    Ytrain[j, label.data[1]] = 1
        # if not np.any(Ytrain):
        #     continue
        Xtrain_aggregated.append(Xtrain)
        Ytrain_aggregated.append(Ytrain)
Xtrain_aggregated = np.array(Xtrain_aggregated)
print(type(Xtrain_aggregated))
Ytrain_aggregated = np.array(Ytrain_aggregated)
Example #40
def BatterBand(X, WL, WH, n):
    hightFilter = BatterHight(X, WH, n)
    lowFilter = BatterLaw(X, WL, n)
    resFilter = hightFilter * lowFilter
    return resFilter


if __name__ == '__main__':
    sig = [
        6. * sin(2. * pi * 440.0 * t / FD) + 2 * sin(pi * 4400.0 * t / FD)
        for t in range(N)
    ]

    spectrum = rfft(sig)
    sigFft = scipy.fft(sig)

    freq = rfftfreq(N, 1. / FD)

    # batterLaw = BatterLaw(freq, 1000, 10)
    batterHight = BatterHight(freq, 1000, 10)

    batterFourier = irfft(batterHight)

    newFilter = np.zeros(2 * len(spectrum))
    # newFilter = np.full(2*len(spectrum), 1)
    for i in range(100):
        newFilter[i] = batterFourier[i]

    newFilter = batterHight
    newFilter = rfft(newFilter)
Example #41
def compute_avgamplitude(signal, winsize, window):
    windownum = len(signal) / (winsize / 2) - 1
    avgamp = sp.zeros(winsize)
    for l in xrange(windownum):
        avgamp += sp.absolute(sp.fft(get_frame(signal, winsize, l) * window))
    return avgamp / float(windownum)
while True:
    timeout = 30 # report in to mqtt every 30 seconds
    timeout_start = time.time()
    while time.time() < timeout_start + timeout:
        test = 0
        if test == 5:
            break
        test -= 1

        while _stream.get_read_available()< NUM_SAMPLES: sleep(0.05)
        audio_data  = fromstring(_stream.read(
            _stream.get_read_available()), dtype=short)[-NUM_SAMPLES:]
        # Each data point is a signed 16 bit number, so we can normalize by dividing 32*1024
        normalized_data = audio_data / 32768.0
        intensity = abs(fft(normalized_data))[:NUM_SAMPLES/2]
        frequencies = linspace(0.0, float(SAMPLING_RATE)/2, num=NUM_SAMPLES/2)
        if frequencyoutput:
          which = intensity[1:].argmax()+1
          # use quadratic interpolation around the max
          if which != len(intensity)-1:
            y0,y1,y2 = log(intensity[which-1:which+2:])
            x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
            # find the frequency and output it
            freqPast = freqNow
            freqNow = (which+x1)*SAMPLING_RATE/NUM_SAMPLES
          else:
            freqNow = which*SAMPLING_RATE/NUM_SAMPLES

        if debug: print "\t\t\t\tfreq=",freqNow,"\t",freqPast
        if debug: print "\t\t\t\tnotes=",notes
#%%
import scipy as sp
import matplotlib.pyplot as plt

listA = sp.ones(500)
listA[100:300] = -1
f = sp.fft(listA)
plt.plot(abs(f))   # plot the magnitude spectrum (f itself is complex)
# %%
def Calculate_ALFF_fALFF(slow, ASamplePeriod, Time_s, plots=False):
    import os
    import math
    import scipy
    import numpy as np
    import scipy.io as sio
    import matplotlib.pyplot as plt

    slow = (slow - 2)
    AllVolume = sio.loadmat(Time_s)['time_series']
    row, col = np.shape(AllVolume)
    names = ['slow_2', 'slow_3', 'slow_4', 'slow_5']
    SlowHigh = [0.25, 0.198, 0.073, 0.027]
    SlowLow = [0.198, 0.073, 0.027, 0.01]

    HighCutoff = SlowHigh[slow]  #the High edge of the pass band
    LowCutoff = SlowLow[slow]  #the low edge of the pass band

    sampleFreq = 1 / ASamplePeriod
    sampleLength = row

    p = 1
    while True:
        if 2**p >= sampleLength:
            break
        else:
            p = p + 1
    #paddedLength = 2**(nextpow2(sampleLength))
    paddedLength = 2**(p)

    if (LowCutoff >= sampleFreq / 2):  # All high included
        idx_LowCutoff = paddedLength // 2 + 1
    else:  # high cut off, such as freq > 0.01 Hz
        idx_LowCutoff = math.ceil(LowCutoff * paddedLength * ASamplePeriod + 1)
    # Change from round to ceil: idx_LowCutoff = round(LowCutoff *paddedLength *ASamplePeriod + 1);

    if (HighCutoff >= sampleFreq / 2) or (HighCutoff == 0):  # All low pass
        idx_HighCutoff = paddedLength // 2 + 1
    else:  #Low pass, such as freq < 0.08 Hz
        idx_HighCutoff = np.fix(HighCutoff * paddedLength * ASamplePeriod + 1)
    # Change from round to fix: idx_HighCutoff	=round(HighCutoff *paddedLength *ASamplePeriod + 1);
    #Zero Padding
    a = np.zeros((paddedLength - sampleLength, len(AllVolume[2])))
    AllVolume = np.concatenate((AllVolume, a), axis=0)

    print('\t Performing FFT ...')

    AllVolume = np.transpose(AllVolume)
    AllVolume = 2 * np.true_divide(abs(scipy.fft(AllVolume)), sampleLength)
    AllVolume = np.transpose(AllVolume)

    print('Calculating ALFF for slow', slow + 2, ' ...')
    ALFF_2D = np.mean(AllVolume[idx_LowCutoff:int(idx_HighCutoff)], axis=0)

    print('Calculating fALFF for slow', slow + 2, ' ...')
    num = np.sum(AllVolume[(idx_LowCutoff):int(idx_HighCutoff)],
                 axis=0,
                 dtype=float)
    den = np.sum(AllVolume[2:int(paddedLength / 2 + 1)], axis=0, dtype=float)
    fALFF_2D = num / den

    metricas = np.concatenate((ALFF_2D, fALFF_2D), axis=0).reshape((2, col))

    if plots:
        plt.figure()
        plt.title('Power Spectral Density')
        freq = np.arange(0.0, 1 / ASamplePeriod,
                         1 / (ASamplePeriod * np.shape(AllVolume)[0]))
        plt.plot(freq, AllVolume)

        plt.figure()
        plt.title('ALFF')
        plt.plot(metricas[0, :])

        plt.figure()
        plt.title('fALFF')
        plt.plot(metricas[1, :])
    print('...done')

    ##################################
    Resul = os.getcwd()  #+'-Results'
    #os.system('mkdir '+Resul)
    ##################################
    out_mat = Resul + '/ALFF_and_fALFF_' + names[slow] + '.mat'
    sio.savemat(out_mat, {'ALFF': metricas[0], 'fALFF': metricas[1]})
    return out_mat
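A hedged call sketch for Calculate_ALFF_fALFF; 'time_series.mat' is a placeholder .mat file assumed to contain a 'time_series' matrix (time points x voxels), and a TR of 2 s is an assumption.

# slow=4 selects the 'slow_4' band (0.027-0.073 Hz) per the names/SlowHigh/SlowLow tables above
out_mat = Calculate_ALFF_fALFF(slow=4, ASamplePeriod=2.0, Time_s='time_series.mat', plots=False)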
 def fourier_transform(self, X):
     return np.abs(fft(X, n=X.size))
Example #46
    Phi = linalg.spsolve(Poisson, -dx**2 * rho[0:NG - 1])

    Phi = np.concatenate((Phi, [0]))

    normphi.append(norm(Phi))

    # Electric field on the grid

    Eg = (np.roll(Phi, 1) - np.roll(Phi, -1)) / (2 * dx)

    Ep.append(Eg[round(NG / 2)])

    # Electric field fft

    ft = abs(scipy.fft(Eg))
    k = scipy.fftpack.fftfreq(Eg.size, xg[1] - xg[0])

    # interpolation grid->particle and velocity update

    vp += mat * QM * Eg * DT

    bins, edges = np.histogram(vp, bins=40, range=(-3.2, 3.2))
    left, right = edges[:-1], edges[1:]
    vc = np.array([left, right]).T.flatten()
    fv = np.array([bins, bins]).T.flatten()

    Etot = 0.5 * (Eg**2).sum() * dx

    histEnergy.append(Etot + 0.5 * Q / QM * (vp**2).sum())
Example #47
import scipy
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import wave

x = np.arange(0, 100, 0.01).tolist()
y = [math.sin(z) for z in x]

df = pd.read_csv('test_data.csv')
data = df['pc']
index = np.linspace(0, 100, len(data))

data = 50*np.sin(index)# + 100*np.sin(2*index)


f = scipy.fft(data)
f = f/len(data)

plt.figure(0)
plt.plot(np.linspace(0, 100, len(data)).tolist(), np.abs(f))
plt.xlim(0, 50)
plt.figure(1)
plt.plot(data)
plt.show()
Example #48
def stft(input_data,
         sampling_frequency_hz,
         frame_size_sec,
         hop_size_sec,
         use_hamming_window=True):
    """Calculates the Short Time Fourier Transform

    Using code based on http://stackoverflow.com/a/6891772/95592 calculate
    the STFT.

    Args:
        input_data: A 1D numpy ndarray containing the signal in the time
            domain that will be converted to the freq domain via STFT.
        sampling_frequency_hz: Sampling frequency originally used to capture
            the input_data
        frame_size_sec: Frame size given in seconds. The frame size determines
            how long each FFT will be in the time domain.
        hop_size_sec: Hop size given in seconds. The hop size is the time
            by which the frame should be shifted forward for the next
            FFT. It is not uncommon for this to be less than the frame
            size so that there is some amount of overlap.
        use_hamming_window: A Boolean indicating if the Hamming window
            should be used when performing the FFT. Using a Hamming window
            helps.

    Returns:
        A tuple containing:
            1. A 2D numpy ndarray providing the amplitude of the STFT with
                respect to the frequency and time having a shape of
                (time, freq). This array is trimmed to be single-sided instead
                of returning the double-sided FFT, and it is normalized by
                2/N where N is the length of the frequency domain info. The
                DC component is not multiplied by 2 though, it is just
                normalized by 1/N.
            2. A 1D numpy ndarray [shape = (time,)] containing the time in
                seconds for each value in the stft_data along the time axis.
            3. A 1D numpy ndarray [shape = (freq,)] containing the freq in
                Hz for each value in the stft_data along the frequency axis.
            4. A float indicating the frequency bin size in Hz or what is
                also referred to as the frequency domain step size (not
                to be confused with or equal to the sampling frequency).

    """
    num_frame_samples = int(frame_size_sec * sampling_frequency_hz)
    num_hop_samples = int(hop_size_sec * sampling_frequency_hz)
    if (use_hamming_window):
        x = np.array([
            scipy.fft(2 * scipy.hamming(num_frame_samples) *
                      input_data[i:i + num_frame_samples])
            for i in range(0,
                           len(input_data) -
                           num_frame_samples, num_hop_samples)
        ])
    else:
        x = np.array([
            scipy.fft(input_data[i:i + num_frame_samples])
            for i in range(0,
                           len(input_data) -
                           num_frame_samples, num_hop_samples)
        ])

    # Normalize the FFT results
    # See "Description and Application of Fourier Transforms and Fourier
    # Series" rev A05 by Matthew Rankin for a description on why the
    # normalization is 2 / N except for the DC component which is 1 / N
    # Only deal with the single-sided FFT, so cut it in half
    x = x[:, :num_frame_samples // 2]
    # Convert from complex to absolute values
    x = np.abs(x)
    # Divide all components by the num_frame_samples
    # Multiply all but the DC component by 2
    non_dc_normalization = 2 / num_frame_samples
    x[:, 1:] = x[:, 1:] * non_dc_normalization
    x[:, 0] = x[:, 0] / num_frame_samples

    # Create the time vector
    # FIXME(mdr): Need to add test to make sure this is correctly calculated.
    # Might want to refactor into separate function.
    time_vector_stft = np.linspace(0, (x.shape[0] - 1) * hop_size_sec,
                                   x.shape[0])

    # Calculate the width of each frequency bin
    hz_per_freq_bin = sampling_frequency_hz / num_frame_samples

    # Create the frequency vector
    freq_vector_stft = np.arange(x.shape[1]) * hz_per_freq_bin

    return (x, time_vector_stft, freq_vector_stft, hz_per_freq_bin)
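A hedged usage sketch for stft above: one second of a 440 Hz tone at 8 kHz, analysed with 64 ms frames and 32 ms hops.

import numpy as np

fs = 8000.0
t = np.arange(0, 1.0, 1.0 / fs)
tone = np.sin(2 * np.pi * 440.0 * t)

amps, times, freqs, bin_hz = stft(tone, fs, frame_size_sec=0.064, hop_size_sec=0.032)
# amps has shape (len(times), len(freqs)); the bin nearest 440 Hz should dominate every frame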
Example #49
def make_features(driver_history):
    df = pd.read_csv(driver_history)
    x = pd.Series(df['x'])
    y = pd.Series(df['y'])

    if x[len(x) - 1] < 0:
        x = -x
    if y[len(y) - 1] < 0:
        y = -y

    # rotate each route WRT the origin
    theta = np.arctan(np.float64(y[len(y) - 1]) / np.float64(x[len(x) - 1]))
    x_ = np.cos(theta) * x[:] + np.sin(theta) * y[:]
    y_ = -np.sin(theta) * x[:] + np.cos(theta) * y[:]
    x, y = x_[:], y_[:]

    vel = 3.6 * np.power(np.power(np.diff(x), 2) + np.power(np.diff(y), 2), 0.5)
    x_vel, y_vel = 3.6 * np.diff(x), 3.6 * np.diff(y)
    accel = (1. / 3.6) * np.diff(vel)
    x_accel, y_accel = (1. / 3.6) * np.diff(x_vel), (1. / 3.6) * np.diff(y_vel)
    fft_x_accel, fft_y_accel = scipy.fft(x_accel), scipy.fft(y_accel)

    fft = scipy.fft(accel)
    for x in range(len(fft)):
        if x >= 50:
            fft[x] = 0
            fft_x_accel[x] = 0
            fft_y_accel[x] = 0
    inv_fft = scipy.ifft(fft).real
    inv_fft_x_accel = scipy.ifft(fft_x_accel).real
    inv_fft_y_accel = scipy.ifft(fft_y_accel).real

    cent_accel = abs(
        inv_fft_x_accel * (np.float64(y_vel[:-1]) / vel[:-1]) - inv_fft_y_accel * (np.float64(x_vel[:-1]) / vel[:-1]))
    cent_accel_hist = np.histogram(cent_accel, bins=50, range=(0, 30), density=False)[0]
    accel_ampl = np.power(np.power(inv_fft_x_accel, 2) + np.power(inv_fft_y_accel, 2), 0.5)
    accel_ampl_hist = np.histogram(accel_ampl, bins=20, range=(0, 4 * 9.8), density=False)[0]

    total_distance = np.sum(vel) / 3600  # in km
    avg_speed = np.mean(vel)  # in km/hr
    max_speed = max(vel)  # in km/hr
    var_speed = np.std(vel)  # in km/hr
    avg_accel = np.mean(inv_fft[inv_fft > 0])  # in km/hr^2
    avg_decel = np.mean(inv_fft[inv_fft < 0])  # in km/hr^2
    if len(inv_fft[abs(inv_fft) > 0]) > 0:
        max_accel = max(inv_fft[inv_fft > 0])  # in km/hr^2
        min_accel = min(inv_fft[inv_fft < 0])  # in km/hr^2
    else:
        max_accel = 0
        min_accel = 0
    std_accel = np.std(inv_fft)  # in km/hr^2
    speed_hist = np.histogram(vel, bins=20, range=(0, 130), density=False)[0]
    theta_hist = \
        np.histogram(np.arctan(np.float64(y) / np.float64(x)), bins=20, range=(-1.5708, 1.5708), density=False)[0]

    highway, mainroad, backroad = 0, 0, 0  # 0 is False, 1 is True
    hightime, maintime, backtime = 0, 0, 0
    hi_speed_avg, main_speed_avg, back_speed_avg = 0, 0, 0
    if np.all(np.diff(np.where(vel[vel > 80])) == 1):
        if len(vel[vel > 80]) > 300:
            highway = 1
            hightime = float(len(vel[vel > 80])) / len(x)
            hi_speed_avg = np.mean(vel[vel > 80])

    if np.all(np.diff(np.where(vel[vel > 80])) == 1):
        if len(vel[vel > 80]) in range(300):
            mainroad = 1
            maintime = float(len(vel[vel > 80])) / len(x)
            main_speed_avg = np.mean(vel[vel > 80])

    if np.any(np.diff(np.where(vel[vel < 80])) == 1):
        backroad = 1
        backtime = float(len(vel[vel < 80])) / len(x)
        back_speed_avg = np.mean(vel[vel < 80])

    returned = 0
    if min(np.linalg.norm(np.array([x[len(x) / 2:], y[len(y) / 2:]]))) < 150:
        returned = 1

    # High accel is more than g/2
    hi_accel, brake_accel, hi_decel, brake_decel = 0, 0, 0, 0
    if len(inv_fft[np.where(vel < 1)[0] - 1]) > 0:
        if max(inv_fft[np.where(vel < 1)[0] - 1]) > 0.5 * 9.8:
            hi_accel = 1
            brake_accel = max(abs(inv_fft[np.where(vel < 1)[0] - 1]))
        if min(inv_fft[np.where(vel < 1)[0] - 1]) < -0.5 * 9.8:
            hi_decel = 1
            brake_decel = max(abs(inv_fft[np.where(vel < 1)[0] - 1]))

    return np.append(
        [total_distance, returned, avg_speed, max_speed, var_speed, avg_accel, avg_decel, max_accel, min_accel,
         std_accel, highway,
         hightime, hi_speed_avg, mainroad, maintime, main_speed_avg, backroad, backtime, back_speed_avg, hi_accel,
         brake_accel,
         hi_decel, brake_decel], np.append(np.append(cent_accel_hist, speed_hist), accel_ampl_hist))
Example #50
    def get_fft(self, x, y, z):
        avg = (np.array(x) + np.array(y) + np.array(z))/3

        return np.abs(fft(avg)/len(avg))[1:len(avg)//2]
# Pseudocode/skeleton code for portion that reduces background noise

# importing necessary libraries
import numpy as np
import scipy as sp
from scipy.io.wavfile import read
from scipy.io.wavfile import write  
from scipy import signal

# import below library for plotting
import matplotlib.pyplot as pt

# insert input wav file below
(freq, arr) = read('')

# plotting original audio
pt.plot(arr)
pt.title('Original Audio')
pt.xlabel('Sample')
pt.ylabel('Amplitude')

fft = sp.fft(arr)

# do something with Gaussian
# make new sound

output = arr # tentative

#writing to output file
write ("", freq, output)
Example #52
def create_fft(fn):
    sample_rate, X = scipy.io.wavfile.read(fn)

    fft_features = abs(scipy.fft(X)[:1000])
    write_fft(fft_features, fn)
Example #53
 def getBW(self, umbral=0.9):
     E = np.sum(self.x * self.x)
     XW = fft(self.x)
     Ewacum = np.cumsum(np.real(XW * np.conj(XW)) / len(self.x) / (E / 2))
     ind = np.min(np.where(Ewacum > umbral))
     return self.fs / len(self.x) * ind
Example #54
def altMeasure(ecgmat, windowbegin, windowlength, qrswidth, qtinterval,
               meanrr):
    """Measure alternans for the ecglead defined in the defined window 
    given the matrix of qrst points created by makemat. Calculate for 
    whole cycle, return values for each point and also overall value
    in the qt interval alone"""

    #calculate power spectrum for each column
    #make sure we use an even number so that the spect contains odd no. of points
    if windowlength % 2 == 0:
        Nfft = windowlength
    else:
        Nfft = windowlength + 1

    beats, meanrr = ecgmat.shape  #mean rr is the second dim of ecgmat

    powerspect = scipy.zeros((Nfft / 2 + 1, meanrr))
    kvector = scipy.zeros(meanrr)
    valtvector = scipy.zeros(meanrr)

    for i in range(meanrr):

        timeseries = ecgmat[windowbegin:windowbegin + windowlength, i]
        timeseries -= scipy.mean(timeseries)  #remove dc

        #get the first half of the spectrum
        spect = scipy.fft(timeseries, Nfft)[:Nfft / 2 + 1]

        #get absolute magnitude and scale it by nbeats
        spect = abs(spect) / Nfft

        #powerspect is sq of this
        powerspect[:, i] = spect**2

        #except dc and nyquist, other points have to be multiplied by 2
        powerspect[1:-1, i] *= 2

        #calculate valt and k for point
        altpower = powerspect[-1, i]
        noise = powerspect[-11:-1, i]
        meannoise = scipy.mean(noise)
        stdnoise = scipy.std(noise)

        if altpower < meannoise:
            valtvector[i] = 0
        else:
            valtvector[i] = scipy.sqrt(altpower - meannoise)

        kvector[i] = (altpower - meannoise) / stdnoise

    #calculate aggregate power spectrum for st interval only
    avgpowerspect = scipy.zeros(Nfft / 2 + 1)

    for i in range(int(Nfft / 2) + 1):
        avgpowerspect[i] = scipy.mean(powerspect[i, qrswidth:qtinterval])

    altpower = avgpowerspect[-1]
    noise = avgpowerspect[-11:-1]
    meannoise = scipy.mean(noise)
    stdnoise = scipy.std(noise)

    valt = scipy.sqrt(altpower - meannoise).real  # only the real part
    k = (altpower - meannoise) / stdnoise

    return (k, valt, meannoise, kvector, valtvector, avgpowerspect)
Example #55
if args.hd:
    monitor_dpi = 96  # NOTE: This is for Jnana, may be different for Brahman.
    plt.figure(figsize=(1920 / monitor_dpi, 1080 / monitor_dpi),
               dpi=monitor_dpi)  # Gives us 1080p output
    mpl.rcParams.update({'font.size': 22})
else:
    plt.figure()

#ax1 = plt.gca()
#ax2 = ax1.twinx()

plt.xlabel(r'$x$ ($\mathrm{\mu}$m)')
plt.title('t/'+tUnits+' = ' +'%.3g'%t)

FFT = abs(scipy.fft(Ex))
freqs = scipy.fftpack.fftfreq(Ex.size, dx/50)

ax1 = plt.subplot(211)
ax1.plot(xGrid, Ex, '#e41a1c')
ax1.set_ylabel(r'$E_x$ (V/m)')
#Bounds of box
ax1.plot((3, 3), (-5e12, 5e12), 'k--')
ax1.plot((6, 6), (-5e12, 5e12), 'k--')
x = np.linspace(0,3,10)
y = (np.exp(2*x)/400)*5e12
expl, = ax1.plot(x, y, '0.75') #Return as a tuple not a list
expl.set_linestyle('--')


ax2 = plt.subplot(212)
Example #56
#    plt.grid()
#    plt.show()

# $ sox --null -r 22050 sine_a.wav synth 0.2 sine 400
# $ sox --null -r 22050 sine_b.wav synth 0.2 sine 3000
# $ sox --combine mix --volume 1 sine_b.wav --volume 0.5 sine_a.wav sine_mix.wav
wave_filename1 = "/home/saurabh/sine_a.wav"
generate_plot_fft(wave_filename1)
wave_filename1 = "/home/saurabh/sine_b.wav"
generate_plot_fft(wave_filename1)
wave_filename1 = "/home/saurabh/sine_mix.wav"
generate_plot_fft(wave_filename1)

wave_filename1 = "/home/saurabh/Downloads/100Hz_44100Hz_16bit_05sec.wav"
generate_plot_fft(wave_filename1, max_freq_plot=1000)
wave_filename1 = "/home/saurabh/Downloads/100Hz_44100Hz_16bit_30sec.wav"
generate_plot_fft(wave_filename1, max_freq_plot=1000)

#generate a signal, generate its , plot the fft
N = 600  # Number of sample points
T = 1.0 / 800.0  # sample spacing
x = np.linspace(0.0, N * T, N)  # times at which y should be calculated
y = np.sin(50.0 * 2.0 * np.pi *
           x) + 0.5 * np.sin(80.0 * 2.0 * np.pi * x)  # calculate signal
yf = sp.fft(y)  # calculate fft
xf = np.linspace(0.0, 1.0 / (2.0 * T),
                 N // 2)  #get x axis points =  freq for plotting
plt.plot(xf, 2.0 / N * np.abs(
    yf[0:N // 2]))  # plot x and y (number of y points should be equal to x)
plt.grid()
plt.show()
Example #57
# for power spectrum
hz = np.linspace(0, srate / 2, int(np.floor(npnts / 2) + 1))

## Morlet wavelet

# parameters
freq = 4  # peak frequency
csw = np.cos(2 * np.pi * freq * timevec)  # cosine wave
fwhm = .5  # full-width at half-maximum in seconds
gaussian = np.exp(-(4 * np.log(2) * timevec**2) / fwhm**2)  # Gaussian

# Morlet wavelet
MorletWavelet = csw * gaussian

# amplitude spectrum
MorletWaveletPow = np.abs(scipy.fft(MorletWavelet) / npnts)

# time-domain plotting
plt.subplot(211)
plt.plot(timevec, MorletWavelet, 'k')
plt.xlabel('Time (sec.)')
plt.title('Morlet wavelet in time domain')
plt.show()

# frequency-domain plotting
plt.subplot(212)
plt.plot(hz, MorletWaveletPow[:len(hz)], 'k')
plt.xlim([0, freq * 3])
plt.xlabel('Frequency (Hz)')
plt.title('Morlet wavelet in frequency domain')
plt.show()
Example #58
    print('Sample Rate: ', sample_rate)
    audio_samples = notchFilter(audio_samples1)
    # audio_samples = audio_samples1
    normalized_x = audio_samples / np.abs(audio_samples).max()
    soundfile.write('output.wav', normalized_x.astype(np.float32), 44100)
    # duration of the audio file
    duration = round(number_samples / sample_rate, 2)
    print('Audio Duration: {0}s'.format(duration))

    # list of possible frequencies bins
    freq_bins = arange(number_samples) * sample_rate / number_samples
    print('Frequency Length: ', len(freq_bins))
    print('Frequency bins: ', freq_bins)

    # FFT calculation
    fft_data = scipy.fft(audio_samples)
    print('FFT Length: ', len(fft_data))
    print('FFT data: ', fft_data)

    freq_bins = freq_bins[range(number_samples // 2)]
    normalization_data = fft_data / number_samples
    magnitude_values = normalization_data[range(len(fft_data) // 2)]
    magnitude_values = np.abs(magnitude_values)

    indices = findPeak(magnitude_values=magnitude_values, noise_level=200)
    frequencies = extractFrequency(indices=indices)
    print("frequencies:", frequencies)

    x_asis_data = freq_bins
    y_asis_data = magnitude_values
Example #59
    print('This is a limitation of the program')
else:
    for x in range(len(USER_FILE['Timesincestartinms'])):
        DELTA_TIME.append(0.5)
    VEL_POS = double_integration(USER_FILE['LINEARACCELERATIONXms2'],
                                 USER_FILE['LINEARACCELERATIONYms2'],
                                 USER_FILE['LINEARACCELERATIONZms2'],
                                 VEL_INITIAL, POS_INITIAL, DELTA_TIME)
    SPEED = VEL_POS[0]
    DISTANCE = VEL_POS[1]
    TIMESTOPPED = VEL_POS[2] / 60
    SPEEDREAL = SPEED[0, :]**2 + SPEED[1, :]**2 + SPEED[2, :]**2
    SPEEDREAL = SPEEDREAL**0.5
    DISTANCEREAL = DISTANCE[0, :]**2 + DISTANCE[1, :]**2 + DISTANCE[2, :]**2
    DISTANCEREAL = DISTANCEREAL**0.5
    FFT = scipy.fft(SPEEDREAL)  # (G) and (H)
    BP = FFT[:]
    for j in range(len(BP)):  # (H-red)
        if j >= 50:
            BP[j] = 0
    IBP = scipy.ifft(BP)
    X = USER_FILE['Timesincestartinms'].as_matrix()
    Y = SPEEDREAL
    FIG = plt.figure()
    AXES = FIG.add_axes([0, 0, 1, 1])
    AXES.plot(X, Y, 'r')
    AXES.set_title('Speed x Time')
    plt.xlabel("Minutes")
    plt.ylabel("m/s")

    X = USER_FILE['Timesincestartinms'].as_matrix()
Example #60
def get_power_spectrum(noise_current):
    auto_corr = estimated_autocorrelation(noise_current)
    ft = np.absolute(sp.fft(auto_corr))
    return ft**(2)